git.ipfire.org Git - people/teissler/ipfire-2.x.git/commitdiff
Merge remote-tracking branch 'amarx/firewall' into fifteen
author    Michael Tremer <michael.tremer@ipfire.org>
          Wed, 28 Aug 2013 09:33:20 +0000 (11:33 +0200)
committer Michael Tremer <michael.tremer@ipfire.org>
          Wed, 28 Aug 2013 09:33:20 +0000 (11:33 +0200)
111 files changed:
config/cfgroot/header.pl
config/cfgroot/useragents
config/kernel/kernel.config.i586-ipfire
config/kernel/kernel.config.i586-ipfire-pae
config/rootfiles/common/HTML-Template [new file with mode: 0644]
config/rootfiles/common/armv5tel/u-boot
config/rootfiles/common/armv5tel/u-boot-panda [new file with mode: 0644]
config/rootfiles/common/iptables
config/rootfiles/common/libnl
config/rootfiles/common/squid
config/rootfiles/oldcore/70/exclude [moved from config/rootfiles/core/70/exclude with 100% similarity]
config/rootfiles/oldcore/70/filelists/armv5tel/linux-kirkwood [moved from config/rootfiles/core/70/filelists/armv5tel/linux-kirkwood with 100% similarity]
config/rootfiles/oldcore/70/filelists/armv5tel/linux-omap [moved from config/rootfiles/core/70/filelists/armv5tel/linux-omap with 100% similarity]
config/rootfiles/oldcore/70/filelists/armv5tel/linux-rpi [moved from config/rootfiles/core/70/filelists/armv5tel/linux-rpi with 100% similarity]
config/rootfiles/oldcore/70/filelists/crda [moved from config/rootfiles/core/70/filelists/crda with 100% similarity]
config/rootfiles/oldcore/70/filelists/files [moved from config/rootfiles/core/70/filelists/files with 100% similarity]
config/rootfiles/oldcore/70/filelists/i586/grub.conf [moved from config/rootfiles/core/70/filelists/i586/grub.conf with 100% similarity]
config/rootfiles/oldcore/70/filelists/i586/linux [moved from config/rootfiles/core/70/filelists/i586/linux with 100% similarity]
config/rootfiles/oldcore/70/filelists/iw [moved from config/rootfiles/core/70/filelists/iw with 100% similarity]
config/rootfiles/oldcore/70/filelists/libjpeg [moved from config/rootfiles/core/70/filelists/libjpeg with 100% similarity]
config/rootfiles/oldcore/70/filelists/wireless-regdb [moved from config/rootfiles/core/70/filelists/wireless-regdb with 100% similarity]
config/rootfiles/oldcore/70/meta [moved from config/rootfiles/core/70/meta with 100% similarity]
config/rootfiles/oldcore/70/update.sh [moved from config/rootfiles/core/70/update.sh with 100% similarity]
config/rootfiles/oldcore/71/exclude [moved from config/rootfiles/core/71/exclude with 100% similarity]
config/rootfiles/oldcore/71/filelists/GeoIP [moved from config/rootfiles/core/71/filelists/GeoIP with 100% similarity]
config/rootfiles/oldcore/71/filelists/curl [moved from config/rootfiles/core/71/filelists/curl with 100% similarity]
config/rootfiles/oldcore/71/filelists/files [moved from config/rootfiles/core/71/filelists/files with 100% similarity]
config/rootfiles/oldcore/71/filelists/hwdata [moved from config/rootfiles/core/71/filelists/hwdata with 100% similarity]
config/rootfiles/oldcore/71/filelists/jwhois [moved from config/rootfiles/core/71/filelists/jwhois with 100% similarity]
config/rootfiles/oldcore/71/filelists/oinkmaster [moved from config/rootfiles/core/71/filelists/oinkmaster with 100% similarity]
config/rootfiles/oldcore/71/filelists/snort [moved from config/rootfiles/core/71/filelists/snort with 100% similarity]
config/rootfiles/oldcore/71/filelists/squid [moved from config/rootfiles/core/71/filelists/squid with 100% similarity]
config/rootfiles/oldcore/71/filelists/usb_modeswitch [moved from config/rootfiles/core/71/filelists/usb_modeswitch with 100% similarity]
config/rootfiles/oldcore/71/filelists/usb_modeswitch_data [moved from config/rootfiles/core/71/filelists/usb_modeswitch_data with 100% similarity]
config/rootfiles/oldcore/71/meta [moved from config/rootfiles/core/71/meta with 100% similarity]
config/rootfiles/oldcore/71/update.sh [moved from config/rootfiles/core/71/update.sh with 100% similarity]
config/rootfiles/oldcore/72/exclude [moved from config/rootfiles/core/72/exclude with 100% similarity]
config/rootfiles/oldcore/72/filelists/daq [moved from config/rootfiles/core/72/filelists/daq with 100% similarity]
config/rootfiles/oldcore/72/filelists/files [moved from config/rootfiles/core/72/filelists/files with 100% similarity]
config/rootfiles/oldcore/72/filelists/i586/strongswan-padlock [moved from config/rootfiles/core/72/filelists/i586/strongswan-padlock with 100% similarity]
config/rootfiles/oldcore/72/filelists/snort [moved from config/rootfiles/core/72/filelists/snort with 100% similarity]
config/rootfiles/oldcore/72/filelists/squid [moved from config/rootfiles/core/72/filelists/squid with 100% similarity]
config/rootfiles/oldcore/72/filelists/strongswan [moved from config/rootfiles/core/72/filelists/strongswan with 100% similarity]
config/rootfiles/oldcore/72/meta [moved from config/rootfiles/core/72/meta with 100% similarity]
config/rootfiles/oldcore/72/update.sh [moved from config/rootfiles/core/72/update.sh with 100% similarity]
config/rootfiles/oldcore/73/exclude [new file with mode: 0644]
config/rootfiles/oldcore/73/filelists/HTML-Template [new symlink]
config/rootfiles/oldcore/73/filelists/armv5tel/ath-modul [new file with mode: 0644]
config/rootfiles/oldcore/73/filelists/files [new file with mode: 0644]
config/rootfiles/oldcore/73/filelists/i586/ath-modul [new file with mode: 0644]
config/rootfiles/oldcore/73/filelists/squid [new symlink]
config/rootfiles/oldcore/73/meta [new file with mode: 0644]
config/rootfiles/oldcore/73/update.sh [new file with mode: 0644]
config/rootfiles/packages/iptraf-ng [new file with mode: 0644]
config/rootfiles/packages/tor
config/rootfiles/packages/wavemon [new file with mode: 0644]
config/rootfiles/packages/xinetd [new file with mode: 0644]
config/xinetd/xinetd.conf [new file with mode: 0644]
doc/language_issues.es
doc/language_issues.fr
doc/language_issues.nl
doc/language_issues.pl
doc/language_issues.ru
doc/language_issues.tr
doc/language_missings
html/cgi-bin/logs.cgi/proxylog.dat
html/cgi-bin/ovpnmain.cgi
html/cgi-bin/proxy.cgi
html/cgi-bin/tor.cgi [changed mode: 0644->0755]
html/cgi-bin/urlfilter.cgi
html/html/redirect-templates/legacy/template.html [new file with mode: 0644]
html/html/redirect.cgi
langs/de/cgi-bin/de.pl
langs/en/cgi-bin/en.pl
lfs/HTML-Template [new file with mode: 0644]
lfs/bridge-utils
lfs/compat-drivers
lfs/hostapd
lfs/iptables
lfs/iptraf-ng [new file with mode: 0644]
lfs/iw
lfs/keepalived
lfs/libnl
lfs/linux
lfs/miniupnpd
lfs/net-tools
lfs/samba
lfs/squid
lfs/tor
lfs/transmission
lfs/u-boot
lfs/u-boot-panda [new file with mode: 0644]
lfs/w_scan
lfs/wavemon [new file with mode: 0644]
lfs/xinetd [new file with mode: 0644]
make.sh
src/initscripts/init.d/squid
src/initscripts/init.d/tor
src/paks/xinetd/install.sh [new file with mode: 0644]
src/paks/xinetd/uninstall.sh [new file with mode: 0644]
src/paks/xinetd/update.sh [new file with mode: 0644]
src/patches/bridge-utils-1.5-compile-fix-1.patch [new file with mode: 0644]
src/patches/compat-drivers-3.8.3-ath_ignore_eeprom_regdomain.patch [new file with mode: 0644]
src/patches/grsecurity-2.9.1-3.10.9-201308202015.patch [new file with mode: 0644]
src/patches/imq_kernel3.10.patch [new file with mode: 0644]
src/patches/linux-2.6-silence-acpi-blacklist.patch [new file with mode: 0644]
src/patches/linux-2.6.30-no-pcspkr-modalias.patch [new file with mode: 0644]
src/patches/linux-3.10-ipp2p-0.8.2-ipfire.patch [new file with mode: 0644]
src/patches/linux-3.7-disable-compat_vdso.patch [new file with mode: 0644]
src/patches/net-tools-1.60-kernel_headers-3.patch [new file with mode: 0644]
src/patches/netfilter_layer7_2.22_kernel3.10-no_proc_interface.patch [new file with mode: 0644]

diff --git a/config/cfgroot/header.pl b/config/cfgroot/header.pl
index e0f18df153521a87e6c431321c69ed241de7f84a..fb574822228e18e9ed466aaebf0876c3aa878f5f 100644
--- a/config/cfgroot/header.pl
+++ b/config/cfgroot/header.pl
@@ -12,6 +12,7 @@
 package Header;
 
 use CGI();
+use HTML::Entities();
 use Socket;
 use Time::Local;
 
@@ -302,16 +303,16 @@ sub IpInSubnet
     return (($ip >= $start) && ($ip <= $end));
 }
 
-sub cleanhtml
-{
+sub escape($) {
+       my $s = shift;
+       return HTML::Entities::encode_entities($s);
+}
+
+sub cleanhtml {
        my $outstring =$_[0];
        $outstring =~ tr/,/ / if not defined $_[1] or $_[1] ne 'y';
-       $outstring =~ s/&/&amp;/g;
-       $outstring =~ s/\'/&#039;/g;
-       $outstring =~ s/\"/&quot;/g; #" This is just a workaround for the syntax highlighter
-       $outstring =~ s/</&lt;/g;
-       $outstring =~ s/>/&gt;/g;
-       return $outstring;
+
+       return escape($outstring);
 }
 
 sub connectionstatus
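
Note on the header.pl hunk above: the hand-rolled entity substitutions in cleanhtml() are replaced by a small escape() wrapper around HTML::Entities::encode_entities(), which handles the characters the old regexes covered (&, <, >, ") and, depending on the HTML::Entities version, a few more such as the apostrophe and non-ASCII characters. A minimal standalone sketch of the new behaviour, assuming the HTML::Entities CPAN module is installed; the helper below mirrors the patched code but is not itself part of the patch, and the input/output shown are only an illustration:

#!/usr/bin/perl
# Illustrative sketch only -- mirrors the patched Header::escape()/cleanhtml().
use strict;
use warnings;
use HTML::Entities ();

sub escape {
    my $s = shift;
    return HTML::Entities::encode_entities($s);
}

sub cleanhtml {
    my $outstring = $_[0];
    # commas are still mapped to spaces unless the caller passes 'y'
    $outstring =~ tr/,/ / if not defined $_[1] or $_[1] ne 'y';
    return escape($outstring);
}

print cleanhtml(q{<b>"fish & chips", to go</b>}), "\n";
# prints: &lt;b&gt;&quot;fish &amp; chips&quot;  to go&lt;/b&gt;
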
diff --git a/config/cfgroot/useragents b/config/cfgroot/useragents
index 5fe3446c6176ade7b7bda41f5cc79b810623aaf8..d5164fd63545c3c06d1cbb69c49d57674d0f1bb3 100644
--- a/config/cfgroot/useragents
+++ b/config/cfgroot/useragents
@@ -11,7 +11,7 @@ GOZILLA,Go!Zilla,(Go!Zilla)
 GOOGLE,Google Toolbar,(Google\sToolbar)
 JAVA,Java,(Java)
 KONQUEROR,Konqueror,(Konqueror)
-LIBWWWPERL,libwww-perl,(libww-perl)
+LIBWWWPERL,libwww-perl,(libwww-perl)
 LYNX,Lynx,(Lynx)
 MSIE,Internet Explorer,(MSIE.*[)]$)
 NETSCAPE,Netscape,(^Mozilla\/4.[7|8])|(Netscape)
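
The useragents change above fixes a typo in the pattern for libwww-perl clients: the old regex (libww-perl) can never match a real libwww-perl User-Agent string, so the corrected pattern is what makes the proxy's user-agent filter recognise such clients. A quick illustration (the sample agent string is an assumed example, not taken from the patch):

#!/usr/bin/perl
# Quick illustration only: old vs. corrected useragents pattern.
use strict;
use warnings;

my $ua = "libwww-perl/6.05";   # typical libwww-perl User-Agent (example value)
print "old pattern: ", ($ua =~ /(libww-perl)/  ? "match" : "no match"), "\n";  # no match
print "new pattern: ", ($ua =~ /(libwww-perl)/ ? "match" : "no match"), "\n";  # match
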
diff --git a/config/kernel/kernel.config.i586-ipfire b/config/kernel/kernel.config.i586-ipfire
index ae5e3718a066cbedaa443f16df2f7a49acad6f7c..131b8dfe85b9d53d986445449138eaa4517bb967 100644
--- a/config/kernel/kernel.config.i586-ipfire
+++ b/config/kernel/kernel.config.i586-ipfire
@@ -1,46 +1,34 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/i386 3.2.47 Kernel Configuration
+# Linux/x86 3.10.9 Kernel Configuration
 #
 # CONFIG_64BIT is not set
 CONFIG_X86_32=y
-# CONFIG_X86_64 is not set
 CONFIG_X86=y
 CONFIG_INSTRUCTION_DECODER=y
 CONFIG_OUTPUT_FORMAT="elf32-i386"
 CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
-CONFIG_GENERIC_CMOS_UPDATE=y
-CONFIG_CLOCKSOURCE_WATCHDOG=y
-CONFIG_GENERIC_CLOCKEVENTS=y
-CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
 CONFIG_LOCKDEP_SUPPORT=y
 CONFIG_STACKTRACE_SUPPORT=y
 CONFIG_HAVE_LATENCYTOP_SUPPORT=y
 CONFIG_MMU=y
-CONFIG_ZONE_DMA=y
 CONFIG_NEED_DMA_MAP_STATE=y
 CONFIG_NEED_SG_DMA_LENGTH=y
 CONFIG_GENERIC_ISA_DMA=y
-CONFIG_GENERIC_IOMAP=y
 CONFIG_GENERIC_BUG=y
 CONFIG_GENERIC_HWEIGHT=y
-CONFIG_GENERIC_GPIO=y
 CONFIG_ARCH_MAY_HAVE_PC_FDC=y
-# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
 CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
-# CONFIG_GENERIC_TIME_VSYSCALL is not set
 CONFIG_ARCH_HAS_CPU_RELAX=y
-CONFIG_ARCH_HAS_DEFAULT_IDLE=y
 CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_ARCH_HAS_CPU_AUTOPROBE=y
 CONFIG_HAVE_SETUP_PER_CPU_AREA=y
 CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
 CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
 CONFIG_ARCH_HIBERNATION_POSSIBLE=y
 CONFIG_ARCH_SUSPEND_POSSIBLE=y
 # CONFIG_ZONE_DMA32 is not set
-CONFIG_ARCH_POPULATES_NODE_MAP=y
 # CONFIG_AUDIT_ARCH is not set
 CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
 CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
@@ -48,16 +36,15 @@ CONFIG_HAVE_INTEL_TXT=y
 CONFIG_X86_32_SMP=y
 CONFIG_X86_HT=y
 CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-ecx -fcall-saved-edx"
-CONFIG_KTIME_SCALAR=y
 CONFIG_ARCH_CPU_PROBE_RELEASE=y
+CONFIG_ARCH_SUPPORTS_UPROBES=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
-CONFIG_HAVE_IRQ_WORK=y
 CONFIG_IRQ_WORK=y
+CONFIG_BUILDTIME_EXTABLE_SORT=y
 
 #
 # General setup
 #
-CONFIG_EXPERIMENTAL=y
 CONFIG_INIT_ENV_ARG_LIMIT=32
 CONFIG_CROSS_COMPILE=""
 CONFIG_LOCALVERSION=""
@@ -69,8 +56,8 @@ CONFIG_HAVE_KERNEL_XZ=y
 CONFIG_HAVE_KERNEL_LZO=y
 # CONFIG_KERNEL_GZIP is not set
 # CONFIG_KERNEL_BZIP2 is not set
-CONFIG_KERNEL_LZMA=y
-# CONFIG_KERNEL_XZ is not set
+# CONFIG_KERNEL_LZMA is not set
+CONFIG_KERNEL_XZ=y
 # CONFIG_KERNEL_LZO is not set
 CONFIG_DEFAULT_HOSTNAME="(none)"
 CONFIG_SWAP=y
@@ -78,54 +65,99 @@ CONFIG_SYSVIPC=y
 CONFIG_SYSVIPC_SYSCTL=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_POSIX_MQUEUE_SYSCTL=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-# CONFIG_FHANDLE is not set
-CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
-# CONFIG_TASK_XACCT is not set
+CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
 CONFIG_AUDITSYSCALL=y
 CONFIG_AUDIT_WATCH=y
 CONFIG_AUDIT_TREE=y
+CONFIG_AUDIT_LOGINUID_IMMUTABLE=y
 CONFIG_HAVE_GENERIC_HARDIRQS=y
 
 #
 # IRQ subsystem
 #
 CONFIG_GENERIC_HARDIRQS=y
-CONFIG_HAVE_SPARSE_IRQ=y
 CONFIG_GENERIC_IRQ_PROBE=y
 CONFIG_GENERIC_IRQ_SHOW=y
 CONFIG_GENERIC_PENDING_IRQ=y
-CONFIG_GENERIC_IRQ_CHIP=y
+CONFIG_IRQ_DOMAIN=y
+# CONFIG_IRQ_DOMAIN_DEBUG is not set
 CONFIG_IRQ_FORCED_THREADING=y
 CONFIG_SPARSE_IRQ=y
+CONFIG_CLOCKSOURCE_WATCHDOG=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+
+#
+# Timers subsystem
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ_COMMON=y
+# CONFIG_HZ_PERIODIC is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+
+#
+# CPU/Task time and stats accounting
+#
+# CONFIG_TICK_CPU_ACCOUNTING is not set
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
 
 #
 # RCU Subsystem
 #
 CONFIG_TREE_RCU=y
 # CONFIG_PREEMPT_RCU is not set
-# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_STALL_COMMON=y
 CONFIG_RCU_FANOUT=32
+CONFIG_RCU_FANOUT_LEAF=16
 # CONFIG_RCU_FANOUT_EXACT is not set
-# CONFIG_RCU_FAST_NO_HZ is not set
+CONFIG_RCU_FAST_NO_HZ=y
 # CONFIG_TREE_RCU_TRACE is not set
+CONFIG_RCU_NOCB_CPU=y
+# CONFIG_RCU_NOCB_CPU_NONE is not set
+# CONFIG_RCU_NOCB_CPU_ZERO is not set
+CONFIG_RCU_NOCB_CPU_ALL=y
 # CONFIG_IKCONFIG is not set
-CONFIG_LOG_BUF_SHIFT=17
+CONFIG_LOG_BUF_SHIFT=18
 CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
-# CONFIG_CGROUPS is not set
+CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
+CONFIG_ARCH_WANTS_PROT_NUMA_PROT_NONE=y
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_PROC_PID_CPUSET=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+# CONFIG_MEMCG is not set
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_CFS_BANDWIDTH is not set
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_CGROUP=y
+# CONFIG_DEBUG_BLK_CGROUP is not set
 CONFIG_NAMESPACES=y
 CONFIG_UTS_NS=y
 CONFIG_IPC_NS=y
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
-# CONFIG_NET_NS is not set
-# CONFIG_SCHED_AUTOGROUP is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
-# CONFIG_RELAY is not set
+CONFIG_PID_NS=y
+CONFIG_NET_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+# CONFIG_SYSFS_DEPRECATED is not set
+CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_RD_GZIP=y
@@ -136,17 +168,19 @@ CONFIG_RD_LZO=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SYSCTL=y
 CONFIG_ANON_INODES=y
-# CONFIG_EXPERT is not set
+CONFIG_HAVE_UID16=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_HOTPLUG=y
+CONFIG_HAVE_PCSPKR_PLATFORM=y
+CONFIG_EXPERT=y
 CONFIG_UID16=y
 # CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-CONFIG_HOTPLUG=y
+CONFIG_KALLSYMS_ALL=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
 CONFIG_ELF_CORE=y
 CONFIG_PCSPKR_PLATFORM=y
-CONFIG_HAVE_PCSPKR_PLATFORM=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_EPOLL=y
@@ -155,46 +189,69 @@ CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
 CONFIG_AIO=y
-# CONFIG_EMBEDDED is not set
+CONFIG_PCI_QUIRKS=y
+CONFIG_EMBEDDED=y
 CONFIG_HAVE_PERF_EVENTS=y
 
 #
 # Kernel Performance Events And Counters
 #
 CONFIG_PERF_EVENTS=y
-# CONFIG_PERF_COUNTERS is not set
 # CONFIG_DEBUG_PERF_USE_VMALLOC is not set
 CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_PCI_QUIRKS=y
 CONFIG_SLUB_DEBUG=y
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_SLAB is not set
 CONFIG_SLUB=y
+# CONFIG_SLOB is not set
 # CONFIG_PROFILING is not set
+CONFIG_TRACEPOINTS=y
 CONFIG_HAVE_OPROFILE=y
+CONFIG_OPROFILE_NMI_TIMER=y
 # CONFIG_KPROBES is not set
-# CONFIG_JUMP_LABEL is not set
+CONFIG_JUMP_LABEL=y
+# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
 CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_ARCH_USE_BUILTIN_BSWAP=y
 CONFIG_USER_RETURN_NOTIFIER=y
 CONFIG_HAVE_IOREMAP_PROT=y
 CONFIG_HAVE_KPROBES=y
 CONFIG_HAVE_KRETPROBES=y
 CONFIG_HAVE_OPTPROBES=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
 CONFIG_HAVE_ARCH_TRACEHOOK=y
 CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
 CONFIG_USE_GENERIC_SMP_HELPERS=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
 CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_CLK=y
 CONFIG_HAVE_DMA_API_DEBUG=y
 CONFIG_HAVE_HW_BREAKPOINT=y
 CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
 CONFIG_HAVE_USER_RETURN_NOTIFIER=y
 CONFIG_HAVE_PERF_EVENTS_NMI=y
+CONFIG_HAVE_PERF_REGS=y
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y
 CONFIG_HAVE_ARCH_JUMP_LABEL=y
 CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
+CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
+CONFIG_HAVE_CMPXCHG_LOCAL=y
+CONFIG_HAVE_CMPXCHG_DOUBLE=y
+CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y
+CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_SECCOMP_FILTER=y
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+CONFIG_MODULES_USE_ELF_REL=y
+CONFIG_CLONE_BACKWARDS=y
+CONFIG_OLD_SIGSUSPEND3=y
+CONFIG_OLD_SIGACTION=y
 
 #
 # GCOV-based kernel profiling
 #
+# CONFIG_GCOV_KERNEL is not set
 CONFIG_HAVE_GENERIC_DMA_COHERENT=y
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
@@ -202,15 +259,40 @@ CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
 # CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
 # CONFIG_MODVERSIONS is not set
-# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_MODULE_SIG is not set
 CONFIG_STOP_MACHINE=y
 CONFIG_BLOCK=y
 CONFIG_LBDAF=y
 CONFIG_BLK_DEV_BSG=y
 CONFIG_BLK_DEV_BSGLIB=y
-# CONFIG_BLK_DEV_INTEGRITY is not set
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLK_DEV_THROTTLING=y
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+CONFIG_LDM_PARTITION=y
+# CONFIG_LDM_DEBUG is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
 
 #
 # IO Schedulers
@@ -218,74 +300,53 @@ CONFIG_BLK_DEV_BSGLIB=y
 CONFIG_IOSCHED_NOOP=y
 CONFIG_IOSCHED_DEADLINE=y
 CONFIG_IOSCHED_CFQ=y
+CONFIG_CFQ_GROUP_IOSCHED=y
 # CONFIG_DEFAULT_DEADLINE is not set
 CONFIG_DEFAULT_CFQ=y
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="cfq"
 CONFIG_PREEMPT_NOTIFIERS=y
 CONFIG_PADATA=y
-# CONFIG_INLINE_SPIN_TRYLOCK is not set
-# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
-# CONFIG_INLINE_SPIN_LOCK is not set
-# CONFIG_INLINE_SPIN_LOCK_BH is not set
-# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
-# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
-CONFIG_INLINE_SPIN_UNLOCK=y
-# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_ASN1=m
 CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
-# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
-# CONFIG_INLINE_READ_TRYLOCK is not set
-# CONFIG_INLINE_READ_LOCK is not set
-# CONFIG_INLINE_READ_LOCK_BH is not set
-# CONFIG_INLINE_READ_LOCK_IRQ is not set
-# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
 CONFIG_INLINE_READ_UNLOCK=y
-# CONFIG_INLINE_READ_UNLOCK_BH is not set
 CONFIG_INLINE_READ_UNLOCK_IRQ=y
-# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
-# CONFIG_INLINE_WRITE_TRYLOCK is not set
-# CONFIG_INLINE_WRITE_LOCK is not set
-# CONFIG_INLINE_WRITE_LOCK_BH is not set
-# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
-# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
 CONFIG_INLINE_WRITE_UNLOCK=y
-# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
 CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
-# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
 CONFIG_MUTEX_SPIN_ON_OWNER=y
 CONFIG_FREEZER=y
 
 #
 # Processor type and features
 #
-CONFIG_TICK_ONESHOT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
-CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
+CONFIG_ZONE_DMA=y
 CONFIG_SMP=y
 CONFIG_X86_MPPARSE=y
-# CONFIG_X86_BIGSMP is not set
+CONFIG_X86_BIGSMP=y
 CONFIG_X86_EXTENDED_PLATFORM=y
+# CONFIG_X86_GOLDFISH is not set
 # CONFIG_X86_WANT_INTEL_MID is not set
+CONFIG_X86_INTEL_LPSS=y
 # CONFIG_X86_RDC321X is not set
-# CONFIG_X86_32_NON_STANDARD is not set
+CONFIG_X86_32_NON_STANDARD=y
+# CONFIG_X86_NUMAQ is not set
 CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
-# CONFIG_X86_32_IRIS is not set
+# CONFIG_STA2X11 is not set
+# CONFIG_X86_SUMMIT is not set
+# CONFIG_X86_ES7000 is not set
+CONFIG_X86_32_IRIS=m
 CONFIG_SCHED_OMIT_FRAME_POINTER=y
-CONFIG_PARAVIRT_GUEST=y
-# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_PARAVIRT=y
+# CONFIG_PARAVIRT_DEBUG is not set
+# CONFIG_PARAVIRT_SPINLOCKS is not set
 # CONFIG_XEN_PRIVILEGED_GUEST is not set
-CONFIG_KVM_CLOCK=y
 CONFIG_KVM_GUEST=y
-CONFIG_LGUEST_GUEST=y
-CONFIG_PARAVIRT=y
-CONFIG_PARAVIRT_SPINLOCKS=y
+# CONFIG_LGUEST_GUEST is not set
+CONFIG_PARAVIRT_TIME_ACCOUNTING=y
 CONFIG_PARAVIRT_CLOCK=y
-# CONFIG_PARAVIRT_DEBUG is not set
 CONFIG_NO_BOOTMEM=y
 # CONFIG_MEMTEST is not set
-# CONFIG_M386 is not set
 # CONFIG_M486 is not set
 # CONFIG_M586 is not set
 CONFIG_M586TSC=y
@@ -312,21 +373,13 @@ CONFIG_M586TSC=y
 # CONFIG_MATOM is not set
 CONFIG_X86_GENERIC=y
 CONFIG_X86_INTERNODE_CACHE_SHIFT=6
-CONFIG_X86_CMPXCHG=y
-CONFIG_CMPXCHG_LOCAL=y
-CONFIG_CMPXCHG_DOUBLE=y
 CONFIG_X86_L1_CACHE_SHIFT=6
-CONFIG_X86_XADD=y
-CONFIG_X86_PPRO_FENCE=y
-CONFIG_X86_F00F_BUG=y
-CONFIG_X86_WP_WORKS_OK=y
-CONFIG_X86_INVLPG=y
-CONFIG_X86_BSWAP=y
-CONFIG_X86_POPAD_OK=y
+# CONFIG_X86_PPRO_FENCE is not set
 CONFIG_X86_ALIGNMENT_16=y
 CONFIG_X86_INTEL_USERCOPY=y
 CONFIG_X86_TSC=y
 CONFIG_X86_MINIMUM_CPU_FAMILY=4
+# CONFIG_PROCESSOR_SELECT is not set
 CONFIG_CPU_SUP_INTEL=y
 CONFIG_CPU_SUP_CYRIX_32=y
 CONFIG_CPU_SUP_AMD=y
@@ -336,11 +389,9 @@ CONFIG_CPU_SUP_UMC_32=y
 CONFIG_HPET_TIMER=y
 CONFIG_HPET_EMULATE_RTC=y
 CONFIG_DMI=y
-# CONFIG_IOMMU_HELPER is not set
-CONFIG_NR_CPUS=8
+CONFIG_NR_CPUS=32
 CONFIG_SCHED_SMT=y
 CONFIG_SCHED_MC=y
-CONFIG_IRQ_TIME_ACCOUNTING=y
 CONFIG_PREEMPT_NONE=y
 # CONFIG_PREEMPT_VOLUNTARY is not set
 # CONFIG_PREEMPT is not set
@@ -350,27 +401,32 @@ CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
 CONFIG_X86_MCE=y
 CONFIG_X86_MCE_INTEL=y
 CONFIG_X86_MCE_AMD=y
-CONFIG_X86_ANCIENT_MCE=y
+# CONFIG_X86_ANCIENT_MCE is not set
 CONFIG_X86_MCE_THRESHOLD=y
-CONFIG_X86_MCE_INJECT=m
+# CONFIG_X86_MCE_INJECT is not set
 CONFIG_X86_THERMAL_VECTOR=y
 CONFIG_VM86=y
 CONFIG_TOSHIBA=m
 CONFIG_I8K=m
-CONFIG_X86_REBOOTFIXUPS=y
+# CONFIG_X86_REBOOTFIXUPS is not set
 CONFIG_MICROCODE=m
 CONFIG_MICROCODE_INTEL=y
 CONFIG_MICROCODE_AMD=y
 CONFIG_MICROCODE_OLD_INTERFACE=y
-CONFIG_X86_MSR=m
+CONFIG_MICROCODE_INTEL_LIB=y
+CONFIG_MICROCODE_INTEL_EARLY=y
+CONFIG_MICROCODE_EARLY=y
 CONFIG_X86_CPUID=y
 # CONFIG_NOHIGHMEM is not set
 CONFIG_HIGHMEM4G=y
 # CONFIG_HIGHMEM64G is not set
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_3G_OPT is not set
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_2G_OPT is not set
+# CONFIG_VMSPLIT_1G is not set
 CONFIG_PAGE_OFFSET=0xC0000000
 CONFIG_HIGHMEM=y
-# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
-# CONFIG_ARCH_DMA_ADDR_T_64BIT is not set
 CONFIG_ARCH_FLATMEM_ENABLE=y
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
 CONFIG_ARCH_SELECT_MEMORY_MODEL=y
@@ -382,9 +438,15 @@ CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
 CONFIG_SPARSEMEM_STATIC=y
 CONFIG_HAVE_MEMBLOCK=y
+CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
+CONFIG_ARCH_DISCARD_MEMBLOCK=y
+CONFIG_MEMORY_ISOLATION=y
+# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
 CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_COMPACTION is not set
+CONFIG_BALLOON_COMPACTION=y
+CONFIG_COMPACTION=y
+CONFIG_MIGRATION=y
 # CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_BOUNCE=y
@@ -393,21 +455,29 @@ CONFIG_MMU_NOTIFIER=y
 CONFIG_KSM=y
 CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
 CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
-# CONFIG_MEMORY_FAILURE is not set
-# CONFIG_TRANSPARENT_HUGEPAGE is not set
-# CONFIG_CLEANCACHE is not set
-# CONFIG_HIGHPTE is not set
-# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
+CONFIG_MEMORY_FAILURE=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
+# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set
+CONFIG_CROSS_MEMORY_ATTACH=y
+CONFIG_CLEANCACHE=y
+# CONFIG_FRONTSWAP is not set
+CONFIG_HIGHPTE=y
+CONFIG_X86_CHECK_BIOS_CORRUPTION=y
+CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y
 CONFIG_X86_RESERVE_LOW=64
-CONFIG_MATH_EMULATION=y
+# CONFIG_MATH_EMULATION is not set
 CONFIG_MTRR=y
-# CONFIG_MTRR_SANITIZER is not set
+CONFIG_MTRR_SANITIZER=y
+CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
+CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
 CONFIG_X86_PAT=y
 CONFIG_ARCH_USES_PG_UNCACHED=y
 CONFIG_ARCH_RANDOM=y
-# CONFIG_EFI is not set
+CONFIG_X86_SMAP=y
+CONFIG_EFI=y
+CONFIG_EFI_STUB=y
 CONFIG_SECCOMP=y
-CONFIG_CC_STACKPROTECTOR=y
 # CONFIG_HZ_100 is not set
 # CONFIG_HZ_250 is not set
 CONFIG_HZ_300=y
@@ -415,12 +485,14 @@ CONFIG_HZ_300=y
 CONFIG_HZ=300
 CONFIG_SCHED_HRTICK=y
 # CONFIG_KEXEC is not set
-# CONFIG_CRASH_DUMP is not set
-CONFIG_PHYSICAL_START=0x1000000
-# CONFIG_RELOCATABLE is not set
-CONFIG_PHYSICAL_ALIGN=0x100000
+CONFIG_CRASH_DUMP=y
+CONFIG_PHYSICAL_START=0x400000
+CONFIG_RELOCATABLE=y
+CONFIG_X86_NEED_RELOCS=y
+CONFIG_PHYSICAL_ALIGN=0x400000
 CONFIG_HOTPLUG_CPU=y
-CONFIG_COMPAT_VDSO=y
+# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
+# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
 # CONFIG_CMDLINE_BOOL is not set
 CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
 
@@ -429,43 +501,56 @@ CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
 #
 CONFIG_SUSPEND=y
 CONFIG_SUSPEND_FREEZER=y
-# CONFIG_HIBERNATION is not set
+CONFIG_HIBERNATE_CALLBACKS=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_STD_PARTITION=""
 CONFIG_PM_SLEEP=y
 CONFIG_PM_SLEEP_SMP=y
+# CONFIG_PM_AUTOSLEEP is not set
+# CONFIG_PM_WAKELOCKS is not set
 CONFIG_PM_RUNTIME=y
 CONFIG_PM=y
 # CONFIG_PM_DEBUG is not set
+CONFIG_PM_CLK=y
 CONFIG_ACPI=y
 CONFIG_ACPI_SLEEP=y
-# CONFIG_ACPI_PROCFS is not set
-CONFIG_ACPI_PROCFS_POWER=y
+CONFIG_ACPI_PROCFS=y
+# CONFIG_ACPI_PROCFS_POWER is not set
 CONFIG_ACPI_EC_DEBUGFS=m
-CONFIG_ACPI_PROC_EVENT=y
-CONFIG_ACPI_AC=m
-CONFIG_ACPI_BATTERY=m
-CONFIG_ACPI_BUTTON=m
+# CONFIG_ACPI_PROC_EVENT is not set
+CONFIG_ACPI_AC=y
+CONFIG_ACPI_BATTERY=y
+CONFIG_ACPI_BUTTON=y
 CONFIG_ACPI_VIDEO=m
-CONFIG_ACPI_FAN=m
+CONFIG_ACPI_FAN=y
 CONFIG_ACPI_DOCK=y
-CONFIG_ACPI_PROCESSOR=m
+CONFIG_ACPI_I2C=m
+CONFIG_ACPI_PROCESSOR=y
 CONFIG_ACPI_IPMI=m
 CONFIG_ACPI_HOTPLUG_CPU=y
 CONFIG_ACPI_PROCESSOR_AGGREGATOR=m
-CONFIG_ACPI_THERMAL=m
-CONFIG_ACPI_CUSTOM_DSDT_FILE=""
+CONFIG_ACPI_THERMAL=y
 # CONFIG_ACPI_CUSTOM_DSDT is not set
-CONFIG_ACPI_BLACKLIST_YEAR=0
+CONFIG_ACPI_INITRD_TABLE_OVERRIDE=y
+CONFIG_ACPI_BLACKLIST_YEAR=1999
 # CONFIG_ACPI_DEBUG is not set
-CONFIG_ACPI_PCI_SLOT=m
+CONFIG_ACPI_PCI_SLOT=y
 CONFIG_X86_PM_TIMER=y
-CONFIG_ACPI_CONTAINER=m
+CONFIG_ACPI_CONTAINER=y
 CONFIG_ACPI_SBS=m
-# CONFIG_ACPI_HED is not set
-# CONFIG_ACPI_APEI is not set
-# CONFIG_SFI is not set
+CONFIG_ACPI_HED=y
+CONFIG_ACPI_CUSTOM_METHOD=m
+# CONFIG_ACPI_BGRT is not set
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_PCIEAER=y
+CONFIG_ACPI_APEI_MEMORY_FAILURE=y
+# CONFIG_ACPI_APEI_EINJ is not set
+# CONFIG_ACPI_APEI_ERST_DEBUG is not set
+CONFIG_SFI=y
 CONFIG_X86_APM_BOOT=y
-CONFIG_APM=m
-CONFIG_APM_IGNORE_USER_SUSPEND=y
+CONFIG_APM=y
+# CONFIG_APM_IGNORE_USER_SUSPEND is not set
 # CONFIG_APM_DO_ENABLE is not set
 CONFIG_APM_CPU_IDLE=y
 # CONFIG_APM_DISPLAY_BLANK is not set
@@ -476,46 +561,52 @@ CONFIG_APM_CPU_IDLE=y
 #
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_TABLE=y
-CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=m
 CONFIG_CPU_FREQ_STAT_DETAILS=y
-CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
-# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
 # CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
 # CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
 CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=m
-CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=m
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
 
 #
 # x86 CPU frequency scaling drivers
 #
-CONFIG_X86_PCC_CPUFREQ=m
-CONFIG_X86_ACPI_CPUFREQ=m
-CONFIG_X86_POWERNOW_K6=m
-CONFIG_X86_POWERNOW_K7=m
+CONFIG_X86_INTEL_PSTATE=y
+CONFIG_X86_PCC_CPUFREQ=y
+CONFIG_X86_ACPI_CPUFREQ=y
+# CONFIG_X86_ACPI_CPUFREQ_CPB is not set
+# CONFIG_X86_POWERNOW_K6 is not set
+CONFIG_X86_POWERNOW_K7=y
 CONFIG_X86_POWERNOW_K7_ACPI=y
-CONFIG_X86_POWERNOW_K8=m
-CONFIG_X86_GX_SUSPMOD=m
-CONFIG_X86_SPEEDSTEP_CENTRINO=m
-CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE=y
-CONFIG_X86_SPEEDSTEP_ICH=m
-CONFIG_X86_SPEEDSTEP_SMI=m
-CONFIG_X86_P4_CLOCKMOD=m
-CONFIG_X86_CPUFREQ_NFORCE2=m
-CONFIG_X86_LONGRUN=m
-CONFIG_X86_LONGHAUL=m
-CONFIG_X86_E_POWERSAVER=m
+CONFIG_X86_POWERNOW_K8=y
+# CONFIG_X86_AMD_FREQ_SENSITIVITY is not set
+# CONFIG_X86_GX_SUSPMOD is not set
+# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
+CONFIG_X86_SPEEDSTEP_ICH=y
+CONFIG_X86_SPEEDSTEP_SMI=y
+CONFIG_X86_P4_CLOCKMOD=y
+# CONFIG_X86_CPUFREQ_NFORCE2 is not set
+CONFIG_X86_LONGRUN=y
+# CONFIG_X86_LONGHAUL is not set
+# CONFIG_X86_E_POWERSAVER is not set
 
 #
 # shared options
 #
-CONFIG_X86_SPEEDSTEP_LIB=m
+CONFIG_X86_SPEEDSTEP_LIB=y
 # CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK is not set
 CONFIG_CPU_IDLE=y
+# CONFIG_CPU_IDLE_MULTIPLE_DRIVERS is not set
 CONFIG_CPU_IDLE_GOV_LADDER=y
 CONFIG_CPU_IDLE_GOV_MENU=y
+# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
 CONFIG_INTEL_IDLE=y
 
 #
@@ -525,23 +616,30 @@ CONFIG_PCI=y
 # CONFIG_PCI_GOBIOS is not set
 # CONFIG_PCI_GOMMCONFIG is not set
 # CONFIG_PCI_GODIRECT is not set
+# CONFIG_PCI_GOOLPC is not set
 CONFIG_PCI_GOANY=y
 CONFIG_PCI_BIOS=y
 CONFIG_PCI_DIRECT=y
 CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_OLPC=y
 CONFIG_PCI_DOMAINS=y
 # CONFIG_PCI_CNB20LE_QUIRK is not set
 CONFIG_PCIEPORTBUS=y
+CONFIG_HOTPLUG_PCI_PCIE=y
 CONFIG_PCIEAER=y
-# CONFIG_PCIE_ECRC is not set
-# CONFIG_PCIEAER_INJECT is not set
+CONFIG_PCIE_ECRC=y
+CONFIG_PCIEAER_INJECT=m
 CONFIG_PCIEASPM=y
 # CONFIG_PCIEASPM_DEBUG is not set
+# CONFIG_PCIEASPM_DEFAULT is not set
+CONFIG_PCIEASPM_POWERSAVE=y
+# CONFIG_PCIEASPM_PERFORMANCE is not set
 CONFIG_PCIE_PME=y
 CONFIG_ARCH_SUPPORTS_MSI=y
 CONFIG_PCI_MSI=y
 # CONFIG_PCI_DEBUG is not set
-CONFIG_PCI_STUB=m
+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
+CONFIG_PCI_STUB=y
 CONFIG_HT_IRQ=y
 CONFIG_PCI_ATS=y
 CONFIG_PCI_IOV=y
@@ -550,23 +648,19 @@ CONFIG_PCI_IOV=y
 CONFIG_PCI_IOAPIC=y
 CONFIG_PCI_LABEL=y
 CONFIG_ISA_DMA_API=y
-CONFIG_ISA=y
-CONFIG_EISA=y
-CONFIG_EISA_VLB_PRIMING=y
-CONFIG_EISA_PCI_EISA=y
-CONFIG_EISA_VIRTUAL_ROOT=y
-CONFIG_EISA_NAMES=y
-CONFIG_MCA=y
-CONFIG_MCA_LEGACY=y
-# CONFIG_MCA_PROC_FS is not set
-CONFIG_SCx200=m
-CONFIG_SCx200HR_TIMER=m
-# CONFIG_OLPC is not set
-CONFIG_ALIX=y
+# CONFIG_ISA is not set
+# CONFIG_SCx200 is not set
+CONFIG_OLPC=y
+CONFIG_OLPC_XO1_PM=y
+CONFIG_OLPC_XO1_RTC=y
+CONFIG_OLPC_XO1_SCI=y
+CONFIG_OLPC_XO15_SCI=y
+# CONFIG_ALIX is not set
+# CONFIG_NET5501 is not set
+# CONFIG_GEOS is not set
 CONFIG_AMD_NB=y
 CONFIG_PCCARD=m
-CONFIG_PCMCIA=m
-CONFIG_PCMCIA_LOAD_CIS=y
+# CONFIG_PCMCIA is not set
 CONFIG_CARDBUS=y
 
 #
@@ -578,23 +672,25 @@ CONFIG_YENTA_RICOH=y
 CONFIG_YENTA_TI=y
 CONFIG_YENTA_ENE_TUNE=y
 CONFIG_YENTA_TOSHIBA=y
-CONFIG_PD6729=m
-CONFIG_I82092=m
-CONFIG_I82365=m
-CONFIG_TCIC=m
-CONFIG_PCMCIA_PROBE=y
-CONFIG_PCCARD_NONSTATIC=y
-# CONFIG_HOTPLUG_PCI is not set
+CONFIG_HOTPLUG_PCI=y
+# CONFIG_HOTPLUG_PCI_COMPAQ is not set
+# CONFIG_HOTPLUG_PCI_IBM is not set
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_HOTPLUG_PCI_ACPI_IBM=m
+# CONFIG_HOTPLUG_PCI_CPCI is not set
+# CONFIG_HOTPLUG_PCI_SHPC is not set
 # CONFIG_RAPIDIO is not set
 
 #
 # Executable file formats / Emulations
 #
 CONFIG_BINFMT_ELF=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_BINFMT_SCRIPT=y
 CONFIG_HAVE_AOUT=y
-# CONFIG_BINFMT_AOUT is not set
-# CONFIG_BINFMT_MISC is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_COREDUMP=y
 CONFIG_HAVE_ATOMIC_IOMAP=y
 CONFIG_HAVE_TEXT_POKE_SMP=y
 CONFIG_NET=y
@@ -603,10 +699,13 @@ CONFIG_NET=y
 # Networking options
 #
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
 CONFIG_XFRM=y
-CONFIG_XFRM_USER=m
-# CONFIG_XFRM_SUB_POLICY is not set
+CONFIG_XFRM_ALGO=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_SUB_POLICY=y
 CONFIG_XFRM_MIGRATE=y
 CONFIG_XFRM_STATISTICS=y
 CONFIG_XFRM_IPCOMP=m
@@ -615,7 +714,7 @@ CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
-# CONFIG_IP_FIB_TRIE_STATS is not set
+CONFIG_IP_FIB_TRIE_STATS=y
 CONFIG_IP_MULTIPLE_TABLES=y
 CONFIG_IP_ROUTE_MULTIPATH=y
 CONFIG_IP_ROUTE_VERBOSE=y
@@ -623,28 +722,31 @@ CONFIG_IP_ROUTE_CLASSID=y
 # CONFIG_IP_PNP is not set
 CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IP_TUNNEL=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPGRE_BROADCAST=y
 CONFIG_IP_MROUTE=y
-# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
 CONFIG_IP_PIMSM_V1=y
 CONFIG_IP_PIMSM_V2=y
-CONFIG_ARPD=y
+# CONFIG_ARPD is not set
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_TUNNEL=m
 CONFIG_INET_TUNNEL=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_INET_LRO=y
 CONFIG_INET_DIAG=m
 CONFIG_INET_TCP_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_TCP_CONG_ADVANCED=y
 CONFIG_TCP_CONG_BIC=m
-CONFIG_TCP_CONG_CUBIC=m
+CONFIG_TCP_CONG_CUBIC=y
 CONFIG_TCP_CONG_WESTWOOD=m
 CONFIG_TCP_CONG_HTCP=m
 CONFIG_TCP_CONG_HSTCP=m
@@ -655,13 +757,15 @@ CONFIG_TCP_CONG_LP=m
 CONFIG_TCP_CONG_VENO=m
 CONFIG_TCP_CONG_YEAH=m
 CONFIG_TCP_CONG_ILLINOIS=m
-CONFIG_DEFAULT_RENO=y
-CONFIG_DEFAULT_TCP_CONG="reno"
-# CONFIG_TCP_MD5SIG is not set
+CONFIG_DEFAULT_CUBIC=y
+# CONFIG_DEFAULT_RENO is not set
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
 CONFIG_IPV6=y
-# CONFIG_IPV6_PRIVACY is not set
-# CONFIG_IPV6_ROUTER_PREF is not set
-# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
@@ -673,14 +777,18 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=m
 CONFIG_INET6_XFRM_MODE_BEET=m
 CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
 CONFIG_IPV6_SIT=m
-# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_SIT_6RD=y
 CONFIG_IPV6_NDISC_NODETYPE=y
 CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_IPV6_MULTIPLE_TABLES=y
-# CONFIG_IPV6_SUBTREES is not set
-# CONFIG_IPV6_MROUTE is not set
-# CONFIG_NETWORK_SECMARK is not set
-CONFIG_NETWORK_PHY_TIMESTAMPING=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+# CONFIG_NETLABEL is not set
+CONFIG_NETWORK_SECMARK=y
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
 CONFIG_NETFILTER=y
 # CONFIG_NETFILTER_DEBUG is not set
 CONFIG_NETFILTER_ADVANCED=y
@@ -690,13 +798,18 @@ CONFIG_BRIDGE_NETFILTER=y
 # Core Netfilter Configuration
 #
 CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_NETLINK_ACCT=m
 CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NETFILTER_NETLINK_LOG=m
-CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK=y
 CONFIG_NF_CONNTRACK_MARK=y
-# CONFIG_NF_CONNTRACK_ZONES is not set
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_CONNTRACK_PROCFS=y
 CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
 CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CONNTRACK_LABELS=y
 CONFIG_NF_CT_PROTO_DCCP=m
 CONFIG_NF_CT_PROTO_GRE=m
 CONFIG_NF_CT_PROTO_SCTP=m
@@ -713,14 +826,28 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NF_CT_NETLINK_HELPER=m
+CONFIG_NETFILTER_NETLINK_QUEUE_CT=y
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_NEEDED=y
+CONFIG_NF_NAT_PROTO_DCCP=m
+CONFIG_NF_NAT_PROTO_UDPLITE=m
+CONFIG_NF_NAT_PROTO_SCTP=m
+CONFIG_NF_NAT_AMANDA=m
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+CONFIG_NF_NAT_SIP=m
+CONFIG_NF_NAT_TFTP=m
 CONFIG_NETFILTER_TPROXY=m
-CONFIG_NETFILTER_XTABLES=m
+CONFIG_NETFILTER_XTABLES=y
 
 #
 # Xtables combined modules
 #
 CONFIG_NETFILTER_XT_MARK=m
 CONFIG_NETFILTER_XT_CONNMARK=m
+CONFIG_NETFILTER_XT_SET=m
 
 #
 # Xtables targets
@@ -729,20 +856,26 @@ CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
 CONFIG_NETFILTER_XT_TARGET_CT=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_HL=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
 CONFIG_NETFILTER_XT_TARGET_LED=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_IMQ=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NETMAP=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
 CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_RATEEST=m
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
 CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
 
@@ -750,9 +883,11 @@ CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
 # Xtables matches
 #
 CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
@@ -760,17 +895,19 @@ CONFIG_NETFILTER_XT_MATCH_CPU=m
 CONFIG_NETFILTER_XT_MATCH_DCCP=m
 CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ECN=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_HELPER=m
 CONFIG_NETFILTER_XT_MATCH_HL=m
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
-CONFIG_NETFILTER_XT_MATCH_IPVS=m
+# CONFIG_NETFILTER_XT_MATCH_IPVS is not set
 CONFIG_NETFILTER_XT_MATCH_LENGTH=m
 CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
@@ -790,7 +927,19 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
-# CONFIG_IP_SET is not set
+CONFIG_IP_SET=m
+CONFIG_IP_SET_MAX=256
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_IP_VS=m
 CONFIG_IP_VS_IPV6=y
 # CONFIG_IP_VS_DEBUG is not set
@@ -820,6 +969,11 @@ CONFIG_IP_VS_SH=m
 CONFIG_IP_VS_SED=m
 CONFIG_IP_VS_NQ=m
 
+#
+# IPVS SH scheduler
+#
+CONFIG_IP_VS_SH_TAB_BITS=8
+
 #
 # IPVS application helper
 #
@@ -830,40 +984,31 @@ CONFIG_IP_VS_PE_SIP=m
 #
 # IP: Netfilter Configuration
 #
-CONFIG_NF_DEFRAG_IPV4=m
-CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_CONNTRACK_PROC_COMPAT=y
-CONFIG_IP_NF_QUEUE=m
-CONFIG_IP_NF_IPTABLES=m
+CONFIG_NF_DEFRAG_IPV4=y
+CONFIG_NF_CONNTRACK_IPV4=y
+# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_IP_NF_IPTABLES=y
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
-CONFIG_NF_NAT_NEEDED=y
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
 CONFIG_NF_NAT_SNMP_BASIC=m
-CONFIG_NF_NAT_PROTO_DCCP=m
 CONFIG_NF_NAT_PROTO_GRE=m
-CONFIG_NF_NAT_PROTO_UDPLITE=m
-CONFIG_NF_NAT_PROTO_SCTP=m
-CONFIG_NF_NAT_FTP=m
-CONFIG_NF_NAT_IRC=m
-CONFIG_NF_NAT_TFTP=m
-CONFIG_NF_NAT_AMANDA=m
 CONFIG_NF_NAT_PPTP=m
 CONFIG_NF_NAT_H323=m
-CONFIG_NF_NAT_SIP=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
 CONFIG_IP_NF_TARGET_TTL=m
 CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
 CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
@@ -872,10 +1017,9 @@ CONFIG_IP_NF_MATCH_IPP2P=m
 #
 # IPv6: Netfilter Configuration
 #
-CONFIG_NF_DEFRAG_IPV6=m
-CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
-CONFIG_IP6_NF_IPTABLES=m
+CONFIG_NF_DEFRAG_IPV6=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
 CONFIG_IP6_NF_MATCH_FRAG=m
@@ -883,13 +1027,17 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_BRIDGE_NF_EBTABLES=m
 CONFIG_BRIDGE_EBT_BROUTE=m
 CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -910,49 +1058,52 @@ CONFIG_BRIDGE_EBT_MARK_T=m
 CONFIG_BRIDGE_EBT_REDIRECT=m
 CONFIG_BRIDGE_EBT_SNAT=m
 CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_ULOG=m
+# CONFIG_BRIDGE_EBT_ULOG is not set
 CONFIG_BRIDGE_EBT_NFLOG=m
 # CONFIG_IP_DCCP is not set
-# CONFIG_IP_SCTP is not set
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set
+CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y
+# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set
+CONFIG_SCTP_COOKIE_HMAC_MD5=y
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 # CONFIG_RDS is not set
 # CONFIG_TIPC is not set
 CONFIG_ATM=m
 CONFIG_ATM_CLIP=m
-CONFIG_ATM_CLIP_NO_ICMP=y
+# CONFIG_ATM_CLIP_NO_ICMP is not set
 # CONFIG_ATM_LANE is not set
 CONFIG_ATM_BR2684=m
 # CONFIG_ATM_BR2684_IPFILTER is not set
 CONFIG_L2TP=m
+# CONFIG_L2TP_DEBUGFS is not set
 CONFIG_L2TP_V3=y
 CONFIG_L2TP_IP=m
 CONFIG_L2TP_ETH=m
-CONFIG_STP=m
-CONFIG_GARP=m
-CONFIG_BRIDGE=m
+CONFIG_STP=y
+CONFIG_GARP=y
+CONFIG_BRIDGE=y
 CONFIG_BRIDGE_IGMP_SNOOPING=y
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_HAVE_NET_DSA=y
 CONFIG_NET_DSA=y
 CONFIG_NET_DSA_TAG_DSA=y
 CONFIG_NET_DSA_TAG_EDSA=y
 CONFIG_NET_DSA_TAG_TRAILER=y
-CONFIG_NET_DSA_MV88E6XXX=y
-CONFIG_NET_DSA_MV88E6060=y
-CONFIG_NET_DSA_MV88E6XXX_NEED_PPU=y
-CONFIG_NET_DSA_MV88E6131=y
-CONFIG_NET_DSA_MV88E6123_61_65=y
-CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q=y
 CONFIG_VLAN_8021Q_GVRP=y
+# CONFIG_VLAN_8021Q_MVRP is not set
 # CONFIG_DECNET is not set
-CONFIG_LLC=m
+CONFIG_LLC=y
 # CONFIG_LLC2 is not set
 # CONFIG_IPX is not set
 # CONFIG_ATALK is not set
 # CONFIG_X25 is not set
 # CONFIG_LAPB is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
 # CONFIG_PHONET is not set
-CONFIG_IEEE802154=m
-CONFIG_IEEE802154_6LOWPAN=m
+# CONFIG_IEEE802154 is not set
 CONFIG_NET_SCHED=y
 
 #
@@ -976,7 +1127,10 @@ CONFIG_NET_SCH_DRR=m
 CONFIG_NET_SCH_MQPRIO=m
 CONFIG_NET_SCH_CHOKE=m
 CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
 CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
 
 #
 # Classification
@@ -992,6 +1146,7 @@ CONFIG_CLS_U32_MARK=y
 CONFIG_NET_CLS_RSVP=m
 CONFIG_NET_CLS_RSVP6=m
 CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=m
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_STACK=32
 CONFIG_NET_EMATCH_CMP=m
@@ -999,6 +1154,7 @@ CONFIG_NET_EMATCH_NBYTE=m
 CONFIG_NET_EMATCH_U32=m
 CONFIG_NET_EMATCH_META=m
 CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_EMATCH_IPSET=m
 CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=m
 CONFIG_NET_ACT_GACT=m
@@ -1015,67 +1171,30 @@ CONFIG_NET_SCH_FIFO=y
 # CONFIG_DCB is not set
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_BLA=y
+CONFIG_BATMAN_ADV_DAT=y
+CONFIG_BATMAN_ADV_NC=y
 # CONFIG_BATMAN_ADV_DEBUG is not set
+CONFIG_OPENVSWITCH=m
+CONFIG_VSOCKETS=m
+CONFIG_VMWARE_VMCI_VSOCKETS=m
+CONFIG_NETLINK_MMAP=y
+CONFIG_NETLINK_DIAG=m
 CONFIG_RPS=y
 CONFIG_RFS_ACCEL=y
 CONFIG_XPS=y
+CONFIG_NETPRIO_CGROUP=m
+CONFIG_BQL=y
 
 #
 # Network testing
 #
 # CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_DROP_MONITOR is not set
 # CONFIG_HAMRADIO is not set
 # CONFIG_CAN is not set
-CONFIG_IRDA=m
-
-#
-# IrDA protocols
-#
-CONFIG_IRLAN=m
-CONFIG_IRNET=m
-CONFIG_IRCOMM=m
-# CONFIG_IRDA_ULTRA is not set
-
-#
-# IrDA options
-#
-# CONFIG_IRDA_CACHE_LAST_LSAP is not set
-# CONFIG_IRDA_FAST_RR is not set
-# CONFIG_IRDA_DEBUG is not set
-
-#
-# Infrared-port device drivers
-#
-
-#
-# SIR device drivers
-#
-CONFIG_IRTTY_SIR=m
-
-#
-# Dongle support
-#
-# CONFIG_DONGLE is not set
-CONFIG_KINGSUN_DONGLE=m
-CONFIG_KSDAZZLE_DONGLE=m
-CONFIG_KS959_DONGLE=m
-
-#
-# FIR device drivers
-#
-CONFIG_USB_IRDA=m
-CONFIG_SIGMATEL_FIR=m
-CONFIG_NSC_FIR=m
-CONFIG_WINBOND_FIR=m
-CONFIG_TOSHIBA_FIR=m
-CONFIG_SMC_IRCC_FIR=m
-CONFIG_ALI_FIR=m
-CONFIG_VLSI_FIR=m
-CONFIG_VIA_FIR=m
-CONFIG_MCS_FIR=m
+# CONFIG_IRDA is not set
 CONFIG_BT=m
-CONFIG_BT_L2CAP=y
-CONFIG_BT_SCO=y
 CONFIG_BT_RFCOMM=m
 CONFIG_BT_RFCOMM_TTY=y
 CONFIG_BT_BNEP=m
@@ -1094,21 +1213,16 @@ CONFIG_BT_HCIUART_H4=y
 CONFIG_BT_HCIUART_BCSP=y
 CONFIG_BT_HCIUART_ATH3K=y
 CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_3WIRE=y
 CONFIG_BT_HCIBCM203X=m
 CONFIG_BT_HCIBPA10X=m
 CONFIG_BT_HCIBFUSB=m
-CONFIG_BT_HCIDTL1=m
-CONFIG_BT_HCIBT3C=m
-CONFIG_BT_HCIBLUECARD=m
-CONFIG_BT_HCIBTUART=m
 CONFIG_BT_HCIVHCI=m
 CONFIG_BT_MRVL=m
 CONFIG_BT_MRVL_SDIO=m
 CONFIG_BT_ATH3K=m
 CONFIG_BT_WILINK=m
-CONFIG_AF_RXRPC=m
-# CONFIG_AF_RXRPC_DEBUG is not set
-CONFIG_RXKAD=m
+# CONFIG_AF_RXRPC is not set
 CONFIG_FIB_RULES=y
 CONFIG_WIRELESS=y
 CONFIG_WIRELESS_EXT=y
@@ -1120,10 +1234,11 @@ CONFIG_CFG80211=m
 # CONFIG_NL80211_TESTMODE is not set
 # CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
 # CONFIG_CFG80211_REG_DEBUG is not set
-# CONFIG_CFG80211_DEFAULT_PS is not set
+# CONFIG_CFG80211_CERTIFICATION_ONUS is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
 # CONFIG_CFG80211_INTERNAL_REGDB is not set
 CONFIG_CFG80211_WEXT=y
-CONFIG_WIRELESS_EXT_SYSFS=y
 CONFIG_LIB80211=m
 CONFIG_LIB80211_CRYPT_WEP=m
 CONFIG_LIB80211_CRYPT_CCMP=m
@@ -1131,23 +1246,24 @@ CONFIG_LIB80211_CRYPT_TKIP=m
 # CONFIG_LIB80211_DEBUG is not set
 CONFIG_MAC80211=m
 CONFIG_MAC80211_HAS_RC=y
+# CONFIG_MAC80211_RC_PID is not set
 CONFIG_MAC80211_RC_MINSTREL=y
 CONFIG_MAC80211_RC_MINSTREL_HT=y
 CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
 CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
 CONFIG_MAC80211_MESH=y
 CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_MESSAGE_TRACING is not set
 # CONFIG_MAC80211_DEBUG_MENU is not set
 # CONFIG_WIMAX is not set
 CONFIG_RFKILL=m
 CONFIG_RFKILL_LEDS=y
 CONFIG_RFKILL_INPUT=y
-CONFIG_RFKILL_REGULATOR=m
+CONFIG_RFKILL_GPIO=m
 # CONFIG_NET_9P is not set
 # CONFIG_CAIF is not set
-CONFIG_CEPH_LIB=m
-# CONFIG_CEPH_LIB_PRETTYDEBUG is not set
-# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set
+# CONFIG_CEPH_LIB is not set
 # CONFIG_NFC is not set
 
 #
@@ -1157,108 +1273,54 @@ CONFIG_CEPH_LIB=m
 #
 # Generic Driver Options
 #
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_DEVTMPFS is not set
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_UEVENT_HELPER_PATH=""
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
 CONFIG_FW_LOADER=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
 CONFIG_EXTRA_FIRMWARE=""
+CONFIG_FW_LOADER_USER_HELPER=y
 # CONFIG_DEBUG_DRIVER is not set
 # CONFIG_DEBUG_DEVRES is not set
 # CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_GENERIC_CPU_DEVICES is not set
 CONFIG_REGMAP=y
 CONFIG_REGMAP_I2C=m
-CONFIG_CONNECTOR=m
-CONFIG_MTD=m
-CONFIG_MTD_TESTS=m
-# CONFIG_MTD_REDBOOT_PARTS is not set
-CONFIG_MTD_AR7_PARTS=m
-
-#
-# User Modules And Translation Layers
-#
-CONFIG_MTD_CHAR=m
-CONFIG_MTD_BLKDEVS=m
-CONFIG_MTD_BLOCK=m
-# CONFIG_MTD_BLOCK_RO is not set
-# CONFIG_FTL is not set
-# CONFIG_NFTL is not set
-# CONFIG_INFTL is not set
-# CONFIG_RFD_FTL is not set
-# CONFIG_SSFDC is not set
-CONFIG_SM_FTL=m
-# CONFIG_MTD_OOPS is not set
-# CONFIG_MTD_SWAP is not set
-
-#
-# RAM/ROM/Flash chip drivers
-#
-# CONFIG_MTD_CFI is not set
-# CONFIG_MTD_JEDECPROBE is not set
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
-# CONFIG_MTD_CFI_I4 is not set
-# CONFIG_MTD_CFI_I8 is not set
-# CONFIG_MTD_RAM is not set
-# CONFIG_MTD_ROM is not set
-# CONFIG_MTD_ABSENT is not set
-
-#
-# Mapping drivers for chip access
-#
-# CONFIG_MTD_COMPLEX_MAPPINGS is not set
-CONFIG_MTD_PHYSMAP=m
-# CONFIG_MTD_PHYSMAP_COMPAT is not set
-# CONFIG_MTD_TS5500 is not set
-# CONFIG_MTD_INTEL_VR_NOR is not set
-# CONFIG_MTD_PLATRAM is not set
-
-#
-# Self-contained MTD device drivers
-#
-# CONFIG_MTD_PMC551 is not set
-# CONFIG_MTD_SLRAM is not set
-# CONFIG_MTD_PHRAM is not set
-# CONFIG_MTD_MTDRAM is not set
-# CONFIG_MTD_BLOCK2MTD is not set
-
-#
-# Disk-On-Chip Device Drivers
-#
-# CONFIG_MTD_DOC2000 is not set
-# CONFIG_MTD_DOC2001 is not set
-# CONFIG_MTD_DOC2001PLUS is not set
-# CONFIG_MTD_DOCG3 is not set
-CONFIG_MTD_NAND_ECC=m
-# CONFIG_MTD_NAND_ECC_SMC is not set
-# CONFIG_MTD_NAND is not set
-# CONFIG_MTD_ONENAND is not set
-
-#
-# LPDDR flash memory drivers
-#
-CONFIG_MTD_LPDDR=m
-CONFIG_MTD_QINFO_PROBE=m
-CONFIG_MTD_UBI=m
-CONFIG_MTD_UBI_WL_THRESHOLD=4096
-CONFIG_MTD_UBI_BEB_RESERVE=1
-# CONFIG_MTD_UBI_GLUEBI is not set
-# CONFIG_MTD_UBI_DEBUG is not set
+CONFIG_REGMAP_IRQ=y
+CONFIG_DMA_SHARED_BUFFER=y
+# CONFIG_CMA is not set
+
+#
+# Bus devices
+#
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+# CONFIG_MTD is not set
+CONFIG_OF=y
+
+#
+# Device Tree and Open Firmware support
+#
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_OF_SELFTEST is not set
+CONFIG_OF_PROMTREE=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_DEVICE=y
+CONFIG_OF_I2C=m
+CONFIG_OF_NET=y
+CONFIG_OF_MDIO=y
+CONFIG_OF_PCI=y
+CONFIG_OF_PCI_IRQ=y
 CONFIG_PARPORT=m
 CONFIG_PARPORT_PC=m
 CONFIG_PARPORT_SERIAL=m
-CONFIG_PARPORT_PC_FIFO=y
-CONFIG_PARPORT_PC_SUPERIO=y
-CONFIG_PARPORT_PC_PCMCIA=m
+# CONFIG_PARPORT_PC_FIFO is not set
+# CONFIG_PARPORT_PC_SUPERIO is not set
 # CONFIG_PARPORT_GSC is not set
-CONFIG_PARPORT_AX88796=m
+# CONFIG_PARPORT_AX88796 is not set
 CONFIG_PARPORT_1284=y
 CONFIG_PARPORT_NOT_PC=y
 CONFIG_PNP=y
@@ -1267,67 +1329,72 @@ CONFIG_PNP=y
 #
 # Protocols
 #
-CONFIG_ISAPNP=y
-CONFIG_PNPBIOS=y
-CONFIG_PNPBIOS_PROC_FS=y
 CONFIG_PNPACPI=y
 CONFIG_BLK_DEV=y
 CONFIG_BLK_DEV_FD=m
-# CONFIG_BLK_DEV_XD is not set
 # CONFIG_PARIDE is not set
+CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m
 CONFIG_BLK_CPQ_DA=m
 CONFIG_BLK_CPQ_CISS_DA=m
 # CONFIG_CISS_SCSI_TAPE is not set
 CONFIG_BLK_DEV_DAC960=m
-# CONFIG_BLK_DEV_UMEM is not set
+CONFIG_BLK_DEV_UMEM=m
 # CONFIG_BLK_DEV_COW_COMMON is not set
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
 CONFIG_BLK_DEV_CRYPTOLOOP=m
 # CONFIG_BLK_DEV_DRBD is not set
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_OSD=m
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_NVME=m
+# CONFIG_BLK_DEV_OSD is not set
 CONFIG_BLK_DEV_SX8=m
-# CONFIG_BLK_DEV_UB is not set
 CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=8
+CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=16384
 # CONFIG_BLK_DEV_XIP is not set
 # CONFIG_CDROM_PKTCDVD is not set
-CONFIG_ATA_OVER_ETH=m
+# CONFIG_ATA_OVER_ETH is not set
 CONFIG_VIRTIO_BLK=m
 # CONFIG_BLK_DEV_HD is not set
-CONFIG_BLK_DEV_RBD=m
+# CONFIG_BLK_DEV_RBD is not set
+CONFIG_BLK_DEV_RSXX=m
+
+#
+# Misc devices
+#
 CONFIG_SENSORS_LIS3LV02D=m
-CONFIG_MISC_DEVICES=y
 # CONFIG_AD525X_DPOT is not set
-# CONFIG_IBM_ASM is not set
+# CONFIG_ATMEL_PWM is not set
+CONFIG_DUMMY_IRQ=m
+CONFIG_IBM_ASM=m
 # CONFIG_PHANTOM is not set
 # CONFIG_INTEL_MID_PTI is not set
 # CONFIG_SGI_IOC4 is not set
 CONFIG_TIFM_CORE=m
 CONFIG_TIFM_7XX1=m
 CONFIG_ICS932S401=m
+# CONFIG_ATMEL_SSC is not set
 CONFIG_ENCLOSURE_SERVICES=m
 CONFIG_CS5535_MFGPT=m
 CONFIG_CS5535_MFGPT_DEFAULT_IRQ=7
 CONFIG_CS5535_CLOCK_EVENT_SRC=m
 CONFIG_HP_ILO=m
-CONFIG_APDS9802ALS=m
-CONFIG_ISL29003=m
-CONFIG_ISL29020=m
-CONFIG_SENSORS_TSL2550=m
-CONFIG_SENSORS_BH1780=m
-CONFIG_SENSORS_BH1770=m
-CONFIG_SENSORS_APDS990X=m
-CONFIG_HMC6352=m
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_BH1780 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set
 CONFIG_DS1682=m
 CONFIG_VMWARE_BALLOON=m
-CONFIG_BMP085=m
+CONFIG_BMP085=y
+CONFIG_BMP085_I2C=m
 CONFIG_PCH_PHUB=m
 CONFIG_USB_SWITCH_FSA9480=m
-CONFIG_C2PORT=m
-CONFIG_C2PORT_DURAMAR_2150=m
+# CONFIG_SRAM is not set
+# CONFIG_C2PORT is not set
 
 #
 # EEPROM support
@@ -1339,62 +1406,63 @@ CONFIG_EEPROM_93CX6=m
 CONFIG_CB710_CORE=m
 # CONFIG_CB710_DEBUG is not set
 CONFIG_CB710_DEBUG_ASSUMPTIONS=y
-CONFIG_IWMC3200TOP=m
-# CONFIG_IWMC3200TOP_DEBUG is not set
-# CONFIG_IWMC3200TOP_DEBUGFS is not set
 
 #
 # Texas Instruments shared transport line discipline
 #
 CONFIG_TI_ST=m
-CONFIG_SENSORS_LIS3_I2C=m
+# CONFIG_SENSORS_LIS3_I2C is not set
 
 #
 # Altera FPGA firmware download module
 #
-# CONFIG_ALTERA_STAPL is not set
+CONFIG_ALTERA_STAPL=m
+CONFIG_INTEL_MEI=m
+CONFIG_INTEL_MEI_ME=m
+CONFIG_VMWARE_VMCI=m
 CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
 #
 # SCSI device support
 #
-CONFIG_SCSI_MOD=m
+CONFIG_SCSI_MOD=y
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=m
+CONFIG_SCSI=y
 CONFIG_SCSI_DMA=y
-# CONFIG_SCSI_TGT is not set
+CONFIG_SCSI_TGT=m
 CONFIG_SCSI_NETLINK=y
 CONFIG_SCSI_PROC_FS=y
 
 #
 # SCSI support type (disk, tape, CD-ROM)
 #
-CONFIG_BLK_DEV_SD=m
-# CONFIG_CHR_DEV_ST is not set
-# CONFIG_CHR_DEV_OSST is not set
-CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
-CONFIG_CHR_DEV_SG=m
+CONFIG_CHR_DEV_SG=y
 CONFIG_CHR_DEV_SCH=m
 CONFIG_SCSI_ENCLOSURE=m
 CONFIG_SCSI_MULTI_LUN=y
-# CONFIG_SCSI_CONSTANTS is not set
-# CONFIG_SCSI_LOGGING is not set
-# CONFIG_SCSI_SCAN_ASYNC is not set
-CONFIG_SCSI_WAIT_SCAN=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
 
 #
 # SCSI Transports
 #
 CONFIG_SCSI_SPI_ATTRS=m
 CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_FC_TGT_ATTRS=y
 CONFIG_SCSI_ISCSI_ATTRS=m
 CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_SCSI_SAS_LIBSAS=m
 CONFIG_SCSI_SAS_ATA=y
 CONFIG_SCSI_SAS_HOST_SMP=y
 CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_SCSI_SRP_TGT_ATTRS=y
 CONFIG_SCSI_LOWLEVEL=y
 CONFIG_ISCSI_TCP=m
 CONFIG_ISCSI_BOOT_SYSFS=m
@@ -1407,36 +1475,29 @@ CONFIG_BLK_DEV_3W_XXXX_RAID=m
 CONFIG_SCSI_HPSA=m
 CONFIG_SCSI_3W_9XXX=m
 CONFIG_SCSI_3W_SAS=m
-CONFIG_SCSI_7000FASST=m
 CONFIG_SCSI_ACARD=m
-CONFIG_SCSI_AHA152X=m
-CONFIG_SCSI_AHA1542=m
-CONFIG_SCSI_AHA1740=m
 CONFIG_SCSI_AACRAID=m
 CONFIG_SCSI_AIC7XXX=m
-CONFIG_AIC7XXX_CMDS_PER_DEVICE=253
-CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-# CONFIG_AIC7XXX_BUILD_FIRMWARE is not set
-# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
+CONFIG_AIC7XXX_RESET_DELAY_MS=5000
+CONFIG_AIC7XXX_DEBUG_ENABLE=y
 CONFIG_AIC7XXX_DEBUG_MASK=0
-# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set
+CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
 # CONFIG_SCSI_AIC7XXX_OLD is not set
 CONFIG_SCSI_AIC79XX=m
 CONFIG_AIC79XX_CMDS_PER_DEVICE=32
-CONFIG_AIC79XX_RESET_DELAY_MS=15000
-# CONFIG_AIC79XX_BUILD_FIRMWARE is not set
+CONFIG_AIC79XX_RESET_DELAY_MS=4000
 # CONFIG_AIC79XX_DEBUG_ENABLE is not set
 CONFIG_AIC79XX_DEBUG_MASK=0
 # CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
 CONFIG_SCSI_AIC94XX=m
 # CONFIG_AIC94XX_DEBUG is not set
 CONFIG_SCSI_MVSAS=m
-CONFIG_SCSI_MVSAS_DEBUG=y
-# CONFIG_SCSI_MVSAS_TASKLET is not set
-CONFIG_SCSI_MVUMI=m
+# CONFIG_SCSI_MVSAS_DEBUG is not set
+CONFIG_SCSI_MVSAS_TASKLET=y
+# CONFIG_SCSI_MVUMI is not set
 CONFIG_SCSI_DPT_I2O=m
 CONFIG_SCSI_ADVANSYS=m
-CONFIG_SCSI_IN2000=m
 CONFIG_SCSI_ARCMSR=m
 CONFIG_MEGARAID_NEWGEN=y
 CONFIG_MEGARAID_MM=m
@@ -1446,30 +1507,28 @@ CONFIG_MEGARAID_SAS=m
 CONFIG_SCSI_MPT2SAS=m
 CONFIG_SCSI_MPT2SAS_MAX_SGE=128
 # CONFIG_SCSI_MPT2SAS_LOGGING is not set
+CONFIG_SCSI_MPT3SAS=m
+CONFIG_SCSI_MPT3SAS_MAX_SGE=128
+# CONFIG_SCSI_MPT3SAS_LOGGING is not set
+CONFIG_SCSI_UFSHCD=m
+CONFIG_SCSI_UFSHCD_PCI=m
+CONFIG_SCSI_UFSHCD_PLATFORM=m
 CONFIG_SCSI_HPTIOP=m
 CONFIG_SCSI_BUSLOGIC=m
-CONFIG_SCSI_FLASHPOINT=y
+# CONFIG_SCSI_FLASHPOINT is not set
 CONFIG_VMWARE_PVSCSI=m
 CONFIG_LIBFC=m
 CONFIG_LIBFCOE=m
 CONFIG_FCOE=m
 CONFIG_FCOE_FNIC=m
 CONFIG_SCSI_DMX3191D=m
-CONFIG_SCSI_DTC3280=m
 CONFIG_SCSI_EATA=m
 CONFIG_SCSI_EATA_TAGGED_QUEUE=y
-CONFIG_SCSI_EATA_LINKED_COMMANDS=y
+# CONFIG_SCSI_EATA_LINKED_COMMANDS is not set
 CONFIG_SCSI_EATA_MAX_TAGS=16
 CONFIG_SCSI_FUTURE_DOMAIN=m
-CONFIG_SCSI_FD_MCS=m
 CONFIG_SCSI_GDTH=m
 CONFIG_SCSI_ISCI=m
-CONFIG_SCSI_GENERIC_NCR5380=m
-CONFIG_SCSI_GENERIC_NCR5380_MMIO=m
-CONFIG_SCSI_GENERIC_NCR53C400=y
-CONFIG_SCSI_IBMMCA=m
-# CONFIG_IBMMCA_SCSI_ORDER_STANDARD is not set
-# CONFIG_IBMMCA_SCSI_DEV_RESET is not set
 CONFIG_SCSI_IPS=m
 CONFIG_SCSI_INITIO=m
 CONFIG_SCSI_INIA100=m
@@ -1477,8 +1536,6 @@ CONFIG_SCSI_PPA=m
 CONFIG_SCSI_IMM=m
 # CONFIG_SCSI_IZIP_EPP16 is not set
 # CONFIG_SCSI_IZIP_SLOW_CTR is not set
-CONFIG_SCSI_NCR53C406A=m
-CONFIG_SCSI_NCR_D700=m
 CONFIG_SCSI_STEX=m
 CONFIG_SCSI_SYM53C8XX_2=m
 CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
@@ -1488,39 +1545,22 @@ CONFIG_SCSI_SYM53C8XX_MMIO=y
 CONFIG_SCSI_IPR=m
 CONFIG_SCSI_IPR_TRACE=y
 CONFIG_SCSI_IPR_DUMP=y
-CONFIG_SCSI_NCR_Q720=m
-CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS=8
-CONFIG_SCSI_NCR53C8XX_MAX_TAGS=32
-CONFIG_SCSI_NCR53C8XX_SYNC=20
-CONFIG_SCSI_PAS16=m
-CONFIG_SCSI_QLOGIC_FAS=m
 CONFIG_SCSI_QLOGIC_1280=m
 CONFIG_SCSI_QLA_FC=m
 CONFIG_SCSI_QLA_ISCSI=m
 CONFIG_SCSI_LPFC=m
-CONFIG_SCSI_SIM710=m
-CONFIG_SCSI_SYM53C416=m
+# CONFIG_SCSI_LPFC_DEBUG_FS is not set
 CONFIG_SCSI_DC395x=m
 CONFIG_SCSI_DC390T=m
-CONFIG_SCSI_T128=m
-CONFIG_SCSI_U14_34F=m
-CONFIG_SCSI_U14_34F_TAGGED_QUEUE=y
-CONFIG_SCSI_U14_34F_LINKED_COMMANDS=y
-CONFIG_SCSI_U14_34F_MAX_TAGS=8
-CONFIG_SCSI_ULTRASTOR=m
-CONFIG_SCSI_NSP32=m
+# CONFIG_SCSI_NSP32 is not set
 # CONFIG_SCSI_DEBUG is not set
 CONFIG_SCSI_PMCRAID=m
 CONFIG_SCSI_PM8001=m
 # CONFIG_SCSI_SRP is not set
 CONFIG_SCSI_BFA_FC=m
-CONFIG_SCSI_LOWLEVEL_PCMCIA=y
-CONFIG_PCMCIA_AHA152X=m
-CONFIG_PCMCIA_FDOMAIN=m
-CONFIG_PCMCIA_NINJA_SCSI=m
-CONFIG_PCMCIA_QLOGIC=m
-CONFIG_PCMCIA_SYM53C500=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_CHELSIO_FCOE=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
@@ -1529,16 +1569,17 @@ CONFIG_SCSI_OSD_INITIATOR=m
 CONFIG_SCSI_OSD_ULD=m
 CONFIG_SCSI_OSD_DPRINT_SENSE=1
 # CONFIG_SCSI_OSD_DEBUG is not set
-CONFIG_ATA=m
+CONFIG_ATA=y
 # CONFIG_ATA_NONSTANDARD is not set
 CONFIG_ATA_VERBOSE_ERROR=y
 CONFIG_ATA_ACPI=y
+# CONFIG_SATA_ZPODD is not set
 CONFIG_SATA_PMP=y
 
 #
 # Controllers with non-SFF native interface
 #
-CONFIG_SATA_AHCI=m
+CONFIG_SATA_AHCI=y
 CONFIG_SATA_AHCI_PLATFORM=m
 CONFIG_SATA_INIC162X=m
 CONFIG_SATA_ACARD_AHCI=m
@@ -1556,7 +1597,8 @@ CONFIG_ATA_BMDMA=y
 #
 # SATA SFF controllers with BMDMA
 #
-CONFIG_ATA_PIIX=m
+CONFIG_ATA_PIIX=y
+CONFIG_SATA_HIGHBANK=m
 CONFIG_SATA_MV=m
 CONFIG_SATA_NV=m
 CONFIG_SATA_PROMISE=m
@@ -1579,7 +1621,7 @@ CONFIG_PATA_ATP867X=m
 CONFIG_PATA_CMD64X=m
 CONFIG_PATA_CS5520=m
 CONFIG_PATA_CS5530=m
-CONFIG_PATA_CS5535=m
+# CONFIG_PATA_CS5535 is not set
 CONFIG_PATA_CS5536=m
 CONFIG_PATA_CYPRESS=m
 CONFIG_PATA_EFAR=m
@@ -1587,7 +1629,7 @@ CONFIG_PATA_HPT366=m
 CONFIG_PATA_HPT37X=m
 CONFIG_PATA_HPT3X2N=m
 CONFIG_PATA_HPT3X3=m
-CONFIG_PATA_HPT3X3_DMA=y
+# CONFIG_PATA_HPT3X3_DMA is not set
 CONFIG_PATA_IT8213=m
 CONFIG_PATA_IT821X=m
 CONFIG_PATA_JMICRON=m
@@ -1599,9 +1641,9 @@ CONFIG_PATA_OLDPIIX=m
 CONFIG_PATA_OPTIDMA=m
 CONFIG_PATA_PDC2027X=m
 CONFIG_PATA_PDC_OLD=m
-CONFIG_PATA_RADISYS=m
+# CONFIG_PATA_RADISYS is not set
 CONFIG_PATA_RDC=m
-CONFIG_PATA_SC1200=m
+# CONFIG_PATA_SC1200 is not set
 CONFIG_PATA_SCH=m
 CONFIG_PATA_SERVERWORKS=m
 CONFIG_PATA_SIL680=m
@@ -1615,91 +1657,105 @@ CONFIG_PATA_WINBOND=m
 # PIO-only SFF controllers
 #
 CONFIG_PATA_CMD640_PCI=m
-CONFIG_PATA_ISAPNP=m
 CONFIG_PATA_MPIIX=m
 CONFIG_PATA_NS87410=m
 CONFIG_PATA_OPTI=m
-CONFIG_PATA_PCMCIA=m
-CONFIG_PATA_QDI=m
+# CONFIG_PATA_PLATFORM is not set
 CONFIG_PATA_RZ1000=m
-CONFIG_PATA_WINBOND_VLB=m
 
 #
 # Generic fallback / legacy drivers
 #
 CONFIG_PATA_ACPI=m
 CONFIG_ATA_GENERIC=m
-CONFIG_PATA_LEGACY=m
+# CONFIG_PATA_LEGACY is not set
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_AUTODETECT=y
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
 CONFIG_MD_RAID1=m
 CONFIG_MD_RAID10=m
 CONFIG_MD_RAID456=m
-# CONFIG_MULTICORE_RAID456 is not set
 CONFIG_MD_MULTIPATH=m
-# CONFIG_MD_FAULTY is not set
-CONFIG_BLK_DEV_DM=m
+CONFIG_MD_FAULTY=m
+CONFIG_BCACHE=m
+# CONFIG_BCACHE_DEBUG is not set
+# CONFIG_BCACHE_EDEBUG is not set
+# CONFIG_BCACHE_CLOSURES_DEBUG is not set
+CONFIG_BLK_DEV_DM=y
 # CONFIG_DM_DEBUG is not set
 CONFIG_DM_BUFIO=m
+CONFIG_DM_BIO_PRISON=m
 CONFIG_DM_PERSISTENT_DATA=m
 CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_THIN_PROVISIONING=m
-# CONFIG_DM_DEBUG_BLOCK_STACK_TRACING is not set
-# CONFIG_DM_DEBUG_SPACE_MAPS is not set
-CONFIG_DM_MIRROR=m
+CONFIG_DM_SNAPSHOT=y
+# CONFIG_DM_THIN_PROVISIONING is not set
+CONFIG_DM_CACHE=m
+CONFIG_DM_CACHE_MQ=m
+CONFIG_DM_CACHE_CLEANER=m
+CONFIG_DM_MIRROR=y
 CONFIG_DM_RAID=m
 CONFIG_DM_LOG_USERSPACE=m
-CONFIG_DM_ZERO=m
+CONFIG_DM_ZERO=y
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_MULTIPATH_QL=m
 CONFIG_DM_MULTIPATH_ST=m
-CONFIG_DM_DELAY=m
+# CONFIG_DM_DELAY is not set
 CONFIG_DM_UEVENT=y
-CONFIG_DM_FLAKEY=m
+# CONFIG_DM_FLAKEY is not set
+CONFIG_DM_VERITY=m
 # CONFIG_TARGET_CORE is not set
 CONFIG_FUSION=y
 CONFIG_FUSION_SPI=m
 CONFIG_FUSION_FC=m
 CONFIG_FUSION_SAS=m
-CONFIG_FUSION_MAX_SGE=128
+CONFIG_FUSION_MAX_SGE=40
 CONFIG_FUSION_CTL=m
-# CONFIG_FUSION_LOGGING is not set
+CONFIG_FUSION_LOGGING=y
 
 #
 # IEEE 1394 (FireWire) support
 #
 CONFIG_FIREWIRE=m
 CONFIG_FIREWIRE_OHCI=m
-CONFIG_FIREWIRE_OHCI_DEBUG=y
 CONFIG_FIREWIRE_SBP2=m
-CONFIG_FIREWIRE_NET=m
+# CONFIG_FIREWIRE_NET is not set
 # CONFIG_FIREWIRE_NOSY is not set
-# CONFIG_I2O is not set
-# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_I2O=m
+# CONFIG_I2O_LCT_NOTIFY_ON_CHANGES is not set
+CONFIG_I2O_EXT_ADAPTEC=y
+CONFIG_I2O_CONFIG=m
+CONFIG_I2O_CONFIG_OLD_IOCTL=y
+CONFIG_I2O_BUS=m
+CONFIG_I2O_BLOCK=m
+CONFIG_I2O_SCSI=m
+CONFIG_I2O_PROC=m
+CONFIG_MACINTOSH_DRIVERS=y
+# CONFIG_MAC_EMUMOUSEBTN is not set
 CONFIG_NETDEVICES=y
 CONFIG_NET_CORE=y
 CONFIG_BONDING=m
 CONFIG_DUMMY=m
-CONFIG_EQUALIZER=m
+# CONFIG_EQUALIZER is not set
+# CONFIG_NET_FC is not set
+CONFIG_MII=m
+CONFIG_IFB=m
+# CONFIG_NET_TEAM is not set
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_NETPOLL=y
+CONFIG_NETPOLL_TRAP=y
+CONFIG_NET_POLL_CONTROLLER=y
 CONFIG_IMQ=m
 # CONFIG_IMQ_BEHAVIOR_AA is not set
 CONFIG_IMQ_BEHAVIOR_AB=y
 # CONFIG_IMQ_BEHAVIOR_BA is not set
 # CONFIG_IMQ_BEHAVIOR_BB is not set
 CONFIG_IMQ_NUM_DEVS=2
-# CONFIG_NET_FC is not set
-CONFIG_MII=m
-CONFIG_IEEE802154_DRIVERS=m
-CONFIG_IEEE802154_FAKEHARD=m
-CONFIG_IFB=m
-CONFIG_MACVLAN=m
-CONFIG_MACVTAP=m
-# CONFIG_NETCONSOLE is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
 CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
@@ -1710,51 +1766,42 @@ CONFIG_ATM_DRIVERS=y
 CONFIG_ATM_TCP=m
 CONFIG_ATM_LANAI=m
 CONFIG_ATM_ENI=m
-CONFIG_ATM_ENI_DEBUG=y
-CONFIG_ATM_ENI_TUNE_BURST=y
-CONFIG_ATM_ENI_BURST_TX_16W=y
-CONFIG_ATM_ENI_BURST_TX_8W=y
-CONFIG_ATM_ENI_BURST_TX_4W=y
-CONFIG_ATM_ENI_BURST_TX_2W=y
-CONFIG_ATM_ENI_BURST_RX_16W=y
-CONFIG_ATM_ENI_BURST_RX_8W=y
-CONFIG_ATM_ENI_BURST_RX_4W=y
-CONFIG_ATM_ENI_BURST_RX_2W=y
+# CONFIG_ATM_ENI_DEBUG is not set
+# CONFIG_ATM_ENI_TUNE_BURST is not set
 CONFIG_ATM_FIRESTREAM=m
-CONFIG_ATM_ZATM=m
-# CONFIG_ATM_ZATM_DEBUG is not set
+# CONFIG_ATM_ZATM is not set
 CONFIG_ATM_NICSTAR=m
-CONFIG_ATM_NICSTAR_USE_SUNI=y
-CONFIG_ATM_NICSTAR_USE_IDT77105=y
+# CONFIG_ATM_NICSTAR_USE_SUNI is not set
+# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set
 CONFIG_ATM_IDT77252=m
 # CONFIG_ATM_IDT77252_DEBUG is not set
 # CONFIG_ATM_IDT77252_RCV_ALL is not set
 CONFIG_ATM_IDT77252_USE_SUNI=y
-CONFIG_ATM_AMBASSADOR=m
-# CONFIG_ATM_AMBASSADOR_DEBUG is not set
-CONFIG_ATM_HORIZON=m
-# CONFIG_ATM_HORIZON_DEBUG is not set
-CONFIG_ATM_IA=m
-# CONFIG_ATM_IA_DEBUG is not set
-CONFIG_ATM_FORE200E=m
-CONFIG_ATM_FORE200E_USE_TASKLET=y
-CONFIG_ATM_FORE200E_TX_RETRY=16
-CONFIG_ATM_FORE200E_DEBUG=0
+# CONFIG_ATM_AMBASSADOR is not set
+# CONFIG_ATM_HORIZON is not set
+# CONFIG_ATM_IA is not set
+# CONFIG_ATM_FORE200E is not set
 CONFIG_ATM_HE=m
-CONFIG_ATM_HE_USE_SUNI=y
+# CONFIG_ATM_HE_USE_SUNI is not set
 CONFIG_ATM_SOLOS=m
 
 #
 # CAIF transport drivers
 #
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_RING=m
+
+#
+# Distributed Switch Architecture drivers
+#
+CONFIG_NET_DSA_MV88E6XXX=y
+CONFIG_NET_DSA_MV88E6060=y
+CONFIG_NET_DSA_MV88E6XXX_NEED_PPU=y
+CONFIG_NET_DSA_MV88E6131=y
+CONFIG_NET_DSA_MV88E6123_61_65=y
 CONFIG_ETHERNET=y
 CONFIG_MDIO=m
 CONFIG_NET_VENDOR_3COM=y
-CONFIG_EL1=m
-CONFIG_EL3=m
-CONFIG_3C515=m
-CONFIG_PCMCIA_3C574=m
-CONFIG_PCMCIA_3C589=m
 CONFIG_VORTEX=m
 CONFIG_TYPHOON=m
 CONFIG_NET_VENDOR_ADAPTEC=y
@@ -1764,16 +1811,16 @@ CONFIG_ACENIC=m
 # CONFIG_ACENIC_OMIT_TIGON_I is not set
 CONFIG_NET_VENDOR_AMD=y
 CONFIG_AMD8111_ETH=m
-CONFIG_LANCE=m
 CONFIG_PCNET32=m
-CONFIG_DEPCA=m
-CONFIG_PCMCIA_NMCLAN=m
-CONFIG_NI65=m
 CONFIG_NET_VENDOR_ATHEROS=y
 CONFIG_ATL2=m
 CONFIG_ATL1=m
 CONFIG_ATL1E=m
 CONFIG_ATL1C=m
+CONFIG_ALX=m
+CONFIG_NET_CADENCE=y
+CONFIG_ARM_AT91_ETHER=m
+CONFIG_MACB=m
 CONFIG_NET_VENDOR_BROADCOM=y
 CONFIG_B44=m
 CONFIG_B44_PCI_AUTOSELECT=y
@@ -1783,36 +1830,34 @@ CONFIG_BNX2=m
 CONFIG_CNIC=m
 CONFIG_TIGON3=m
 CONFIG_BNX2X=m
+CONFIG_BNX2X_SRIOV=y
 CONFIG_NET_VENDOR_BROCADE=y
 CONFIG_BNA=m
+CONFIG_NET_CALXEDA_XGMAC=m
 CONFIG_NET_VENDOR_CHELSIO=y
 CONFIG_CHELSIO_T1=m
 CONFIG_CHELSIO_T1_1G=y
 CONFIG_CHELSIO_T3=m
 CONFIG_CHELSIO_T4=m
 CONFIG_CHELSIO_T4VF=m
-CONFIG_NET_VENDOR_CIRRUS=y
-CONFIG_CS89x0=m
 CONFIG_NET_VENDOR_CISCO=y
 CONFIG_ENIC=m
 CONFIG_DNET=m
 CONFIG_NET_VENDOR_DEC=y
-CONFIG_EWRK3=m
 CONFIG_NET_TULIP=y
 CONFIG_DE2104X=m
 CONFIG_DE2104X_DSL=0
 CONFIG_TULIP=m
 # CONFIG_TULIP_MWI is not set
-# CONFIG_TULIP_MMIO is not set
-# CONFIG_TULIP_NAPI is not set
+CONFIG_TULIP_MMIO=y
+CONFIG_TULIP_NAPI=y
+CONFIG_TULIP_NAPI_HW_MITIGATION=y
 CONFIG_DE4X5=m
 CONFIG_WINBOND_840=m
 CONFIG_DM9102=m
 CONFIG_ULI526X=m
 CONFIG_PCMCIA_XIRCOM=m
 CONFIG_NET_VENDOR_DLINK=y
-CONFIG_DE600=m
-CONFIG_DE620=m
 CONFIG_DL2K=m
 CONFIG_SUNDANCE=m
 # CONFIG_SUNDANCE_MMIO is not set
@@ -1822,126 +1867,89 @@ CONFIG_NET_VENDOR_EXAR=y
 CONFIG_S2IO=m
 CONFIG_VXGE=m
 # CONFIG_VXGE_DEBUG_TRACE_ALL is not set
-CONFIG_NET_VENDOR_FUJITSU=y
-CONFIG_AT1700=m
-CONFIG_PCMCIA_FMVJ18X=m
-CONFIG_ETH16I=m
 CONFIG_NET_VENDOR_HP=y
 CONFIG_HP100=m
-CONFIG_NET_VENDOR_IBM=y
-# CONFIG_IBM_EMAC_ZMII is not set
-# CONFIG_IBM_EMAC_RGMII is not set
-# CONFIG_IBM_EMAC_TAH is not set
-# CONFIG_IBM_EMAC_EMAC4 is not set
-# CONFIG_IBM_EMAC_NO_FLOW_CTRL is not set
-# CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT is not set
-# CONFIG_IBM_EMAC_MAL_COMMON_ERR is not set
 CONFIG_NET_VENDOR_INTEL=y
 CONFIG_E100=m
 CONFIG_E1000=m
 CONFIG_E1000E=m
 CONFIG_IGB=m
+CONFIG_IGB_HWMON=y
 CONFIG_IGB_DCA=y
 CONFIG_IGBVF=m
 CONFIG_IXGB=m
 CONFIG_IXGBE=m
+CONFIG_IXGBE_HWMON=y
 CONFIG_IXGBE_DCA=y
 CONFIG_IXGBEVF=m
 CONFIG_NET_VENDOR_I825XX=y
-CONFIG_ELPLUS=m
-CONFIG_EL16=m
-CONFIG_ELMC=m
-CONFIG_ELMC_II=m
-CONFIG_APRICOT=m
-CONFIG_EEXPRESS=m
-CONFIG_EEXPRESS_PRO=m
-CONFIG_LP486E=m
-CONFIG_NI52=m
-CONFIG_ZNET=m
 CONFIG_IP1000=m
 CONFIG_JME=m
 CONFIG_NET_VENDOR_MARVELL=y
+CONFIG_MVMDIO=m
 CONFIG_SKGE=m
+# CONFIG_SKGE_DEBUG is not set
 CONFIG_SKGE_GENESIS=y
 CONFIG_SKY2=m
+# CONFIG_SKY2_DEBUG is not set
 CONFIG_NET_VENDOR_MELLANOX=y
 CONFIG_MLX4_EN=m
 CONFIG_MLX4_CORE=m
 CONFIG_MLX4_DEBUG=y
 CONFIG_NET_VENDOR_MICREL=y
-CONFIG_KS8842=m
-CONFIG_KS8851_MLL=m
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
 CONFIG_KSZ884X_PCI=m
 CONFIG_NET_VENDOR_MYRI=y
 CONFIG_MYRI10GE=m
 CONFIG_MYRI10GE_DCA=y
 CONFIG_FEALNX=m
 CONFIG_NET_VENDOR_NATSEMI=y
-CONFIG_IBMLANA=m
 CONFIG_NATSEMI=m
 CONFIG_NS83820=m
 CONFIG_NET_VENDOR_8390=y
-CONFIG_EL2=m
-CONFIG_AC3200=m
-CONFIG_PCMCIA_AXNET=m
-CONFIG_E2100=m
-CONFIG_ES3210=m
-CONFIG_HPLAN_PLUS=m
-CONFIG_HPLAN=m
-CONFIG_LNE390=m
-CONFIG_NE2000=m
-CONFIG_NE2_MCA=m
 CONFIG_NE2K_PCI=m
-CONFIG_NE3210=m
-CONFIG_PCMCIA_PCNET=m
-CONFIG_ULTRAMCA=m
-CONFIG_ULTRA=m
-CONFIG_ULTRA32=m
-CONFIG_WD80x3=m
 CONFIG_NET_VENDOR_NVIDIA=y
 CONFIG_FORCEDETH=m
 CONFIG_NET_VENDOR_OKI=y
 CONFIG_PCH_GBE=m
 CONFIG_ETHOC=m
-CONFIG_NET_PACKET_ENGINE=y
-CONFIG_HAMACHI=m
-CONFIG_YELLOWFIN=m
+# CONFIG_NET_PACKET_ENGINE is not set
 CONFIG_NET_VENDOR_QLOGIC=y
 CONFIG_QLA3XXX=m
 CONFIG_QLCNIC=m
+CONFIG_QLCNIC_SRIOV=y
 CONFIG_QLGE=m
 CONFIG_NETXEN_NIC=m
-CONFIG_NET_VENDOR_RACAL=y
 CONFIG_NET_VENDOR_REALTEK=y
 CONFIG_ATP=m
 CONFIG_8139CP=m
 CONFIG_8139TOO=m
 # CONFIG_8139TOO_PIO is not set
-CONFIG_8139TOO_TUNE_TWISTER=y
+# CONFIG_8139TOO_TUNE_TWISTER is not set
 CONFIG_8139TOO_8129=y
 # CONFIG_8139_OLD_RX_RESET is not set
 CONFIG_R8169=m
 CONFIG_NET_VENDOR_RDC=y
 CONFIG_R6040=m
 CONFIG_NET_VENDOR_SEEQ=y
-CONFIG_SEEQ8005=m
 CONFIG_NET_VENDOR_SILAN=y
 CONFIG_SC92031=m
 CONFIG_NET_VENDOR_SIS=y
 CONFIG_SIS900=m
 CONFIG_SIS190=m
 CONFIG_SFC=m
-CONFIG_SFC_MTD=y
+CONFIG_SFC_MCDI_MON=y
+CONFIG_SFC_SRIOV=y
 CONFIG_NET_VENDOR_SMSC=y
-CONFIG_SMC9194=m
-CONFIG_PCMCIA_SMC91C92=m
 CONFIG_EPIC100=m
 CONFIG_SMSC9420=m
 CONFIG_NET_VENDOR_STMICRO=y
 CONFIG_STMMAC_ETH=m
+CONFIG_STMMAC_PLATFORM=y
+CONFIG_STMMAC_PCI=y
+# CONFIG_STMMAC_DEBUG_FS is not set
 # CONFIG_STMMAC_DA is not set
-CONFIG_STMMAC_RING=y
-# CONFIG_STMMAC_CHAINED is not set
 CONFIG_NET_VENDOR_SUN=y
 CONFIG_HAPPYMEAL=m
 CONFIG_SUNGEM=m
@@ -1955,16 +1963,22 @@ CONFIG_NET_VENDOR_VIA=y
 CONFIG_VIA_RHINE=m
 CONFIG_VIA_RHINE_MMIO=y
 CONFIG_VIA_VELOCITY=m
-CONFIG_NET_VENDOR_XIRCOM=y
-CONFIG_PCMCIA_XIRC2PS=m
+CONFIG_NET_VENDOR_WIZNET=y
+CONFIG_WIZNET_W5100=m
+CONFIG_WIZNET_W5300=m
+# CONFIG_WIZNET_BUS_DIRECT is not set
+# CONFIG_WIZNET_BUS_INDIRECT is not set
+CONFIG_WIZNET_BUS_ANY=y
 # CONFIG_FDDI is not set
 # CONFIG_HIPPI is not set
-CONFIG_NET_SB1000=m
+# CONFIG_NET_SB1000 is not set
 CONFIG_PHYLIB=y
 
 #
 # MII PHY device drivers
 #
+CONFIG_AT803X_PHY=m
+CONFIG_AMD_PHY=m
 CONFIG_MARVELL_PHY=m
 CONFIG_DAVICOM_PHY=m
 CONFIG_QSEMI_PHY=m
@@ -1973,15 +1987,19 @@ CONFIG_CICADA_PHY=m
 CONFIG_VITESSE_PHY=m
 CONFIG_SMSC_PHY=m
 CONFIG_BROADCOM_PHY=m
+CONFIG_BCM87XX_PHY=m
 CONFIG_ICPLUS_PHY=m
 CONFIG_REALTEK_PHY=m
 CONFIG_NATIONAL_PHY=m
 CONFIG_STE10XP=m
 CONFIG_LSI_ET1011C_PHY=m
 CONFIG_MICREL_PHY=m
-# CONFIG_FIXED_PHY is not set
+CONFIG_FIXED_PHY=y
 CONFIG_MDIO_BITBANG=m
-CONFIG_MDIO_GPIO=m
+# CONFIG_MDIO_GPIO is not set
+CONFIG_MDIO_BUS_MUX=m
+CONFIG_MDIO_BUS_MUX_GPIO=m
+CONFIG_MDIO_BUS_MUX_MMIOREG=m
 # CONFIG_PLIP is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -1995,12 +2013,8 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-CONFIG_SLIP=m
+# CONFIG_SLIP is not set
 CONFIG_SLHC=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_SMART=y
-CONFIG_SLIP_MODE_SLIP6=y
-# CONFIG_TR is not set
 
 #
 # USB Network Adapters
@@ -2009,11 +2023,14 @@ CONFIG_USB_CATC=m
 CONFIG_USB_KAWETH=m
 CONFIG_USB_PEGASUS=m
 CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
 CONFIG_USB_USBNET=m
 CONFIG_USB_NET_AX8817X=m
+CONFIG_USB_NET_AX88179_178A=m
 CONFIG_USB_NET_CDCETHER=m
 CONFIG_USB_NET_CDC_EEM=m
 CONFIG_USB_NET_CDC_NCM=m
+CONFIG_USB_NET_CDC_MBIM=m
 CONFIG_USB_NET_DM9601=m
 CONFIG_USB_NET_SMSC75XX=m
 CONFIG_USB_NET_SMSC95XX=m
@@ -2032,91 +2049,172 @@ CONFIG_USB_KC2190=y
 CONFIG_USB_NET_ZAURUS=m
 CONFIG_USB_NET_CX82310_ETH=m
 CONFIG_USB_NET_KALMIA=m
+CONFIG_USB_NET_QMI_WWAN=m
 CONFIG_USB_HSO=m
 CONFIG_USB_NET_INT51X1=m
-# CONFIG_USB_IPHETH is not set
+CONFIG_USB_IPHETH=m
 CONFIG_USB_SIERRA_NET=m
 CONFIG_USB_VL600=m
 CONFIG_WLAN=y
-CONFIG_PCMCIA_RAYCS=m
-# CONFIG_LIBERTAS_THINFIRM is not set
+CONFIG_LIBERTAS_THINFIRM=m
+# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set
+CONFIG_LIBERTAS_THINFIRM_USB=m
 CONFIG_AIRO=m
 CONFIG_ATMEL=m
 CONFIG_PCI_ATMEL=m
-CONFIG_PCMCIA_ATMEL=m
 CONFIG_AT76C50X_USB=m
-CONFIG_AIRO_CS=m
-CONFIG_PCMCIA_WL3501=m
 # CONFIG_PRISM54 is not set
 CONFIG_USB_ZD1201=m
 CONFIG_USB_NET_RNDIS_WLAN=m
-# CONFIG_RTL8180 is not set
-# CONFIG_RTL8187 is not set
-# CONFIG_ADM8211 is not set
-# CONFIG_MAC80211_HWSIM is not set
-# CONFIG_MWL8K is not set
-# CONFIG_ATH_COMMON is not set
-# CONFIG_B43 is not set
-# CONFIG_B43LEGACY is not set
+CONFIG_RTL8180=m
+CONFIG_RTL8187=m
+CONFIG_RTL8187_LEDS=y
+CONFIG_ADM8211=m
+CONFIG_MAC80211_HWSIM=m
+CONFIG_MWL8K=m
+CONFIG_ATH_COMMON=m
+CONFIG_ATH_CARDS=m
+# CONFIG_ATH_DEBUG is not set
+CONFIG_ATH5K=m
+CONFIG_ATH5K_DEBUG=y
+# CONFIG_ATH5K_TRACER is not set
+CONFIG_ATH5K_PCI=y
+CONFIG_ATH9K_HW=m
+CONFIG_ATH9K_COMMON=m
+CONFIG_ATH9K_BTCOEX_SUPPORT=y
+CONFIG_ATH9K=m
+CONFIG_ATH9K_PCI=y
+CONFIG_ATH9K_AHB=y
+# CONFIG_ATH9K_DEBUGFS is not set
+# CONFIG_ATH9K_LEGACY_RATE_CONTROL is not set
+CONFIG_ATH9K_HTC=m
+# CONFIG_ATH9K_HTC_DEBUGFS is not set
+CONFIG_CARL9170=m
+CONFIG_CARL9170_LEDS=y
+CONFIG_CARL9170_WPC=y
+# CONFIG_CARL9170_HWRNG is not set
+# CONFIG_ATH6KL is not set
+CONFIG_AR5523=m
+CONFIG_WIL6210=m
+CONFIG_WIL6210_ISR_COR=y
+CONFIG_B43=m
+CONFIG_B43_SSB=y
+CONFIG_B43_PCI_AUTOSELECT=y
+CONFIG_B43_PCICORE_AUTOSELECT=y
+CONFIG_B43_SDIO=y
+CONFIG_B43_PIO=y
+CONFIG_B43_PHY_N=y
+CONFIG_B43_PHY_LP=y
+CONFIG_B43_LEDS=y
+CONFIG_B43_HWRNG=y
+# CONFIG_B43_DEBUG is not set
+CONFIG_B43LEGACY=m
+CONFIG_B43LEGACY_PCI_AUTOSELECT=y
+CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y
+CONFIG_B43LEGACY_LEDS=y
+CONFIG_B43LEGACY_HWRNG=y
+# CONFIG_B43LEGACY_DEBUG is not set
+CONFIG_B43LEGACY_DMA=y
+CONFIG_B43LEGACY_PIO=y
+CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
+# CONFIG_B43LEGACY_DMA_MODE is not set
+# CONFIG_B43LEGACY_PIO_MODE is not set
 # CONFIG_BRCMFMAC is not set
 CONFIG_HOSTAP=m
 CONFIG_HOSTAP_FIRMWARE=y
-# CONFIG_HOSTAP_FIRMWARE_NVRAM is not set
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
 CONFIG_HOSTAP_PLX=m
 CONFIG_HOSTAP_PCI=m
-CONFIG_HOSTAP_CS=m
-# CONFIG_IPW2100 is not set
-# CONFIG_IPW2200 is not set
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+# CONFIG_IPW2100_DEBUG is not set
+CONFIG_IPW2200=m
+CONFIG_IPW2200_MONITOR=y
+CONFIG_IPW2200_RADIOTAP=y
+CONFIG_IPW2200_PROMISCUOUS=y
+CONFIG_IPW2200_QOS=y
+# CONFIG_IPW2200_DEBUG is not set
+CONFIG_LIBIPW=m
+# CONFIG_LIBIPW_DEBUG is not set
 # CONFIG_IWLWIFI is not set
-# CONFIG_IWL4965 is not set
-# CONFIG_IWL3945 is not set
-# CONFIG_IWM is not set
-# CONFIG_LIBERTAS is not set
-# CONFIG_HERMES is not set
-# CONFIG_P54_COMMON is not set
-# CONFIG_RT2X00 is not set
-# CONFIG_RTL8192CE is not set
-# CONFIG_RTL8192SE is not set
-# CONFIG_RTL8192DE is not set
-# CONFIG_RTL8192CU is not set
-# CONFIG_WL1251 is not set
-# CONFIG_WL12XX_MENU is not set
-# CONFIG_ZD1211RW is not set
-# CONFIG_MWIFIEX is not set
+CONFIG_IWLEGACY=m
+CONFIG_IWL4965=m
+CONFIG_IWL3945=m
+
+#
+# iwl3945 / iwl4965 Debugging Options
+#
+# CONFIG_IWLEGACY_DEBUG is not set
+CONFIG_LIBERTAS=m
+CONFIG_LIBERTAS_USB=m
+CONFIG_LIBERTAS_SDIO=m
+# CONFIG_LIBERTAS_DEBUG is not set
+CONFIG_LIBERTAS_MESH=y
+CONFIG_HERMES=m
+# CONFIG_HERMES_PRISM is not set
+CONFIG_HERMES_CACHE_FW_ON_INIT=y
+CONFIG_PLX_HERMES=m
+CONFIG_TMD_HERMES=m
+CONFIG_NORTEL_HERMES=m
+CONFIG_ORINOCO_USB=m
+CONFIG_P54_COMMON=m
+CONFIG_P54_USB=m
+CONFIG_P54_PCI=m
+CONFIG_P54_LEDS=y
+CONFIG_RT2X00=m
+CONFIG_RT2400PCI=m
+CONFIG_RT2500PCI=m
+CONFIG_RT61PCI=m
+CONFIG_RT2800PCI=m
+CONFIG_RT2800PCI_RT33XX=y
+CONFIG_RT2800PCI_RT35XX=y
+CONFIG_RT2800PCI_RT53XX=y
+CONFIG_RT2800PCI_RT3290=y
+CONFIG_RT2500USB=m
+CONFIG_RT73USB=m
+CONFIG_RT2800USB=m
+CONFIG_RT2800USB_RT33XX=y
+CONFIG_RT2800USB_RT35XX=y
+CONFIG_RT2800USB_RT53XX=y
+CONFIG_RT2800USB_RT55XX=y
+CONFIG_RT2800USB_UNKNOWN=y
+CONFIG_RT2800_LIB=m
+CONFIG_RT2X00_LIB_MMIO=m
+CONFIG_RT2X00_LIB_PCI=m
+CONFIG_RT2X00_LIB_USB=m
+CONFIG_RT2X00_LIB=m
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_CRYPTO=y
+CONFIG_RT2X00_LIB_LEDS=y
+# CONFIG_RT2X00_DEBUG is not set
+CONFIG_RTLWIFI=m
+# CONFIG_RTLWIFI_DEBUG is not set
+CONFIG_RTL8192CE=m
+CONFIG_RTL8192SE=m
+CONFIG_RTL8192DE=m
+CONFIG_RTL8723AE=m
+CONFIG_RTL8188EE=m
+CONFIG_RTL8192CU=m
+CONFIG_RTL8192C_COMMON=m
+CONFIG_WL_TI=y
+CONFIG_WL1251=m
+CONFIG_WL1251_SDIO=m
+CONFIG_WL12XX=m
+CONFIG_WL18XX=m
+CONFIG_WLCORE=m
+CONFIG_WLCORE_SDIO=m
+CONFIG_WILINK_PLATFORM_DATA=y
+CONFIG_ZD1211RW=m
+# CONFIG_ZD1211RW_DEBUG is not set
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_SDIO=m
+# CONFIG_MWIFIEX_PCIE is not set
+CONFIG_MWIFIEX_USB=m
 
 #
 # Enable WiMAX (Networking options) to see the WiMAX drivers
 #
-CONFIG_WAN=y
-CONFIG_HOSTESS_SV11=m
-CONFIG_COSA=m
-CONFIG_LANMEDIA=m
-CONFIG_SEALEVEL_4021=m
-CONFIG_HDLC=m
-CONFIG_HDLC_RAW=m
-CONFIG_HDLC_RAW_ETH=m
-CONFIG_HDLC_CISCO=m
-CONFIG_HDLC_FR=m
-CONFIG_HDLC_PPP=m
-
-#
-# X.25/LAPB support is disabled
-#
-CONFIG_PCI200SYN=m
-CONFIG_WANXL=m
-# CONFIG_WANXL_BUILD_FIRMWARE is not set
-CONFIG_PC300TOO=m
-CONFIG_N2=m
-CONFIG_C101=m
-CONFIG_FARSYNC=m
-CONFIG_DSCC4=m
-CONFIG_DSCC4_PCISYNC=y
-CONFIG_DSCC4_PCI_RST=y
-CONFIG_DLCI=m
-CONFIG_DLCI_MAX=8
-CONFIG_SDLA=m
-CONFIG_SBNI=m
-CONFIG_SBNI_MULTILINE=y
+# CONFIG_WAN is not set
 CONFIG_VMXNET3=m
 CONFIG_ISDN=y
 CONFIG_ISDN_I4L=m
@@ -2147,9 +2245,9 @@ CONFIG_ISDN_DRV_HISAX=m
 #
 CONFIG_HISAX_EURO=y
 CONFIG_DE_AOC=y
-# CONFIG_HISAX_NO_SENDCOMPLETE is not set
-# CONFIG_HISAX_NO_LLC is not set
-# CONFIG_HISAX_NO_KEYPAD is not set
+CONFIG_HISAX_NO_SENDCOMPLETE=y
+CONFIG_HISAX_NO_LLC=y
+CONFIG_HISAX_NO_KEYPAD=y
 CONFIG_HISAX_1TR6=y
 CONFIG_HISAX_NI1=y
 CONFIG_HISAX_MAX_CARDS=8
@@ -2157,27 +2255,17 @@ CONFIG_HISAX_MAX_CARDS=8
 #
 # HiSax supported cards
 #
-CONFIG_HISAX_16_0=y
 CONFIG_HISAX_16_3=y
 CONFIG_HISAX_TELESPCI=y
 CONFIG_HISAX_S0BOX=y
-CONFIG_HISAX_AVM_A1=y
 CONFIG_HISAX_FRITZPCI=y
 CONFIG_HISAX_AVM_A1_PCMCIA=y
 CONFIG_HISAX_ELSA=y
-CONFIG_HISAX_IX1MICROR2=y
 CONFIG_HISAX_DIEHLDIVA=y
-CONFIG_HISAX_ASUSCOM=y
-CONFIG_HISAX_TELEINT=y
-CONFIG_HISAX_HFCS=y
 CONFIG_HISAX_SEDLBAUER=y
-CONFIG_HISAX_SPORTSTER=y
-CONFIG_HISAX_MIC=y
 CONFIG_HISAX_NETJET=y
 CONFIG_HISAX_NETJET_U=y
 CONFIG_HISAX_NICCY=y
-CONFIG_HISAX_ISURF=y
-CONFIG_HISAX_HSTSAPHIR=y
 CONFIG_HISAX_BKM_A4T=y
 CONFIG_HISAX_SCT_QUADRO=y
 CONFIG_HISAX_GAZEL=y
@@ -2185,15 +2273,11 @@ CONFIG_HISAX_HFC_PCI=y
 CONFIG_HISAX_W6692=y
 CONFIG_HISAX_HFC_SX=y
 CONFIG_HISAX_ENTERNOW_PCI=y
-CONFIG_HISAX_DEBUG=y
+# CONFIG_HISAX_DEBUG is not set
 
 #
 # HiSax PCMCIA card service modules
 #
-CONFIG_HISAX_SEDLBAUER_CS=m
-CONFIG_HISAX_ELSA_CS=m
-CONFIG_HISAX_AVM_A1_CS=m
-CONFIG_HISAX_TELES_CS=m
 
 #
 # HiSax sub driver modules
@@ -2206,10 +2290,6 @@ CONFIG_HISAX_FRITZ_PCIPNP=m
 #
 # Active cards
 #
-CONFIG_ISDN_DRV_ICN=m
-CONFIG_ISDN_DRV_PCBIT=m
-CONFIG_ISDN_DRV_SC=m
-CONFIG_ISDN_DRV_ACT2000=m
 CONFIG_ISDN_CAPI=m
 CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
 CONFIG_CAPI_TRACE=y
@@ -2221,12 +2301,8 @@ CONFIG_ISDN_CAPI_CAPIDRV=m
 # CAPI hardware drivers
 #
 CONFIG_CAPI_AVM=y
-CONFIG_ISDN_DRV_AVMB1_B1ISA=m
 CONFIG_ISDN_DRV_AVMB1_B1PCI=m
 CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
-CONFIG_ISDN_DRV_AVMB1_T1ISA=m
-CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
-CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
 CONFIG_ISDN_DRV_AVMB1_T1PCI=m
 CONFIG_ISDN_DRV_AVMB1_C4=m
 CONFIG_CAPI_EICON=y
@@ -2236,43 +2312,20 @@ CONFIG_ISDN_DIVAS_PRIPCI=y
 CONFIG_ISDN_DIVAS_DIVACAPI=m
 CONFIG_ISDN_DIVAS_USERIDI=m
 CONFIG_ISDN_DIVAS_MAINT=m
-CONFIG_ISDN_DRV_GIGASET=m
-CONFIG_GIGASET_CAPI=y
-# CONFIG_GIGASET_I4L is not set
-# CONFIG_GIGASET_DUMMYLL is not set
-CONFIG_GIGASET_BASE=m
-CONFIG_GIGASET_M105=m
-CONFIG_GIGASET_M101=m
-# CONFIG_GIGASET_DEBUG is not set
+# CONFIG_ISDN_DRV_GIGASET is not set
 CONFIG_HYSDN=m
 CONFIG_HYSDN_CAPI=y
-CONFIG_MISDN=m
-CONFIG_MISDN_DSP=m
-CONFIG_MISDN_L1OIP=m
-
-#
-# mISDN hardware drivers
-#
-CONFIG_MISDN_HFCPCI=m
-CONFIG_MISDN_HFCMULTI=m
-CONFIG_MISDN_HFCUSB=m
-CONFIG_MISDN_AVMFRITZ=m
-CONFIG_MISDN_SPEEDFAX=m
-CONFIG_MISDN_INFINEON=m
-CONFIG_MISDN_W6692=m
-CONFIG_MISDN_NETJET=m
-CONFIG_MISDN_IPAC=m
-CONFIG_MISDN_ISAR=m
+# CONFIG_MISDN is not set
 CONFIG_ISDN_HDLC=m
-# CONFIG_PHONE is not set
 
 #
 # Input device support
 #
 CONFIG_INPUT=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_INPUT_FF_MEMLESS is not set
 CONFIG_INPUT_POLLDEV=m
 CONFIG_INPUT_SPARSEKMAP=m
+CONFIG_INPUT_MATRIXKMAP=m
 
 #
 # Userland interfaces
@@ -2282,57 +2335,82 @@ CONFIG_INPUT_MOUSEDEV=y
 CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
 CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
 # CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_EVDEV is not set
+CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_EVBUG is not set
 
 #
 # Input Device Drivers
 #
 CONFIG_INPUT_KEYBOARD=y
-CONFIG_KEYBOARD_ADP5588=m
-CONFIG_KEYBOARD_ADP5589=m
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
 CONFIG_KEYBOARD_ATKBD=y
-CONFIG_KEYBOARD_QT1070=m
-CONFIG_KEYBOARD_QT2160=m
-CONFIG_KEYBOARD_LKKBD=m
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
 CONFIG_KEYBOARD_GPIO=m
 CONFIG_KEYBOARD_GPIO_POLLED=m
-CONFIG_KEYBOARD_TCA6416=m
-CONFIG_KEYBOARD_MATRIX=m
-CONFIG_KEYBOARD_LM8323=m
-CONFIG_KEYBOARD_MAX7359=m
-CONFIG_KEYBOARD_MCS=m
-CONFIG_KEYBOARD_MPR121=m
-CONFIG_KEYBOARD_NEWTON=m
-CONFIG_KEYBOARD_OPENCORES=m
-CONFIG_KEYBOARD_STOWAWAY=m
-CONFIG_KEYBOARD_SUNKBD=m
-CONFIG_KEYBOARD_XTKBD=m
-# CONFIG_INPUT_MOUSE is not set
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_TCA8418 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+CONFIG_KEYBOARD_LM8333=m
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_SAMSUNG is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_CYPRESS=y
+CONFIG_MOUSE_PS2_LIFEBOOK=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+CONFIG_MOUSE_PS2_ELANTECH=y
+CONFIG_MOUSE_PS2_SENTELIC=y
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+CONFIG_MOUSE_PS2_OLPC=y
+CONFIG_MOUSE_SERIAL=m
+CONFIG_MOUSE_APPLETOUCH=m
+CONFIG_MOUSE_BCM5974=m
+CONFIG_MOUSE_CYAPA=m
+CONFIG_MOUSE_VSXXXAA=m
+# CONFIG_MOUSE_GPIO is not set
+CONFIG_MOUSE_SYNAPTICS_I2C=m
+CONFIG_MOUSE_SYNAPTICS_USB=m
 # CONFIG_INPUT_JOYSTICK is not set
 # CONFIG_INPUT_TABLET is not set
 # CONFIG_INPUT_TOUCHSCREEN is not set
 CONFIG_INPUT_MISC=y
-CONFIG_INPUT_AD714X=m
-CONFIG_INPUT_AD714X_I2C=m
-CONFIG_INPUT_BMA150=m
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_BMA150 is not set
 CONFIG_INPUT_PCSPKR=m
 # CONFIG_INPUT_MMA8450 is not set
 # CONFIG_INPUT_MPU3050 is not set
 CONFIG_INPUT_APANEL=m
+# CONFIG_INPUT_GP2A is not set
+# CONFIG_INPUT_GPIO_TILT_POLLED is not set
 # CONFIG_INPUT_WISTRON_BTNS is not set
-# CONFIG_INPUT_ATLAS_BTNS is not set
+CONFIG_INPUT_ATLAS_BTNS=m
 CONFIG_INPUT_ATI_REMOTE2=m
 CONFIG_INPUT_KEYSPAN_REMOTE=m
 # CONFIG_INPUT_KXTJ9 is not set
 CONFIG_INPUT_POWERMATE=m
 CONFIG_INPUT_YEALINK=m
 CONFIG_INPUT_CM109=m
-# CONFIG_INPUT_UINPUT is not set
-CONFIG_INPUT_PCF50633_PMU=m
-CONFIG_INPUT_PCF8574=m
+CONFIG_INPUT_RETU_PWRBUTTON=m
+CONFIG_INPUT_UINPUT=m
+# CONFIG_INPUT_PCF8574 is not set
+CONFIG_INPUT_PWM_BEEPER=m
 CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
 # CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_IMS_PCU is not set
 # CONFIG_INPUT_CMA3000 is not set
 
 #
@@ -2345,123 +2423,119 @@ CONFIG_SERIO_SERPORT=y
 # CONFIG_SERIO_PARKBD is not set
 # CONFIG_SERIO_PCIPS2 is not set
 CONFIG_SERIO_LIBPS2=y
-# CONFIG_SERIO_RAW is not set
+CONFIG_SERIO_RAW=m
 CONFIG_SERIO_ALTERA_PS2=m
-CONFIG_SERIO_PS2MULT=m
+# CONFIG_SERIO_PS2MULT is not set
+CONFIG_SERIO_ARC_PS2=m
+CONFIG_SERIO_APBPS2=m
 # CONFIG_GAMEPORT is not set
 
 #
 # Character devices
 #
+CONFIG_TTY=y
 CONFIG_VT=y
 CONFIG_CONSOLE_TRANSLATIONS=y
 CONFIG_VT_CONSOLE=y
 CONFIG_VT_CONSOLE_SLEEP=y
 CONFIG_HW_CONSOLE=y
-# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
 CONFIG_UNIX98_PTYS=y
-# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_SERIAL_NONSTANDARD is not set
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_ROCKETPORT=m
+CONFIG_CYCLADES=m
+# CONFIG_CYZ_INTR is not set
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
+CONFIG_SYNCLINK=m
+CONFIG_SYNCLINKMP=m
+CONFIG_SYNCLINK_GT=m
 CONFIG_NOZOMI=m
+# CONFIG_ISI is not set
+CONFIG_N_HDLC=m
 CONFIG_N_GSM=m
 # CONFIG_TRACE_SINK is not set
-CONFIG_DEVKMEM=y
+# CONFIG_STALDRV is not set
 
 #
 # Serial drivers
 #
 CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y
+CONFIG_SERIAL_8250_PNP=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_SERIAL_8250_DMA=y
 CONFIG_SERIAL_8250_PCI=y
-CONFIG_SERIAL_8250_PNP=y
-CONFIG_SERIAL_8250_CS=m
 CONFIG_SERIAL_8250_NR_UARTS=4
 CONFIG_SERIAL_8250_RUNTIME_UARTS=4
 CONFIG_SERIAL_8250_EXTENDED=y
 CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_FOURPORT=m
-CONFIG_SERIAL_8250_ACCENT=m
-CONFIG_SERIAL_8250_BOCA=m
-CONFIG_SERIAL_8250_EXAR_ST16C554=m
-CONFIG_SERIAL_8250_HUB6=m
 CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
 CONFIG_SERIAL_8250_RSA=y
-# CONFIG_SERIAL_8250_MCA is not set
+# CONFIG_SERIAL_8250_DW is not set
 
 #
 # Non-8250 serial port support
 #
-CONFIG_SERIAL_MFD_HSU=m
-CONFIG_SERIAL_UARTLITE=m
+# CONFIG_SERIAL_MFD_HSU is not set
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
-# CONFIG_SERIAL_JSM is not set
-CONFIG_SERIAL_TIMBERDALE=m
-CONFIG_SERIAL_ALTERA_JTAGUART=m
-CONFIG_SERIAL_ALTERA_UART=m
-CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4
-CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200
-CONFIG_SERIAL_PCH_UART=m
-CONFIG_SERIAL_XILINX_PS_UART=m
+CONFIG_SERIAL_JSM=m
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_SCCNXP=m
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_PCH_UART is not set
+# CONFIG_SERIAL_XILINX_PS_UART is not set
+CONFIG_SERIAL_ARC=m
+CONFIG_SERIAL_ARC_NR_PORTS=1
+CONFIG_SERIAL_RP2=m
+CONFIG_SERIAL_RP2_NR_UARTS=32
+# CONFIG_TTY_PRINTK is not set
 CONFIG_PRINTER=m
-# CONFIG_LP_CONSOLE is not set
+CONFIG_LP_CONSOLE=y
 CONFIG_PPDEV=m
 CONFIG_HVC_DRIVER=y
 CONFIG_VIRTIO_CONSOLE=y
 CONFIG_IPMI_HANDLER=m
-CONFIG_IPMI_PANIC_EVENT=y
-# CONFIG_IPMI_PANIC_STRING is not set
+# CONFIG_IPMI_PANIC_EVENT is not set
 CONFIG_IPMI_DEVICE_INTERFACE=m
 CONFIG_IPMI_SI=m
 CONFIG_IPMI_WATCHDOG=m
 CONFIG_IPMI_POWEROFF=m
-CONFIG_HW_RANDOM=m
+CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_TIMERIOMEM=m
 CONFIG_HW_RANDOM_INTEL=m
 CONFIG_HW_RANDOM_AMD=m
+CONFIG_HW_RANDOM_ATMEL=m
 CONFIG_HW_RANDOM_GEODE=m
 CONFIG_HW_RANDOM_VIA=m
 CONFIG_HW_RANDOM_VIRTIO=m
-CONFIG_NVRAM=m
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
+CONFIG_HW_RANDOM_EXYNOS=m
+CONFIG_NVRAM=y
+CONFIG_R3964=m
 # CONFIG_APPLICOM is not set
-# CONFIG_SONYPI is not set
-
-#
-# PCMCIA character devices
-#
-CONFIG_SYNCLINK_CS=m
-# CONFIG_CARDMAN_4000 is not set
-# CONFIG_CARDMAN_4040 is not set
-CONFIG_IPWIRELESS=m
+CONFIG_SONYPI=m
 CONFIG_MWAVE=m
-CONFIG_SCx200_GPIO=m
 CONFIG_PC8736x_GPIO=m
 CONFIG_NSC_GPIO=m
-# CONFIG_RAW_DRIVER is not set
+CONFIG_RAW_DRIVER=y
+CONFIG_MAX_RAW_DEVS=8192
 CONFIG_HPET=y
-CONFIG_HPET_MMAP=y
+# CONFIG_HPET_MMAP is not set
 CONFIG_HANGCHECK_TIMER=m
 # CONFIG_TCG_TPM is not set
 # CONFIG_TELCLOCK is not set
-CONFIG_DEVPORT=y
-# CONFIG_RAMOOPS is not set
 CONFIG_I2C=m
 CONFIG_I2C_BOARDINFO=y
 CONFIG_I2C_COMPAT=y
 CONFIG_I2C_CHARDEV=m
-CONFIG_I2C_MUX=m
-
-#
-# Multiplexer I2C Chip support
-#
-CONFIG_I2C_MUX_GPIO=m
-CONFIG_I2C_MUX_PCA9541=m
-CONFIG_I2C_MUX_PCA954x=m
+# CONFIG_I2C_MUX is not set
 CONFIG_I2C_HELPER_AUTO=y
 CONFIG_I2C_SMBUS=m
 CONFIG_I2C_ALGOBIT=m
@@ -2482,6 +2556,7 @@ CONFIG_I2C_AMD756_S4882=m
 CONFIG_I2C_AMD8111=m
 CONFIG_I2C_I801=m
 CONFIG_I2C_ISCH=m
+CONFIG_I2C_ISMT=m
 CONFIG_I2C_PIIX4=m
 CONFIG_I2C_NFORCE2=m
 CONFIG_I2C_NFORCE2_S4985=m
@@ -2499,16 +2574,19 @@ CONFIG_I2C_SCMI=m
 #
 # I2C system bus drivers (mostly embedded / system-on-chip)
 #
+CONFIG_I2C_CBUS_GPIO=m
 CONFIG_I2C_DESIGNWARE_CORE=m
-CONFIG_I2C_DESIGNWARE_PCI=m
-CONFIG_I2C_GPIO=m
-CONFIG_I2C_INTEL_MID=m
-CONFIG_I2C_OCORES=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=m
+# CONFIG_I2C_DESIGNWARE_PCI is not set
+# CONFIG_I2C_EG20T is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_INTEL_MID is not set
+# CONFIG_I2C_OCORES is not set
 CONFIG_I2C_PCA_PLATFORM=m
-# CONFIG_I2C_PXA_PCI is not set
+CONFIG_I2C_PXA=m
+CONFIG_I2C_PXA_PCI=y
 CONFIG_I2C_SIMTEC=m
-CONFIG_I2C_XILINX=m
-CONFIG_I2C_EG20T=m
+# CONFIG_I2C_XILINX is not set
 
 #
 # External I2C/SMBus adapter drivers
@@ -2516,21 +2594,32 @@ CONFIG_I2C_EG20T=m
 CONFIG_I2C_DIOLAN_U2C=m
 CONFIG_I2C_PARPORT=m
 CONFIG_I2C_PARPORT_LIGHT=m
-CONFIG_I2C_TAOS_EVM=m
+# CONFIG_I2C_TAOS_EVM is not set
 CONFIG_I2C_TINY_USB=m
+CONFIG_I2C_VIPERBOARD=m
 
 #
 # Other I2C/SMBus bus drivers
 #
-CONFIG_I2C_PCA_ISA=m
-CONFIG_I2C_STUB=m
-# CONFIG_SCx200_I2C is not set
 CONFIG_SCx200_ACB=m
+CONFIG_I2C_STUB=m
 # CONFIG_I2C_DEBUG_CORE is not set
 # CONFIG_I2C_DEBUG_ALGO is not set
 # CONFIG_I2C_DEBUG_BUS is not set
 # CONFIG_SPI is not set
 
+#
+# Qualcomm MSM SSBI bus support
+#
+# CONFIG_SSBI is not set
+CONFIG_HSI=m
+CONFIG_HSI_BOARDINFO=y
+
+#
+# HSI clients
+#
+CONFIG_HSI_CHAR=m
+
 #
 # PPS support
 #
@@ -2553,67 +2642,80 @@ CONFIG_PPS=m
 # PTP clock support
 #
 CONFIG_PTP_1588_CLOCK=m
-CONFIG_DP83640_PHY=m
+
+#
+# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
+#
+CONFIG_PTP_1588_CLOCK_PCH=m
 CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+CONFIG_GPIO_DEVRES=y
 CONFIG_GPIOLIB=y
+CONFIG_OF_GPIO=y
+CONFIG_GPIO_ACPI=y
 # CONFIG_DEBUG_GPIO is not set
 CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_GENERIC=m
-CONFIG_GPIO_MAX730X=m
 
 #
 # Memory mapped GPIO drivers:
 #
-CONFIG_GPIO_GENERIC_PLATFORM=m
-CONFIG_GPIO_IT8761E=m
+# CONFIG_GPIO_GENERIC_PLATFORM is not set
+# CONFIG_GPIO_IT8761E is not set
+CONFIG_GPIO_TS5500=m
 CONFIG_GPIO_SCH=m
-CONFIG_GPIO_VX855=m
+CONFIG_GPIO_ICH=m
+# CONFIG_GPIO_VX855 is not set
+# CONFIG_GPIO_LYNXPOINT is not set
+# CONFIG_GPIO_GRGPIO is not set
 
 #
 # I2C GPIO expanders:
 #
-CONFIG_GPIO_MAX7300=m
-CONFIG_GPIO_MAX732X=m
-CONFIG_GPIO_PCA953X=m
-CONFIG_GPIO_PCF857X=m
-CONFIG_GPIO_ADP5588=m
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
+CONFIG_GPIO_ADNP=m
 
 #
 # PCI GPIO expanders:
 #
-CONFIG_GPIO_CS5535=m
-# CONFIG_GPIO_BT8XX is not set
+CONFIG_GPIO_CS5535=y
+# CONFIG_GPIO_AMD8111 is not set
 # CONFIG_GPIO_LANGWELL is not set
-CONFIG_GPIO_PCH=m
-CONFIG_GPIO_ML_IOH=m
-# CONFIG_GPIO_TIMBERDALE is not set
-CONFIG_GPIO_RDC321X=m
+# CONFIG_GPIO_PCH is not set
+# CONFIG_GPIO_ML_IOH is not set
+# CONFIG_GPIO_SODAVILLE is not set
+# CONFIG_GPIO_RDC321X is not set
 
 #
 # SPI GPIO expanders:
 #
-CONFIG_GPIO_MCP23S08=m
+# CONFIG_GPIO_MCP23S08 is not set
 
 #
 # AC97 GPIO expanders:
 #
-# CONFIG_GPIO_UCB1400 is not set
 
 #
 # MODULbus GPIO expanders:
 #
-CONFIG_GPIO_JANZ_TTL=m
+
+#
+# USB GPIO expanders:
+#
+CONFIG_GPIO_VIPERBOARD=m
 CONFIG_W1=m
 CONFIG_W1_CON=y
 
 #
 # 1-wire Bus Masters
 #
-CONFIG_W1_MASTER_MATROX=m
+# CONFIG_W1_MASTER_MATROX is not set
 CONFIG_W1_MASTER_DS2490=m
 CONFIG_W1_MASTER_DS2482=m
 CONFIG_W1_MASTER_DS1WM=m
-CONFIG_W1_MASTER_GPIO=m
+# CONFIG_W1_MASTER_GPIO is not set
 
 #
 # 1-wire Slaves
@@ -2621,29 +2723,41 @@ CONFIG_W1_MASTER_GPIO=m
 CONFIG_W1_SLAVE_THERM=m
 CONFIG_W1_SLAVE_SMEM=m
 CONFIG_W1_SLAVE_DS2408=m
+CONFIG_W1_SLAVE_DS2408_READBACK=y
+CONFIG_W1_SLAVE_DS2413=m
 CONFIG_W1_SLAVE_DS2423=m
 CONFIG_W1_SLAVE_DS2431=m
 CONFIG_W1_SLAVE_DS2433=m
 CONFIG_W1_SLAVE_DS2433_CRC=y
 CONFIG_W1_SLAVE_DS2760=m
 CONFIG_W1_SLAVE_DS2780=m
+CONFIG_W1_SLAVE_DS2781=m
+CONFIG_W1_SLAVE_DS28E04=m
 CONFIG_W1_SLAVE_BQ27000=m
 CONFIG_POWER_SUPPLY=y
 # CONFIG_POWER_SUPPLY_DEBUG is not set
 # CONFIG_PDA_POWER is not set
+CONFIG_GENERIC_ADC_BATTERY=m
 # CONFIG_TEST_POWER is not set
 # CONFIG_BATTERY_DS2760 is not set
 # CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2781 is not set
 # CONFIG_BATTERY_DS2782 is not set
-# CONFIG_BATTERY_BQ20Z75 is not set
+CONFIG_BATTERY_OLPC=y
+# CONFIG_BATTERY_SBS is not set
 # CONFIG_BATTERY_BQ27x00 is not set
 # CONFIG_BATTERY_MAX17040 is not set
 # CONFIG_BATTERY_MAX17042 is not set
-# CONFIG_CHARGER_PCF50633 is not set
-# CONFIG_CHARGER_ISP1704 is not set
 # CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_CHARGER_LP8727 is not set
 # CONFIG_CHARGER_GPIO is not set
-CONFIG_HWMON=m
+# CONFIG_CHARGER_BQ2415X is not set
+# CONFIG_CHARGER_SMB347 is not set
+# CONFIG_BATTERY_GOLDFISH is not set
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_GPIO=y
+CONFIG_POWER_AVS=y
+CONFIG_HWMON=y
 CONFIG_HWMON_VID=m
 # CONFIG_HWMON_DEBUG_CHIP is not set
 
@@ -2660,6 +2774,8 @@ CONFIG_SENSORS_ADM1026=m
 CONFIG_SENSORS_ADM1029=m
 CONFIG_SENSORS_ADM1031=m
 CONFIG_SENSORS_ADM9240=m
+CONFIG_SENSORS_ADT7X10=m
+CONFIG_SENSORS_ADT7410=m
 CONFIG_SENSORS_ADT7411=m
 CONFIG_SENSORS_ADT7462=m
 CONFIG_SENSORS_ADT7470=m
@@ -2680,12 +2796,14 @@ CONFIG_SENSORS_FSCHMD=m
 CONFIG_SENSORS_G760A=m
 CONFIG_SENSORS_GL518SM=m
 CONFIG_SENSORS_GL520SM=m
-CONFIG_SENSORS_GPIO_FAN=m
+# CONFIG_SENSORS_GPIO_FAN is not set
+CONFIG_SENSORS_HIH6130=m
 CONFIG_SENSORS_CORETEMP=m
 CONFIG_SENSORS_IBMAEM=m
 CONFIG_SENSORS_IBMPEX=m
+# CONFIG_SENSORS_IIO_HWMON is not set
 CONFIG_SENSORS_IT87=m
-CONFIG_SENSORS_JC42=m
+# CONFIG_SENSORS_JC42 is not set
 CONFIG_SENSORS_LINEAGE=m
 CONFIG_SENSORS_LM63=m
 CONFIG_SENSORS_LM73=m
@@ -2703,14 +2821,19 @@ CONFIG_SENSORS_LTC4151=m
 CONFIG_SENSORS_LTC4215=m
 CONFIG_SENSORS_LTC4245=m
 CONFIG_SENSORS_LTC4261=m
+CONFIG_SENSORS_LM95234=m
 CONFIG_SENSORS_LM95241=m
 CONFIG_SENSORS_LM95245=m
 CONFIG_SENSORS_MAX16065=m
 CONFIG_SENSORS_MAX1619=m
 CONFIG_SENSORS_MAX1668=m
+CONFIG_SENSORS_MAX197=m
 CONFIG_SENSORS_MAX6639=m
 CONFIG_SENSORS_MAX6642=m
 CONFIG_SENSORS_MAX6650=m
+CONFIG_SENSORS_MAX6697=m
+CONFIG_SENSORS_MCP3021=m
+CONFIG_SENSORS_NCT6775=m
 CONFIG_SENSORS_NTC_THERMISTOR=m
 CONFIG_SENSORS_PC87360=m
 CONFIG_SENSORS_PC87427=m
@@ -2719,20 +2842,20 @@ CONFIG_PMBUS=m
 CONFIG_SENSORS_PMBUS=m
 CONFIG_SENSORS_ADM1275=m
 CONFIG_SENSORS_LM25066=m
-CONFIG_SENSORS_LTC2978=m
+# CONFIG_SENSORS_LTC2978 is not set
 CONFIG_SENSORS_MAX16064=m
 CONFIG_SENSORS_MAX34440=m
 CONFIG_SENSORS_MAX8688=m
 CONFIG_SENSORS_UCD9000=m
 CONFIG_SENSORS_UCD9200=m
-CONFIG_SENSORS_ZL6100=m
+# CONFIG_SENSORS_ZL6100 is not set
 CONFIG_SENSORS_SHT15=m
 CONFIG_SENSORS_SHT21=m
 CONFIG_SENSORS_SIS5595=m
-CONFIG_SENSORS_SMM665=m
+# CONFIG_SENSORS_SMM665 is not set
 CONFIG_SENSORS_DME1737=m
 CONFIG_SENSORS_EMC1403=m
-CONFIG_SENSORS_EMC2103=m
+# CONFIG_SENSORS_EMC2103 is not set
 CONFIG_SENSORS_EMC6W201=m
 CONFIG_SENSORS_SMSC47M1=m
 CONFIG_SENSORS_SMSC47M192=m
@@ -2743,6 +2866,8 @@ CONFIG_SENSORS_SCH5636=m
 CONFIG_SENSORS_ADS1015=m
 CONFIG_SENSORS_ADS7828=m
 CONFIG_SENSORS_AMC6821=m
+CONFIG_SENSORS_INA209=m
+CONFIG_SENSORS_INA2XX=m
 CONFIG_SENSORS_THMC50=m
 CONFIG_SENSORS_TMP102=m
 CONFIG_SENSORS_TMP401=m
@@ -2769,58 +2894,63 @@ CONFIG_SENSORS_APPLESMC=m
 CONFIG_SENSORS_ACPI_POWER=m
 CONFIG_SENSORS_ATK0110=m
 CONFIG_THERMAL=y
+CONFIG_THERMAL_HWMON=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
+CONFIG_THERMAL_GOV_FAIR_SHARE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_INTEL_POWERCLAMP=m
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_CORE=y
-# CONFIG_WATCHDOG_NOWAYOUT is not set
+CONFIG_WATCHDOG_NOWAYOUT=y
 
 #
 # Watchdog Device Drivers
 #
 CONFIG_SOFT_WATCHDOG=m
-CONFIG_ACQUIRE_WDT=m
-CONFIG_ADVANTECH_WDT=m
+CONFIG_RETU_WATCHDOG=m
+# CONFIG_ACQUIRE_WDT is not set
+# CONFIG_ADVANTECH_WDT is not set
 CONFIG_ALIM1535_WDT=m
 CONFIG_ALIM7101_WDT=m
 CONFIG_F71808E_WDT=m
 CONFIG_SP5100_TCO=m
 CONFIG_GEODE_WDT=m
-CONFIG_SC520_WDT=m
+# CONFIG_SC520_WDT is not set
 CONFIG_SBC_FITPC2_WATCHDOG=m
-CONFIG_EUROTECH_WDT=m
+# CONFIG_EUROTECH_WDT is not set
 CONFIG_IB700_WDT=m
 CONFIG_IBMASR=m
-CONFIG_WAFER_WDT=m
+# CONFIG_WAFER_WDT is not set
 CONFIG_I6300ESB_WDT=m
+CONFIG_IE6XX_WDT=m
 CONFIG_ITCO_WDT=m
 # CONFIG_ITCO_VENDOR_SUPPORT is not set
 CONFIG_IT8712F_WDT=m
 CONFIG_IT87_WDT=m
 CONFIG_HP_WATCHDOG=m
 CONFIG_HPWDT_NMI_DECODING=y
-CONFIG_SC1200_WDT=m
-CONFIG_SCx200_WDT=m
-CONFIG_PC87413_WDT=m
+# CONFIG_SC1200_WDT is not set
+# CONFIG_PC87413_WDT is not set
 CONFIG_NV_TCO=m
-CONFIG_60XX_WDT=m
-CONFIG_SBC8360_WDT=m
-CONFIG_SBC7240_WDT=m
-CONFIG_CPU5_WDT=m
+# CONFIG_60XX_WDT is not set
+# CONFIG_SBC8360_WDT is not set
+# CONFIG_SBC7240_WDT is not set
+# CONFIG_CPU5_WDT is not set
 CONFIG_SMSC_SCH311X_WDT=m
-CONFIG_SMSC37B787_WDT=m
+# CONFIG_SMSC37B787_WDT is not set
+CONFIG_VIA_WDT=m
 CONFIG_W83627HF_WDT=m
 CONFIG_W83697HF_WDT=m
 CONFIG_W83697UG_WDT=m
 CONFIG_W83877F_WDT=m
 CONFIG_W83977F_WDT=m
 CONFIG_MACHZ_WDT=m
-CONFIG_SBC_EPX_C3_WATCHDOG=m
-
-#
-# ISA-based Watchdog Cards
-#
-# CONFIG_PCWATCHDOG is not set
-# CONFIG_MIXCOMWD is not set
-# CONFIG_WDT is not set
+# CONFIG_SBC_EPX_C3_WATCHDOG is not set
 
 #
 # PCI-based Watchdog Cards
@@ -2839,397 +2969,588 @@ CONFIG_SSB_POSSIBLE=y
 #
 CONFIG_SSB=m
 CONFIG_SSB_SPROM=y
+CONFIG_SSB_BLOCKIO=y
 CONFIG_SSB_PCIHOST_POSSIBLE=y
 CONFIG_SSB_PCIHOST=y
-# CONFIG_SSB_B43_PCI_BRIDGE is not set
-CONFIG_SSB_PCMCIAHOST_POSSIBLE=y
-CONFIG_SSB_PCMCIAHOST=y
+CONFIG_SSB_B43_PCI_BRIDGE=y
 CONFIG_SSB_SDIOHOST_POSSIBLE=y
 CONFIG_SSB_SDIOHOST=y
+# CONFIG_SSB_SILENT is not set
 # CONFIG_SSB_DEBUG is not set
 CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
 CONFIG_SSB_DRIVER_PCICORE=y
+CONFIG_SSB_DRIVER_GPIO=y
 CONFIG_BCMA_POSSIBLE=y
 
 #
 # Broadcom specific AMBA
 #
-CONFIG_BCMA=m
-CONFIG_BCMA_HOST_PCI_POSSIBLE=y
-# CONFIG_BCMA_HOST_PCI is not set
-# CONFIG_BCMA_DEBUG is not set
+# CONFIG_BCMA is not set
 
 #
 # Multifunction device drivers
 #
-CONFIG_MFD_CORE=m
-CONFIG_MFD_SM501=m
-# CONFIG_MFD_SM501_GPIO is not set
-CONFIG_HTC_PASIC3=m
-CONFIG_UCB1400_CORE=m
-CONFIG_TPS6105X=m
-CONFIG_TPS65010=m
-CONFIG_TPS6507X=m
-# CONFIG_MFD_TMIO is not set
-CONFIG_MFD_WM8400=m
-CONFIG_MFD_PCF50633=m
-CONFIG_PCF50633_ADC=m
-CONFIG_PCF50633_GPIO=m
-# CONFIG_ABX500_CORE is not set
+CONFIG_MFD_CORE=y
 CONFIG_MFD_CS5535=m
-CONFIG_MFD_TIMBERDALE=m
+# CONFIG_MFD_CROS_EC is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
+# CONFIG_HTC_PASIC3 is not set
+CONFIG_LPC_ICH=m
 CONFIG_LPC_SCH=m
-CONFIG_MFD_RDC321X=m
-CONFIG_MFD_JANZ_CMODIO=m
-CONFIG_MFD_VX855=m
+# CONFIG_MFD_JANZ_CMODIO is not set
+CONFIG_MFD_VIPERBOARD=m
+CONFIG_MFD_RETU=m
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_MFD_RDC321X is not set
+CONFIG_MFD_RTSX_PCI=m
+# CONFIG_MFD_SI476X_CORE is not set
+CONFIG_MFD_SM501=m
+CONFIG_MFD_SM501_GPIO=y
+# CONFIG_ABX500_CORE is not set
+# CONFIG_MFD_SYSCON is not set
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65217 is not set
+# CONFIG_MFD_TPS65912 is not set
 CONFIG_MFD_WL1273_CORE=m
-CONFIG_REGULATOR=y
-# CONFIG_REGULATOR_DEBUG is not set
-# CONFIG_REGULATOR_DUMMY is not set
-CONFIG_REGULATOR_FIXED_VOLTAGE=m
-# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
-# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
-CONFIG_REGULATOR_GPIO=m
-# CONFIG_REGULATOR_BQ24022 is not set
-# CONFIG_REGULATOR_MAX1586 is not set
-# CONFIG_REGULATOR_MAX8649 is not set
-# CONFIG_REGULATOR_MAX8660 is not set
-# CONFIG_REGULATOR_MAX8952 is not set
-# CONFIG_REGULATOR_WM8400 is not set
-# CONFIG_REGULATOR_PCF50633 is not set
-# CONFIG_REGULATOR_LP3971 is not set
-# CONFIG_REGULATOR_LP3972 is not set
-# CONFIG_REGULATOR_TPS6105X is not set
-# CONFIG_REGULATOR_TPS65023 is not set
-# CONFIG_REGULATOR_TPS6507X is not set
-# CONFIG_REGULATOR_ISL6271A is not set
-# CONFIG_REGULATOR_AD5398 is not set
-CONFIG_MEDIA_SUPPORT=m
+CONFIG_MFD_LM3533=m
+# CONFIG_MFD_TIMBERDALE is not set
+# CONFIG_MFD_TMIO is not set
+CONFIG_MFD_VX855=m
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_REGULATOR is not set
+CONFIG_MEDIA_SUPPORT=y
 
 #
 # Multimedia core support
 #
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+# CONFIG_MEDIA_RADIO_SUPPORT is not set
+CONFIG_MEDIA_RC_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
-CONFIG_VIDEO_DEV=m
-CONFIG_VIDEO_V4L2_COMMON=m
+CONFIG_VIDEO_DEV=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
-CONFIG_DVB_CORE=m
-CONFIG_DVB_NET=y
-CONFIG_VIDEO_MEDIA=m
-
-#
-# Multimedia drivers
-#
-CONFIG_RC_CORE=m
-CONFIG_LIRC=m
-CONFIG_RC_MAP=m
-# CONFIG_IR_NEC_DECODER is not set
-# CONFIG_IR_RC5_DECODER is not set
-# CONFIG_IR_RC6_DECODER is not set
-# CONFIG_IR_JVC_DECODER is not set
-# CONFIG_IR_SONY_DECODER is not set
-# CONFIG_IR_RC5_SZ_DECODER is not set
-# CONFIG_IR_MCE_KBD_DECODER is not set
-# CONFIG_IR_LIRC_CODEC is not set
-# CONFIG_RC_ATI_REMOTE is not set
-# CONFIG_IR_ENE is not set
-# CONFIG_IR_IMON is not set
-# CONFIG_IR_MCEUSB is not set
-# CONFIG_IR_ITE_CIR is not set
-# CONFIG_IR_FINTEK is not set
-# CONFIG_IR_NUVOTON is not set
-# CONFIG_IR_REDRAT3 is not set
-# CONFIG_IR_STREAMZAP is not set
-# CONFIG_IR_WINBOND_CIR is not set
-# CONFIG_RC_LOOPBACK is not set
-CONFIG_MEDIA_ATTACH=y
-CONFIG_MEDIA_TUNER=m
-# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
-CONFIG_MEDIA_TUNER_SIMPLE=m
-CONFIG_MEDIA_TUNER_TDA8290=m
-CONFIG_MEDIA_TUNER_TDA827X=m
-CONFIG_MEDIA_TUNER_TDA18271=m
-CONFIG_MEDIA_TUNER_TDA9887=m
-CONFIG_MEDIA_TUNER_TEA5761=m
-CONFIG_MEDIA_TUNER_TEA5767=m
-CONFIG_MEDIA_TUNER_MT20XX=m
-CONFIG_MEDIA_TUNER_XC2028=m
-CONFIG_MEDIA_TUNER_XC5000=m
-CONFIG_MEDIA_TUNER_XC4000=m
-CONFIG_MEDIA_TUNER_MC44S803=m
 CONFIG_VIDEO_V4L2=m
-CONFIG_VIDEO_CAPTURE_DRIVERS=y
 # CONFIG_VIDEO_ADV_DEBUG is not set
 # CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
-# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
-CONFIG_VIDEO_IR_I2C=m
-
-#
-# Encoders, decoders, sensors and other helper chips
-#
-
-#
-# Audio decoders, processors and mixers
-#
-# CONFIG_VIDEO_TVAUDIO is not set
-# CONFIG_VIDEO_TDA7432 is not set
-# CONFIG_VIDEO_TDA9840 is not set
-# CONFIG_VIDEO_TEA6415C is not set
-# CONFIG_VIDEO_TEA6420 is not set
-# CONFIG_VIDEO_MSP3400 is not set
-# CONFIG_VIDEO_CS5345 is not set
-# CONFIG_VIDEO_CS53L32A is not set
-# CONFIG_VIDEO_TLV320AIC23B is not set
-# CONFIG_VIDEO_WM8775 is not set
-# CONFIG_VIDEO_WM8739 is not set
-# CONFIG_VIDEO_VP27SMPX is not set
-
-#
-# RDS decoders
-#
-# CONFIG_VIDEO_SAA6588 is not set
-
-#
-# Video decoders
-#
-# CONFIG_VIDEO_ADV7180 is not set
-# CONFIG_VIDEO_BT819 is not set
-# CONFIG_VIDEO_BT856 is not set
-# CONFIG_VIDEO_BT866 is not set
-# CONFIG_VIDEO_KS0127 is not set
-# CONFIG_VIDEO_SAA7110 is not set
-# CONFIG_VIDEO_SAA711X is not set
-# CONFIG_VIDEO_SAA7191 is not set
-# CONFIG_VIDEO_TVP514X is not set
-# CONFIG_VIDEO_TVP5150 is not set
-# CONFIG_VIDEO_TVP7002 is not set
-# CONFIG_VIDEO_VPX3220 is not set
+CONFIG_VIDEO_TUNER=m
+CONFIG_V4L2_MEM2MEM_DEV=m
+CONFIG_VIDEOBUF_GEN=m
+CONFIG_VIDEOBUF_DMA_SG=m
+CONFIG_VIDEOBUF_VMALLOC=m
+CONFIG_VIDEOBUF_DMA_CONTIG=m
+CONFIG_VIDEOBUF_DVB=m
+CONFIG_VIDEOBUF2_CORE=m
+CONFIG_VIDEOBUF2_MEMOPS=m
+CONFIG_VIDEOBUF2_DMA_CONTIG=m
+CONFIG_VIDEOBUF2_VMALLOC=m
+CONFIG_VIDEO_V4L2_INT_DEVICE=m
+CONFIG_DVB_CORE=y
+CONFIG_DVB_NET=y
+CONFIG_TTPCI_EEPROM=m
+CONFIG_DVB_MAX_ADAPTERS=8
+CONFIG_DVB_DYNAMIC_MINORS=y
 
 #
-# Video and audio decoders
+# Media drivers
 #
-# CONFIG_VIDEO_SAA717X is not set
-# CONFIG_VIDEO_CX25840 is not set
+CONFIG_RC_CORE=y
+# CONFIG_RC_MAP is not set
+CONFIG_RC_DECODERS=y
+CONFIG_LIRC=m
+CONFIG_IR_LIRC_CODEC=m
+CONFIG_IR_NEC_DECODER=m
+CONFIG_IR_RC5_DECODER=m
+CONFIG_IR_RC6_DECODER=m
+CONFIG_IR_JVC_DECODER=m
+CONFIG_IR_SONY_DECODER=m
+CONFIG_IR_RC5_SZ_DECODER=m
+CONFIG_IR_SANYO_DECODER=m
+CONFIG_IR_MCE_KBD_DECODER=m
+CONFIG_RC_DEVICES=y
+# CONFIG_RC_ATI_REMOTE is not set
+CONFIG_IR_ENE=m
+CONFIG_IR_IMON=m
+CONFIG_IR_MCEUSB=m
+CONFIG_IR_ITE_CIR=m
+CONFIG_IR_FINTEK=m
+CONFIG_IR_NUVOTON=m
+CONFIG_IR_REDRAT3=m
+CONFIG_IR_STREAMZAP=m
+CONFIG_IR_WINBOND_CIR=m
+CONFIG_IR_IGUANA=m
+CONFIG_IR_TTUSBIR=m
+CONFIG_RC_LOOPBACK=m
+CONFIG_IR_GPIO_CIR=m
+CONFIG_MEDIA_USB_SUPPORT=y
+
+#
+# Webcam devices
+#
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+CONFIG_USB_GSPCA=m
+CONFIG_USB_M5602=m
+CONFIG_USB_STV06XX=m
+CONFIG_USB_GL860=m
+CONFIG_USB_GSPCA_BENQ=m
+CONFIG_USB_GSPCA_CONEX=m
+CONFIG_USB_GSPCA_CPIA1=m
+CONFIG_USB_GSPCA_ETOMS=m
+CONFIG_USB_GSPCA_FINEPIX=m
+CONFIG_USB_GSPCA_JEILINJ=m
+CONFIG_USB_GSPCA_JL2005BCD=m
+CONFIG_USB_GSPCA_KINECT=m
+CONFIG_USB_GSPCA_KONICA=m
+CONFIG_USB_GSPCA_MARS=m
+CONFIG_USB_GSPCA_MR97310A=m
+CONFIG_USB_GSPCA_NW80X=m
+CONFIG_USB_GSPCA_OV519=m
+CONFIG_USB_GSPCA_OV534=m
+CONFIG_USB_GSPCA_OV534_9=m
+CONFIG_USB_GSPCA_PAC207=m
+CONFIG_USB_GSPCA_PAC7302=m
+CONFIG_USB_GSPCA_PAC7311=m
+CONFIG_USB_GSPCA_SE401=m
+CONFIG_USB_GSPCA_SN9C2028=m
+CONFIG_USB_GSPCA_SN9C20X=m
+CONFIG_USB_GSPCA_SONIXB=m
+CONFIG_USB_GSPCA_SONIXJ=m
+CONFIG_USB_GSPCA_SPCA500=m
+CONFIG_USB_GSPCA_SPCA501=m
+CONFIG_USB_GSPCA_SPCA505=m
+CONFIG_USB_GSPCA_SPCA506=m
+CONFIG_USB_GSPCA_SPCA508=m
+CONFIG_USB_GSPCA_SPCA561=m
+CONFIG_USB_GSPCA_SPCA1528=m
+CONFIG_USB_GSPCA_SQ905=m
+CONFIG_USB_GSPCA_SQ905C=m
+CONFIG_USB_GSPCA_SQ930X=m
+CONFIG_USB_GSPCA_STK014=m
+CONFIG_USB_GSPCA_STV0680=m
+CONFIG_USB_GSPCA_SUNPLUS=m
+CONFIG_USB_GSPCA_T613=m
+# CONFIG_USB_GSPCA_TOPRO is not set
+CONFIG_USB_GSPCA_TV8532=m
+CONFIG_USB_GSPCA_VC032X=m
+CONFIG_USB_GSPCA_VICAM=m
+CONFIG_USB_GSPCA_XIRLINK_CIT=m
+CONFIG_USB_GSPCA_ZC3XX=m
+CONFIG_USB_PWC=m
+# CONFIG_USB_PWC_DEBUG is not set
+CONFIG_USB_PWC_INPUT_EVDEV=y
+CONFIG_VIDEO_CPIA2=m
+CONFIG_USB_ZR364XX=m
+CONFIG_USB_STKWEBCAM=m
+CONFIG_USB_S2255=m
+# CONFIG_USB_SN9C102 is not set
 
 #
-# MPEG video encoders
-#
-# CONFIG_VIDEO_CX2341X is not set
-
+# Analog TV USB devices
 #
-# Video encoders
+# CONFIG_VIDEO_PVRUSB2 is not set
+# CONFIG_VIDEO_HDPVR is not set
+# CONFIG_VIDEO_TLG2300 is not set
+# CONFIG_VIDEO_USBVISION is not set
+# CONFIG_VIDEO_STK1160 is not set
+
+#
+# Analog/digital TV USB devices
+#
+CONFIG_VIDEO_AU0828=m
+CONFIG_VIDEO_AU0828_V4L2=y
+CONFIG_VIDEO_CX231XX=m
+CONFIG_VIDEO_CX231XX_RC=y
+CONFIG_VIDEO_CX231XX_ALSA=m
+CONFIG_VIDEO_CX231XX_DVB=m
+CONFIG_VIDEO_TM6000=m
+CONFIG_VIDEO_TM6000_ALSA=m
+CONFIG_VIDEO_TM6000_DVB=m
+
+#
+# Digital TV USB devices
+#
+CONFIG_DVB_USB=m
+# CONFIG_DVB_USB_DEBUG is not set
+CONFIG_DVB_USB_A800=m
+CONFIG_DVB_USB_DIBUSB_MB=m
+CONFIG_DVB_USB_DIBUSB_MB_FAULTY=y
+CONFIG_DVB_USB_DIBUSB_MC=m
+CONFIG_DVB_USB_DIB0700=m
+CONFIG_DVB_USB_UMT_010=m
+CONFIG_DVB_USB_CXUSB=m
+CONFIG_DVB_USB_M920X=m
+CONFIG_DVB_USB_DIGITV=m
+CONFIG_DVB_USB_VP7045=m
+CONFIG_DVB_USB_VP702X=m
+CONFIG_DVB_USB_GP8PSK=m
+CONFIG_DVB_USB_NOVA_T_USB2=m
+CONFIG_DVB_USB_TTUSB2=m
+CONFIG_DVB_USB_DTT200U=m
+CONFIG_DVB_USB_OPERA1=m
+CONFIG_DVB_USB_AF9005=m
+CONFIG_DVB_USB_AF9005_REMOTE=m
+# CONFIG_DVB_USB_PCTV452E is not set
+CONFIG_DVB_USB_DW2102=m
+CONFIG_DVB_USB_CINERGY_T2=m
+CONFIG_DVB_USB_DTV5100=m
+CONFIG_DVB_USB_FRIIO=m
+CONFIG_DVB_USB_AZ6027=m
+CONFIG_DVB_USB_TECHNISAT_USB2=m
+CONFIG_DVB_USB_V2=m
+CONFIG_DVB_USB_AF9015=m
+CONFIG_DVB_USB_AF9035=m
+CONFIG_DVB_USB_ANYSEE=m
+CONFIG_DVB_USB_AU6610=m
+CONFIG_DVB_USB_AZ6007=m
+CONFIG_DVB_USB_CE6230=m
+CONFIG_DVB_USB_EC168=m
+CONFIG_DVB_USB_GL861=m
+# CONFIG_DVB_USB_IT913X is not set
+CONFIG_DVB_USB_LME2510=m
+# CONFIG_DVB_USB_MXL111SF is not set
+CONFIG_DVB_USB_RTL28XXU=m
+CONFIG_DVB_TTUSB_BUDGET=m
+CONFIG_DVB_TTUSB_DEC=m
+CONFIG_SMS_USB_DRV=m
+CONFIG_DVB_B2C2_FLEXCOP_USB=m
+# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set
+
+#
+# Webcam, TV (analog/digital) USB devices
+#
+CONFIG_VIDEO_EM28XX=m
+CONFIG_VIDEO_EM28XX_ALSA=m
+CONFIG_VIDEO_EM28XX_DVB=m
+CONFIG_VIDEO_EM28XX_RC=m
+CONFIG_MEDIA_PCI_SUPPORT=y
+
+#
+# Media capture support
+#
+CONFIG_VIDEO_MEYE=m
+
+#
+# Media capture/analog TV support
 #
-# CONFIG_VIDEO_SAA7127 is not set
-# CONFIG_VIDEO_SAA7185 is not set
-# CONFIG_VIDEO_ADV7170 is not set
-# CONFIG_VIDEO_ADV7175 is not set
-# CONFIG_VIDEO_ADV7343 is not set
-# CONFIG_VIDEO_AK881X is not set
+# CONFIG_VIDEO_IVTV is not set
+# CONFIG_VIDEO_ZORAN is not set
+# CONFIG_VIDEO_HEXIUM_GEMINI is not set
+# CONFIG_VIDEO_HEXIUM_ORION is not set
+# CONFIG_VIDEO_MXB is not set
 
 #
-# Camera sensor devices
-#
-# CONFIG_VIDEO_OV7670 is not set
-# CONFIG_VIDEO_MT9P031 is not set
-# CONFIG_VIDEO_MT9T001 is not set
-# CONFIG_VIDEO_MT9V011 is not set
-# CONFIG_VIDEO_MT9V032 is not set
-# CONFIG_VIDEO_TCM825X is not set
-# CONFIG_VIDEO_SR030PC30 is not set
-# CONFIG_VIDEO_NOON010PC30 is not set
-# CONFIG_VIDEO_M5MOLS is not set
-# CONFIG_VIDEO_S5K6AA is not set
+# Media capture/analog/hybrid TV support
+#
+CONFIG_VIDEO_CX18=m
+CONFIG_VIDEO_CX18_ALSA=m
+CONFIG_VIDEO_CX23885=m
+CONFIG_MEDIA_ALTERA_CI=m
+CONFIG_VIDEO_CX25821=m
+CONFIG_VIDEO_CX25821_ALSA=m
+CONFIG_VIDEO_CX88=m
+CONFIG_VIDEO_CX88_ALSA=m
+CONFIG_VIDEO_CX88_BLACKBIRD=m
+CONFIG_VIDEO_CX88_DVB=m
+CONFIG_VIDEO_CX88_VP3054=m
+CONFIG_VIDEO_CX88_MPEG=m
+CONFIG_VIDEO_BT848=m
+CONFIG_DVB_BT8XX=m
+CONFIG_VIDEO_SAA7134=m
+CONFIG_VIDEO_SAA7134_ALSA=m
+CONFIG_VIDEO_SAA7134_RC=y
+CONFIG_VIDEO_SAA7134_DVB=m
+CONFIG_VIDEO_SAA7164=m
+
+#
+# Media digital TV PCI Adapters
+#
+CONFIG_DVB_AV7110=m
+CONFIG_DVB_AV7110_OSD=y
+CONFIG_DVB_BUDGET_CORE=m
+CONFIG_DVB_BUDGET=m
+CONFIG_DVB_BUDGET_CI=m
+CONFIG_DVB_BUDGET_AV=m
+CONFIG_DVB_BUDGET_PATCH=m
+CONFIG_DVB_B2C2_FLEXCOP_PCI=m
+# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set
+CONFIG_DVB_PLUTO2=m
+CONFIG_DVB_DM1105=m
+CONFIG_DVB_PT1=m
+CONFIG_MANTIS_CORE=m
+CONFIG_DVB_MANTIS=m
+CONFIG_DVB_HOPPER=m
+CONFIG_DVB_NGENE=m
+# CONFIG_DVB_DDBRIDGE is not set
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_CAFE_CCIC=m
+CONFIG_VIDEO_TIMBERDALE=m
+CONFIG_SOC_CAMERA=m
+CONFIG_SOC_CAMERA_PLATFORM=m
+CONFIG_VIDEO_SH_MOBILE_CSI2=m
+CONFIG_VIDEO_SH_MOBILE_CEU=m
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_MEM2MEM_DEINTERLACE=m
+CONFIG_VIDEO_SH_VEU=m
+# CONFIG_V4L_TEST_DRIVERS is not set
 
 #
-# Flash devices
+# Supported MMC/SDIO adapters
 #
-# CONFIG_VIDEO_ADP1653 is not set
+# CONFIG_SMS_SDIO_DRV is not set
+# CONFIG_MEDIA_PARPORT_SUPPORT is not set
 
 #
-# Video improvement chips
+# Supported FireWire (IEEE 1394) Adapters
 #
-# CONFIG_VIDEO_UPD64031A is not set
-# CONFIG_VIDEO_UPD64083 is not set
+CONFIG_DVB_FIREDTV=m
+CONFIG_DVB_FIREDTV_INPUT=y
+CONFIG_MEDIA_COMMON_OPTIONS=y
 
 #
-# Miscelaneous helper chips
+# common driver options
 #
-# CONFIG_VIDEO_THS7303 is not set
-# CONFIG_VIDEO_M52790 is not set
-# CONFIG_VIDEO_VIVI is not set
-# CONFIG_VIDEO_BT848 is not set
-# CONFIG_VIDEO_PMS is not set
-# CONFIG_VIDEO_BWQCAM is not set
-# CONFIG_VIDEO_CQCAM is not set
-# CONFIG_VIDEO_W9966 is not set
-# CONFIG_VIDEO_CPIA2 is not set
-# CONFIG_VIDEO_ZORAN is not set
-# CONFIG_VIDEO_MEYE is not set
-# CONFIG_VIDEO_SAA7134 is not set
-# CONFIG_VIDEO_MXB is not set
-# CONFIG_VIDEO_HEXIUM_ORION is not set
-# CONFIG_VIDEO_HEXIUM_GEMINI is not set
-# CONFIG_VIDEO_TIMBERDALE is not set
-# CONFIG_VIDEO_CX88 is not set
-# CONFIG_VIDEO_CX23885 is not set
-# CONFIG_VIDEO_CX25821 is not set
-# CONFIG_VIDEO_AU0828 is not set
-# CONFIG_VIDEO_IVTV is not set
-# CONFIG_VIDEO_CX18 is not set
-# CONFIG_VIDEO_SAA7164 is not set
-# CONFIG_VIDEO_CAFE_CCIC is not set
-# CONFIG_VIDEO_VIA_CAMERA is not set
-# CONFIG_SOC_CAMERA is not set
-CONFIG_V4L_USB_DRIVERS=y
-# CONFIG_USB_VIDEO_CLASS is not set
-CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
-# CONFIG_USB_GSPCA is not set
-# CONFIG_VIDEO_PVRUSB2 is not set
-# CONFIG_VIDEO_HDPVR is not set
-# CONFIG_VIDEO_EM28XX is not set
-# CONFIG_VIDEO_TLG2300 is not set
-# CONFIG_VIDEO_CX231XX is not set
-# CONFIG_VIDEO_TM6000 is not set
-# CONFIG_VIDEO_USBVISION is not set
-# CONFIG_USB_ET61X251 is not set
-# CONFIG_USB_SN9C102 is not set
-# CONFIG_USB_PWC is not set
-# CONFIG_USB_ZR364XX is not set
-# CONFIG_USB_STKWEBCAM is not set
-# CONFIG_USB_S2255 is not set
-# CONFIG_V4L_MEM2MEM_DRIVERS is not set
-CONFIG_RADIO_ADAPTERS=y
-# CONFIG_RADIO_CADET is not set
-# CONFIG_RADIO_RTRACK is not set
-# CONFIG_RADIO_RTRACK2 is not set
-# CONFIG_RADIO_AZTECH is not set
-# CONFIG_RADIO_GEMTEK is not set
-# CONFIG_RADIO_MAXIRADIO is not set
-# CONFIG_RADIO_MIROPCM20 is not set
-# CONFIG_RADIO_SF16FMI is not set
-# CONFIG_RADIO_SF16FMR2 is not set
-# CONFIG_RADIO_TERRATEC is not set
-# CONFIG_RADIO_TRUST is not set
-# CONFIG_RADIO_TYPHOON is not set
-# CONFIG_RADIO_ZOLTRIX is not set
-# CONFIG_I2C_SI4713 is not set
-# CONFIG_RADIO_SI4713 is not set
-# CONFIG_USB_DSBR is not set
-# CONFIG_RADIO_SI470X is not set
-# CONFIG_USB_MR800 is not set
-# CONFIG_RADIO_TEA5764 is not set
-# CONFIG_RADIO_SAA7706H is not set
-# CONFIG_RADIO_TEF6862 is not set
-# CONFIG_RADIO_TIMBERDALE is not set
-# CONFIG_RADIO_WL1273 is not set
-
-#
-# Texas Instruments WL128x FM driver (ST based)
-#
-# CONFIG_RADIO_WL128X is not set
-CONFIG_DVB_MAX_ADAPTERS=8
-# CONFIG_DVB_DYNAMIC_MINORS is not set
-CONFIG_DVB_CAPTURE_DRIVERS=y
+CONFIG_VIDEO_CX2341X=m
+CONFIG_VIDEO_BTCX=m
+CONFIG_VIDEO_TVEEPROM=m
+CONFIG_CYPRESS_FIRMWARE=m
+CONFIG_DVB_B2C2_FLEXCOP=m
+CONFIG_VIDEO_SAA7146=m
+CONFIG_VIDEO_SAA7146_VV=m
+CONFIG_SMS_SIANO_MDTV=m
+CONFIG_SMS_SIANO_RC=y
+# CONFIG_SMS_SIANO_DEBUGFS is not set
 
 #
-# Supported SAA7146 based PCI Adapters
+# Media ancillary drivers (tuners, sensors, i2c, frontends)
 #
-# CONFIG_TTPCI_EEPROM is not set
-# CONFIG_DVB_AV7110 is not set
-# CONFIG_DVB_BUDGET_CORE is not set
+CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
+CONFIG_MEDIA_ATTACH=y
+CONFIG_VIDEO_IR_I2C=m
 
 #
-# Supported USB Adapters
+# Audio decoders, processors and mixers
 #
-# CONFIG_DVB_USB is not set
-# CONFIG_DVB_TTUSB_BUDGET is not set
-# CONFIG_DVB_TTUSB_DEC is not set
-# CONFIG_SMS_SIANO_MDTV is not set
+CONFIG_VIDEO_TVAUDIO=m
+CONFIG_VIDEO_TDA7432=m
+CONFIG_VIDEO_MSP3400=m
+CONFIG_VIDEO_CS5345=m
+CONFIG_VIDEO_WM8775=m
 
 #
-# Supported FlexCopII (B2C2) Adapters
+# RDS decoders
 #
-# CONFIG_DVB_B2C2_FLEXCOP is not set
+CONFIG_VIDEO_SAA6588=m
 
 #
-# Supported BT878 Adapters
+# Video decoders
 #
+CONFIG_VIDEO_ADV7180=m
+CONFIG_VIDEO_SAA711X=m
+CONFIG_VIDEO_TVP5150=m
 
 #
-# Supported Pluto2 Adapters
+# Video and audio decoders
 #
-# CONFIG_DVB_PLUTO2 is not set
+CONFIG_VIDEO_CX25840=m
 
 #
-# Supported SDMC DM1105 Adapters
+# Video encoders
 #
-# CONFIG_DVB_DM1105 is not set
 
 #
-# Supported FireWire (IEEE 1394) Adapters
+# Camera sensor devices
 #
-# CONFIG_DVB_FIREDTV is not set
+CONFIG_VIDEO_OV7670=m
+CONFIG_VIDEO_MT9V011=m
 
 #
-# Supported Earthsoft PT1 Adapters
+# Flash devices
 #
-# CONFIG_DVB_PT1 is not set
 
 #
-# Supported Mantis Adapters
+# Video improvement chips
 #
-# CONFIG_MANTIS_CORE is not set
 
 #
-# Supported nGene Adapters
+# Miscellaneous helper chips
 #
-# CONFIG_DVB_NGENE is not set
 
 #
-# Supported ddbridge ('Octopus') Adapters
+# Sensors used on soc_camera driver
 #
-# CONFIG_DVB_DDBRIDGE is not set
 
 #
-# Supported DVB Frontends
+# soc_camera sensor drivers
 #
-# CONFIG_DVB_FE_CUSTOMISE is not set
+CONFIG_SOC_CAMERA_IMX074=m
+CONFIG_SOC_CAMERA_MT9M001=m
+CONFIG_SOC_CAMERA_MT9M111=m
+CONFIG_SOC_CAMERA_MT9T031=m
+CONFIG_SOC_CAMERA_MT9T112=m
+CONFIG_SOC_CAMERA_MT9V022=m
+CONFIG_SOC_CAMERA_OV2640=m
+CONFIG_SOC_CAMERA_OV5642=m
+CONFIG_SOC_CAMERA_OV6650=m
+CONFIG_SOC_CAMERA_OV772X=m
+CONFIG_SOC_CAMERA_OV9640=m
+CONFIG_SOC_CAMERA_OV9740=m
+CONFIG_SOC_CAMERA_RJ54N1=m
+CONFIG_SOC_CAMERA_TW9910=m
+CONFIG_MEDIA_TUNER=m
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_MEDIA_TUNER_TDA8290=m
+CONFIG_MEDIA_TUNER_TDA827X=m
+CONFIG_MEDIA_TUNER_TDA18271=m
+CONFIG_MEDIA_TUNER_TDA9887=m
+CONFIG_MEDIA_TUNER_MT20XX=m
+CONFIG_MEDIA_TUNER_MT2060=m
+CONFIG_MEDIA_TUNER_MT2063=m
+CONFIG_MEDIA_TUNER_MT2266=m
+CONFIG_MEDIA_TUNER_MT2131=m
+CONFIG_MEDIA_TUNER_QT1010=m
+CONFIG_MEDIA_TUNER_XC2028=m
+CONFIG_MEDIA_TUNER_XC5000=m
+CONFIG_MEDIA_TUNER_XC4000=m
+CONFIG_MEDIA_TUNER_MXL5005S=m
+CONFIG_MEDIA_TUNER_MXL5007T=m
+CONFIG_MEDIA_TUNER_MC44S803=m
+CONFIG_MEDIA_TUNER_MAX2165=m
+CONFIG_MEDIA_TUNER_TDA18218=m
+CONFIG_MEDIA_TUNER_FC0011=m
+CONFIG_MEDIA_TUNER_FC0012=m
+CONFIG_MEDIA_TUNER_FC0013=m
+CONFIG_MEDIA_TUNER_TDA18212=m
+CONFIG_MEDIA_TUNER_E4000=m
+CONFIG_MEDIA_TUNER_FC2580=m
+CONFIG_MEDIA_TUNER_TUA9001=m
+CONFIG_MEDIA_TUNER_IT913X=m
+CONFIG_MEDIA_TUNER_R820T=m
 
 #
 # Multistandard (satellite) frontends
 #
+CONFIG_DVB_STB0899=m
+CONFIG_DVB_STB6100=m
+CONFIG_DVB_STV090x=m
+CONFIG_DVB_STV6110x=m
 
 #
 # Multistandard (cable + terrestrial) frontends
 #
+CONFIG_DVB_DRXK=m
+CONFIG_DVB_TDA18271C2DD=m
 
 #
 # DVB-S (satellite) frontends
 #
+CONFIG_DVB_CX24110=m
+CONFIG_DVB_CX24123=m
+CONFIG_DVB_MT312=m
+CONFIG_DVB_ZL10036=m
+CONFIG_DVB_ZL10039=m
+CONFIG_DVB_S5H1420=m
+CONFIG_DVB_STV0288=m
+CONFIG_DVB_STB6000=m
+CONFIG_DVB_STV0299=m
+CONFIG_DVB_STV6110=m
+CONFIG_DVB_STV0900=m
+CONFIG_DVB_TDA8083=m
+CONFIG_DVB_TDA10086=m
+CONFIG_DVB_TDA8261=m
+CONFIG_DVB_VES1X93=m
+CONFIG_DVB_TUNER_ITD1000=m
+CONFIG_DVB_TUNER_CX24113=m
+CONFIG_DVB_TDA826X=m
+CONFIG_DVB_TUA6100=m
+CONFIG_DVB_CX24116=m
+CONFIG_DVB_SI21XX=m
+CONFIG_DVB_TS2020=m
+CONFIG_DVB_DS3000=m
+CONFIG_DVB_MB86A16=m
+CONFIG_DVB_TDA10071=m
 
 #
 # DVB-T (terrestrial) frontends
 #
+CONFIG_DVB_SP8870=m
+CONFIG_DVB_SP887X=m
+CONFIG_DVB_CX22700=m
+CONFIG_DVB_CX22702=m
+CONFIG_DVB_DRXD=m
+CONFIG_DVB_L64781=m
+CONFIG_DVB_TDA1004X=m
+CONFIG_DVB_NXT6000=m
+CONFIG_DVB_MT352=m
+CONFIG_DVB_ZL10353=m
+CONFIG_DVB_DIB3000MB=m
+CONFIG_DVB_DIB3000MC=m
+CONFIG_DVB_DIB7000M=m
+CONFIG_DVB_DIB7000P=m
+CONFIG_DVB_TDA10048=m
+CONFIG_DVB_AF9013=m
+CONFIG_DVB_EC100=m
+CONFIG_DVB_STV0367=m
+CONFIG_DVB_CXD2820R=m
+CONFIG_DVB_RTL2830=m
+CONFIG_DVB_RTL2832=m
 
 #
 # DVB-C (cable) frontends
 #
+CONFIG_DVB_VES1820=m
+CONFIG_DVB_TDA10021=m
+CONFIG_DVB_TDA10023=m
+CONFIG_DVB_STV0297=m
 
 #
 # ATSC (North American/Korean Terrestrial/Cable DTV) frontends
 #
+CONFIG_DVB_NXT200X=m
+CONFIG_DVB_OR51211=m
+CONFIG_DVB_OR51132=m
+CONFIG_DVB_BCM3510=m
+CONFIG_DVB_LGDT330X=m
+CONFIG_DVB_LGDT3305=m
+CONFIG_DVB_S5H1409=m
+CONFIG_DVB_AU8522=m
+CONFIG_DVB_AU8522_DTV=m
+CONFIG_DVB_AU8522_V4L=m
+CONFIG_DVB_S5H1411=m
 
 #
 # ISDB-T (terrestrial) frontends
 #
+CONFIG_DVB_S921=m
+CONFIG_DVB_DIB8000=m
+CONFIG_DVB_MB86A20S=m
 
 #
 # Digital terrestrial only tuners/PLL
 #
+CONFIG_DVB_PLL=m
+CONFIG_DVB_TUNER_DIB0070=m
+CONFIG_DVB_TUNER_DIB0090=m
 
 #
 # SEC control devices for DVB-S
 #
+CONFIG_DVB_LNBP21=m
+CONFIG_DVB_ISL6405=m
+CONFIG_DVB_ISL6421=m
+CONFIG_DVB_ISL6423=m
+CONFIG_DVB_A8293=m
+CONFIG_DVB_LGS8GXX=m
+CONFIG_DVB_ATBM8830=m
+CONFIG_DVB_TDA665x=m
+CONFIG_DVB_IX2505V=m
+CONFIG_DVB_M88RS2000=m
+CONFIG_DVB_AF9033=m
 
 #
 # Tools to develop new frontends
@@ -3239,27 +3560,40 @@ CONFIG_DVB_CAPTURE_DRIVERS=y
 #
 # Graphics support
 #
-CONFIG_AGP=m
-CONFIG_AGP_ALI=m
-CONFIG_AGP_ATI=m
-CONFIG_AGP_AMD=m
-CONFIG_AGP_AMD64=m
-CONFIG_AGP_INTEL=m
-CONFIG_AGP_NVIDIA=m
-CONFIG_AGP_SIS=m
-CONFIG_AGP_SWORKS=m
-CONFIG_AGP_VIA=m
-CONFIG_AGP_EFFICEON=m
+CONFIG_AGP=y
+# CONFIG_AGP_ALI is not set
+# CONFIG_AGP_ATI is not set
+# CONFIG_AGP_AMD is not set
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+# CONFIG_AGP_NVIDIA is not set
+CONFIG_AGP_SIS=y
+# CONFIG_AGP_SWORKS is not set
+CONFIG_AGP_VIA=y
+# CONFIG_AGP_EFFICEON is not set
 CONFIG_VGA_ARB=y
 CONFIG_VGA_ARB_MAX_GPUS=16
-# CONFIG_VGA_SWITCHEROO is not set
+CONFIG_VGA_SWITCHEROO=y
 CONFIG_DRM=m
+CONFIG_DRM_USB=m
 CONFIG_DRM_KMS_HELPER=m
+# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set
 CONFIG_DRM_TTM=m
+
+#
+# I2C encoder or helper chips
+#
+# CONFIG_DRM_I2C_CH7006 is not set
+# CONFIG_DRM_I2C_SIL164 is not set
+CONFIG_DRM_I2C_NXP_TDA998X=m
 CONFIG_DRM_TDFX=m
 CONFIG_DRM_R128=m
 CONFIG_DRM_RADEON=m
-CONFIG_DRM_RADEON_KMS=y
+# CONFIG_DRM_RADEON_UMS is not set
+CONFIG_DRM_NOUVEAU=m
+CONFIG_NOUVEAU_DEBUG=5
+CONFIG_NOUVEAU_DEBUG_DEFAULT=3
+CONFIG_DRM_NOUVEAU_BACKLIGHT=y
 CONFIG_DRM_I810=m
 CONFIG_DRM_I915=m
 CONFIG_DRM_I915_KMS=y
@@ -3268,151 +3602,118 @@ CONFIG_DRM_SIS=m
 CONFIG_DRM_VIA=m
 CONFIG_DRM_SAVAGE=m
 CONFIG_DRM_VMWGFX=m
-CONFIG_STUB_POULSBO=m
-CONFIG_VGASTATE=m
+# CONFIG_DRM_VMWGFX_FBCON is not set
+CONFIG_DRM_GMA500=m
+CONFIG_DRM_GMA600=y
+CONFIG_DRM_GMA3600=y
+CONFIG_DRM_UDL=m
+CONFIG_DRM_AST=m
+CONFIG_DRM_MGAG200=m
+CONFIG_DRM_CIRRUS_QEMU=m
+CONFIG_DRM_QXL=m
+# CONFIG_VGASTATE is not set
 CONFIG_VIDEO_OUTPUT_CONTROL=m
-CONFIG_FB=y
-CONFIG_FIRMWARE_EDID=y
-CONFIG_FB_DDC=m
-CONFIG_FB_BOOT_VESA_SUPPORT=y
-CONFIG_FB_CFB_FILLRECT=y
-CONFIG_FB_CFB_COPYAREA=y
-CONFIG_FB_CFB_IMAGEBLIT=y
+CONFIG_HDMI=y
+CONFIG_FB=m
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=m
+CONFIG_FB_CFB_COPYAREA=m
+CONFIG_FB_CFB_IMAGEBLIT=m
 # CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
 CONFIG_FB_SYS_FILLRECT=m
 CONFIG_FB_SYS_COPYAREA=m
 CONFIG_FB_SYS_IMAGEBLIT=m
 # CONFIG_FB_FOREIGN_ENDIAN is not set
 CONFIG_FB_SYS_FOPS=m
-# CONFIG_FB_WMT_GE_ROPS is not set
 CONFIG_FB_DEFERRED_IO=y
-CONFIG_FB_SVGALIB=m
+# CONFIG_FB_SVGALIB is not set
 # CONFIG_FB_MACMODES is not set
 CONFIG_FB_BACKLIGHT=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
 
 #
 # Frame buffer hardware drivers
 #
-CONFIG_FB_CIRRUS=m
-CONFIG_FB_PM2=m
-CONFIG_FB_PM2_FIFO_DISCONNECT=y
-CONFIG_FB_CYBER2000=m
-CONFIG_FB_CYBER2000_DDC=y
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
 # CONFIG_FB_ARC is not set
-CONFIG_FB_ASILIANT=y
-# CONFIG_FB_IMSTT is not set
 # CONFIG_FB_VGA16 is not set
-CONFIG_FB_UVESA=m
-CONFIG_FB_VESA=y
+# CONFIG_FB_UVESA is not set
 # CONFIG_FB_N411 is not set
 # CONFIG_FB_HGA is not set
 # CONFIG_FB_S1D13XXX is not set
-CONFIG_FB_NVIDIA=m
-CONFIG_FB_NVIDIA_I2C=y
-# CONFIG_FB_NVIDIA_DEBUG is not set
-CONFIG_FB_NVIDIA_BACKLIGHT=y
-CONFIG_FB_RIVA=m
-CONFIG_FB_RIVA_I2C=y
-# CONFIG_FB_RIVA_DEBUG is not set
-CONFIG_FB_RIVA_BACKLIGHT=y
-CONFIG_FB_I810=m
-CONFIG_FB_I810_GTF=y
-CONFIG_FB_I810_I2C=y
-CONFIG_FB_LE80578=m
-CONFIG_FB_CARILLO_RANCH=m
-CONFIG_FB_MATROX=m
-CONFIG_FB_MATROX_MILLENIUM=y
-CONFIG_FB_MATROX_MYSTIQUE=y
-CONFIG_FB_MATROX_G=y
-CONFIG_FB_MATROX_I2C=m
-CONFIG_FB_MATROX_MAVEN=m
-CONFIG_FB_RADEON=m
-CONFIG_FB_RADEON_I2C=y
-CONFIG_FB_RADEON_BACKLIGHT=y
-# CONFIG_FB_RADEON_DEBUG is not set
-CONFIG_FB_ATY128=m
-CONFIG_FB_ATY128_BACKLIGHT=y
-CONFIG_FB_ATY=m
-CONFIG_FB_ATY_CT=y
-# CONFIG_FB_ATY_GENERIC_LCD is not set
-CONFIG_FB_ATY_GX=y
-CONFIG_FB_ATY_BACKLIGHT=y
-CONFIG_FB_S3=m
-CONFIG_FB_S3_DDC=y
-CONFIG_FB_SAVAGE=m
-CONFIG_FB_SAVAGE_I2C=y
-CONFIG_FB_SAVAGE_ACCEL=y
-CONFIG_FB_SIS=m
-CONFIG_FB_SIS_300=y
-CONFIG_FB_SIS_315=y
-CONFIG_FB_VIA=m
-# CONFIG_FB_VIA_DIRECT_PROCFS is not set
-# CONFIG_FB_VIA_X_COMPATIBILITY is not set
-CONFIG_FB_NEOMAGIC=m
-CONFIG_FB_KYRO=m
-CONFIG_FB_3DFX=m
-CONFIG_FB_3DFX_ACCEL=y
-CONFIG_FB_3DFX_I2C=y
-CONFIG_FB_VOODOO1=m
-CONFIG_FB_VT8623=m
-CONFIG_FB_TRIDENT=m
-CONFIG_FB_ARK=m
-CONFIG_FB_PM3=m
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I740 is not set
+# CONFIG_FB_I810 is not set
+# CONFIG_FB_LE80578 is not set
+# CONFIG_FB_INTEL is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_VIA is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
 # CONFIG_FB_CARMINE is not set
-CONFIG_FB_GEODE=y
-CONFIG_FB_GEODE_LX=m
-CONFIG_FB_GEODE_GX=m
-CONFIG_FB_GEODE_GX1=m
-CONFIG_FB_TMIO=m
-CONFIG_FB_TMIO_ACCELL=y
-CONFIG_FB_SM501=m
-CONFIG_FB_SMSCUFX=m
+# CONFIG_FB_GEODE is not set
+# CONFIG_FB_TMIO is not set
+# CONFIG_FB_SM501 is not set
+# CONFIG_FB_SMSCUFX is not set
 # CONFIG_FB_UDL is not set
+# CONFIG_FB_GOLDFISH is not set
 # CONFIG_FB_VIRTUAL is not set
-CONFIG_FB_METRONOME=m
+# CONFIG_FB_METRONOME is not set
 # CONFIG_FB_MB862XX is not set
 # CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_AUO_K190X is not set
+# CONFIG_EXYNOS_VIDEO is not set
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_LCD_CLASS_DEVICE=m
 CONFIG_LCD_PLATFORM=m
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_GENERIC=m
-CONFIG_BACKLIGHT_PROGEAR=m
-CONFIG_BACKLIGHT_CARILLO_RANCH=m
+# CONFIG_BACKLIGHT_GENERIC is not set
+# CONFIG_BACKLIGHT_LM3533 is not set
+CONFIG_BACKLIGHT_PWM=m
 CONFIG_BACKLIGHT_APPLE=m
-CONFIG_BACKLIGHT_SAHARA=m
-CONFIG_BACKLIGHT_ADP8860=m
-CONFIG_BACKLIGHT_ADP8870=m
-CONFIG_BACKLIGHT_PCF50633=m
-
-#
-# Display device support
-#
-CONFIG_DISPLAY_SUPPORT=m
-
-#
-# Display hardware drivers
-#
+# CONFIG_BACKLIGHT_SAHARA is not set
+# CONFIG_BACKLIGHT_ADP8860 is not set
+# CONFIG_BACKLIGHT_ADP8870 is not set
+# CONFIG_BACKLIGHT_LM3630 is not set
+# CONFIG_BACKLIGHT_LM3639 is not set
+# CONFIG_BACKLIGHT_LP855X is not set
+# CONFIG_BACKLIGHT_OT200 is not set
 
 #
 # Console display driver support
 #
 CONFIG_VGA_CONSOLE=y
 # CONFIG_VGACON_SOFT_SCROLLBACK is not set
-# CONFIG_MDA_CONSOLE is not set
 CONFIG_DUMMY_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=m
 CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 # CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
 # CONFIG_FONTS is not set
 CONFIG_FONT_8x8=y
 CONFIG_FONT_8x16=y
 CONFIG_LOGO=y
-CONFIG_LOGO_LINUX_MONO=y
-CONFIG_LOGO_LINUX_VGA16=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
 CONFIG_LOGO_LINUX_CLUT224=y
+# CONFIG_FB_SSD1307 is not set
 CONFIG_SOUND=m
 CONFIG_SOUND_OSS_CORE=y
 CONFIG_SOUND_OSS_CORE_PRECLAIM=y
@@ -3434,22 +3735,24 @@ CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
 CONFIG_SND_DYNAMIC_MINORS=y
 # CONFIG_SND_SUPPORT_OLD_API is not set
 CONFIG_SND_VERBOSE_PROCFS=y
-# CONFIG_SND_VERBOSE_PRINTK is not set
-# CONFIG_SND_DEBUG is not set
+CONFIG_SND_VERBOSE_PRINTK=y
+CONFIG_SND_DEBUG=y
+# CONFIG_SND_DEBUG_VERBOSE is not set
+CONFIG_SND_PCM_XRUN_DEBUG=y
 CONFIG_SND_VMASTER=y
+CONFIG_SND_KCTL_JACK=y
 CONFIG_SND_DMA_SGBUF=y
 CONFIG_SND_RAWMIDI_SEQ=m
 CONFIG_SND_OPL3_LIB_SEQ=m
-CONFIG_SND_OPL4_LIB_SEQ=m
-CONFIG_SND_SBAWE_SEQ=m
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
 CONFIG_SND_EMU10K1_SEQ=m
 CONFIG_SND_MPU401_UART=m
 CONFIG_SND_OPL3_LIB=m
-CONFIG_SND_OPL4_LIB=m
 CONFIG_SND_VX_LIB=m
 CONFIG_SND_AC97_CODEC=m
 CONFIG_SND_DRIVERS=y
-# CONFIG_SND_PCSP is not set
+CONFIG_SND_PCSP=m
 CONFIG_SND_DUMMY=m
 CONFIG_SND_ALOOP=m
 CONFIG_SND_VIRMIDI=m
@@ -3459,44 +3762,9 @@ CONFIG_SND_SERIAL_U16550=m
 CONFIG_SND_MPU401=m
 CONFIG_SND_PORTMAN2X4=m
 CONFIG_SND_AC97_POWER_SAVE=y
-CONFIG_SND_AC97_POWER_SAVE_DEFAULT=60
-CONFIG_SND_WSS_LIB=m
+CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0
 CONFIG_SND_SB_COMMON=m
-CONFIG_SND_SB8_DSP=m
 CONFIG_SND_SB16_DSP=m
-CONFIG_SND_ISA=y
-CONFIG_SND_ADLIB=m
-CONFIG_SND_AD1816A=m
-CONFIG_SND_AD1848=m
-CONFIG_SND_ALS100=m
-CONFIG_SND_AZT1605=m
-CONFIG_SND_AZT2316=m
-CONFIG_SND_AZT2320=m
-CONFIG_SND_CMI8330=m
-CONFIG_SND_CS4231=m
-CONFIG_SND_CS4236=m
-CONFIG_SND_ES1688=m
-CONFIG_SND_ES18XX=m
-CONFIG_SND_SC6000=m
-CONFIG_SND_GUSCLASSIC=m
-CONFIG_SND_GUSEXTREME=m
-CONFIG_SND_GUSMAX=m
-CONFIG_SND_INTERWAVE=m
-CONFIG_SND_INTERWAVE_STB=m
-CONFIG_SND_JAZZ16=m
-CONFIG_SND_OPL3SA2=m
-CONFIG_SND_OPTI92X_AD1848=m
-CONFIG_SND_OPTI92X_CS4231=m
-CONFIG_SND_OPTI93X=m
-CONFIG_SND_MIRO=m
-CONFIG_SND_SB8=m
-CONFIG_SND_SB16=m
-CONFIG_SND_SBAWE=m
-CONFIG_SND_SB16_CSP=y
-CONFIG_SND_SSCAPE=m
-CONFIG_SND_WAVEFRONT=m
-CONFIG_SND_MSND_PINNACLE=m
-CONFIG_SND_MSND_CLASSIC=m
 CONFIG_SND_TEA575X=m
 CONFIG_SND_PCI=y
 CONFIG_SND_AD1889=m
@@ -3509,7 +3777,7 @@ CONFIG_SND_ATIIXP_MODEM=m
 CONFIG_SND_AU8810=m
 CONFIG_SND_AU8820=m
 CONFIG_SND_AU8830=m
-CONFIG_SND_AW2=m
+# CONFIG_SND_AW2 is not set
 CONFIG_SND_AZT3328=m
 CONFIG_SND_BT87X=m
 # CONFIG_SND_BT87X_OVERCLOCK is not set
@@ -3552,11 +3820,10 @@ CONFIG_SND_HDA_PREALLOC_SIZE=64
 CONFIG_SND_HDA_HWDEP=y
 CONFIG_SND_HDA_RECONFIG=y
 CONFIG_SND_HDA_INPUT_BEEP=y
-CONFIG_SND_HDA_INPUT_BEEP_MODE=2
+CONFIG_SND_HDA_INPUT_BEEP_MODE=1
 CONFIG_SND_HDA_INPUT_JACK=y
-CONFIG_SND_HDA_PATCH_LOADER=y
+# CONFIG_SND_HDA_PATCH_LOADER is not set
 CONFIG_SND_HDA_CODEC_REALTEK=y
-CONFIG_SND_HDA_ENABLE_REALTEK_QUIRKS=y
 CONFIG_SND_HDA_CODEC_ANALOG=y
 CONFIG_SND_HDA_CODEC_SIGMATEL=y
 CONFIG_SND_HDA_CODEC_VIA=y
@@ -3565,11 +3832,11 @@ CONFIG_SND_HDA_CODEC_CIRRUS=y
 CONFIG_SND_HDA_CODEC_CONEXANT=y
 CONFIG_SND_HDA_CODEC_CA0110=y
 CONFIG_SND_HDA_CODEC_CA0132=y
+# CONFIG_SND_HDA_CODEC_CA0132_DSP is not set
 CONFIG_SND_HDA_CODEC_CMEDIA=y
 CONFIG_SND_HDA_CODEC_SI3054=y
 CONFIG_SND_HDA_GENERIC=y
-CONFIG_SND_HDA_POWER_SAVE=y
-CONFIG_SND_HDA_POWER_SAVE_DEFAULT=60
+CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
 CONFIG_SND_HDSP=m
 CONFIG_SND_HDSPM=m
 CONFIG_SND_ICE1712=m
@@ -3588,7 +3855,7 @@ CONFIG_SND_RIPTIDE=m
 CONFIG_SND_RME32=m
 CONFIG_SND_RME96=m
 CONFIG_SND_RME9652=m
-CONFIG_SND_SIS7019=m
+# CONFIG_SND_SIS7019 is not set
 CONFIG_SND_SONICVIBES=m
 CONFIG_SND_TRIDENT=m
 CONFIG_SND_VIA82XX=m
@@ -3607,103 +3874,107 @@ CONFIG_SND_USB_6FIRE=m
 CONFIG_SND_FIREWIRE=y
 CONFIG_SND_FIREWIRE_LIB=m
 CONFIG_SND_FIREWIRE_SPEAKERS=m
-CONFIG_SND_ISIGHT=m
-CONFIG_SND_PCMCIA=y
-CONFIG_SND_VXPOCKET=m
-CONFIG_SND_PDAUDIOCF=m
+# CONFIG_SND_ISIGHT is not set
+# CONFIG_SND_SCS1X is not set
 # CONFIG_SND_SOC is not set
 # CONFIG_SOUND_PRIME is not set
 CONFIG_AC97_BUS=m
-CONFIG_HID_SUPPORT=y
-CONFIG_HID=y
-# CONFIG_HIDRAW is not set
 
 #
-# USB Input Devices
+# HID support
 #
-CONFIG_USB_HID=m
-# CONFIG_HID_PID is not set
-CONFIG_USB_HIDDEV=y
+CONFIG_HID=y
+# CONFIG_HID_BATTERY_STRENGTH is not set
+CONFIG_HIDRAW=y
+# CONFIG_UHID is not set
+CONFIG_HID_GENERIC=y
 
 #
 # Special HID drivers
 #
-CONFIG_HID_A4TECH=m
-CONFIG_HID_ACRUX=m
-# CONFIG_HID_ACRUX_FF is not set
-CONFIG_HID_APPLE=m
-CONFIG_HID_BELKIN=m
-CONFIG_HID_CHERRY=m
-CONFIG_HID_CHICONY=m
-CONFIG_HID_PRODIKEYS=m
-CONFIG_HID_CYPRESS=m
-CONFIG_HID_DRAGONRISE=m
-# CONFIG_DRAGONRISE_FF is not set
+CONFIG_HID_A4TECH=y
+# CONFIG_HID_ACRUX is not set
+CONFIG_HID_APPLE=y
+# CONFIG_HID_APPLEIR is not set
+CONFIG_HID_AUREAL=m
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+# CONFIG_HID_PRODIKEYS is not set
+CONFIG_HID_CYPRESS=y
+# CONFIG_HID_DRAGONRISE is not set
 # CONFIG_HID_EMS_FF is not set
-CONFIG_HID_ELECOM=m
-CONFIG_HID_EZKEY=m
-CONFIG_HID_HOLTEK=m
-# CONFIG_HOLTEK_FF is not set
+# CONFIG_HID_ELECOM is not set
+CONFIG_HID_EZKEY=y
+# CONFIG_HID_HOLTEK is not set
 CONFIG_HID_KEYTOUCH=m
-CONFIG_HID_KYE=m
-CONFIG_HID_UCLOGIC=m
-CONFIG_HID_WALTOP=m
+CONFIG_HID_KYE=y
+# CONFIG_HID_UCLOGIC is not set
+# CONFIG_HID_WALTOP is not set
 CONFIG_HID_GYRATION=m
+# CONFIG_HID_ICADE is not set
 CONFIG_HID_TWINHAN=m
-CONFIG_HID_KENSINGTON=m
+CONFIG_HID_KENSINGTON=y
 CONFIG_HID_LCPOWER=m
-CONFIG_HID_LOGITECH=m
+CONFIG_HID_LENOVO_TPKBD=m
+CONFIG_HID_LOGITECH=y
 CONFIG_HID_LOGITECH_DJ=m
 # CONFIG_LOGITECH_FF is not set
 # CONFIG_LOGIRUMBLEPAD2_FF is not set
 # CONFIG_LOGIG940_FF is not set
 # CONFIG_LOGIWHEELS_FF is not set
-CONFIG_HID_MAGICMOUSE=m
-CONFIG_HID_MICROSOFT=m
-CONFIG_HID_MONTEREY=m
-CONFIG_HID_MULTITOUCH=m
-CONFIG_HID_NTRIG=m
+# CONFIG_HID_MAGICMOUSE is not set
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+# CONFIG_HID_MULTITOUCH is not set
+# CONFIG_HID_NTRIG is not set
 CONFIG_HID_ORTEK=m
-CONFIG_HID_PANTHERLORD=m
-# CONFIG_PANTHERLORD_FF is not set
+# CONFIG_HID_PANTHERLORD is not set
 CONFIG_HID_PETALYNX=m
 CONFIG_HID_PICOLCD=m
 CONFIG_HID_PICOLCD_FB=y
 CONFIG_HID_PICOLCD_BACKLIGHT=y
 CONFIG_HID_PICOLCD_LCD=y
 CONFIG_HID_PICOLCD_LEDS=y
-CONFIG_HID_PRIMAX=m
-CONFIG_HID_QUANTA=m
+CONFIG_HID_PICOLCD_CIR=y
+# CONFIG_HID_PRIMAX is not set
+# CONFIG_HID_PS3REMOTE is not set
 CONFIG_HID_ROCCAT=m
-CONFIG_HID_ROCCAT_COMMON=m
-CONFIG_HID_ROCCAT_ARVO=m
-CONFIG_HID_ROCCAT_KONE=m
-CONFIG_HID_ROCCAT_KONEPLUS=m
-CONFIG_HID_ROCCAT_KOVAPLUS=m
-CONFIG_HID_ROCCAT_PYRA=m
+CONFIG_HID_SAITEK=m
 CONFIG_HID_SAMSUNG=m
-CONFIG_HID_SONY=m
+# CONFIG_HID_SONY is not set
 CONFIG_HID_SPEEDLINK=m
+# CONFIG_HID_STEELSERIES is not set
 CONFIG_HID_SUNPLUS=m
-CONFIG_HID_GREENASIA=m
-# CONFIG_GREENASIA_FF is not set
-CONFIG_HID_SMARTJOYPLUS=m
-# CONFIG_SMARTJOYPLUS_FF is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+CONFIG_HID_TIVO=m
 CONFIG_HID_TOPSEED=m
-CONFIG_HID_THRUSTMASTER=m
-# CONFIG_THRUSTMASTER_FF is not set
-CONFIG_HID_WACOM=m
-# CONFIG_HID_WACOM_POWER_SUPPLY is not set
-CONFIG_HID_WIIMOTE=m
-CONFIG_HID_ZEROPLUS=m
-# CONFIG_ZEROPLUS_FF is not set
+CONFIG_HID_THINGM=m
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_WACOM is not set
+# CONFIG_HID_WIIMOTE is not set
+# CONFIG_HID_ZEROPLUS is not set
 CONFIG_HID_ZYDACRON=m
-CONFIG_USB_SUPPORT=y
-CONFIG_USB_COMMON=y
-CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_HID_SENSOR_HUB=m
+
+#
+# USB HID support
+#
+CONFIG_USB_HID=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+
+#
+# I2C HID support
+#
+CONFIG_I2C_HID=m
 CONFIG_USB_ARCH_HAS_OHCI=y
 CONFIG_USB_ARCH_HAS_EHCI=y
 CONFIG_USB_ARCH_HAS_XHCI=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_ARCH_HAS_HCD=y
 CONFIG_USB=y
 # CONFIG_USB_DEBUG is not set
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
@@ -3711,14 +3982,12 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 #
 # Miscellaneous USB options
 #
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_DEVICE_CLASS=y
-CONFIG_USB_DYNAMIC_MINORS=y
-CONFIG_USB_SUSPEND=y
+CONFIG_USB_DEFAULT_PERSIST=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
 # CONFIG_USB_OTG is not set
-CONFIG_USB_DWC3=m
-# CONFIG_USB_DWC3_DEBUG is not set
-# CONFIG_USB_MON is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=m
 CONFIG_USB_WUSB=m
 CONFIG_USB_WUSB_CBAF=m
 # CONFIG_USB_WUSB_CBAF_DEBUG is not set
@@ -3728,26 +3997,30 @@ CONFIG_USB_WUSB_CBAF=m
 #
 # CONFIG_USB_C67X00_HCD is not set
 CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=m
 # CONFIG_USB_XHCI_HCD_DEBUGGING is not set
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
 CONFIG_USB_EHCI_TT_NEWSCHED=y
-CONFIG_USB_OXU210HP_HCD=m
-CONFIG_USB_ISP116X_HCD=m
+CONFIG_USB_EHCI_PCI=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
 # CONFIG_USB_ISP1760_HCD is not set
 CONFIG_USB_ISP1362_HCD=m
 CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
 # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
 # CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
 CONFIG_USB_OHCI_LITTLE_ENDIAN=y
 CONFIG_USB_UHCI_HCD=y
-CONFIG_USB_U132_HCD=m
+# CONFIG_USB_U132_HCD is not set
 CONFIG_USB_SL811_HCD=m
-# CONFIG_USB_SL811_HCD_ISO is not set
-# CONFIG_USB_SL811_CS is not set
-CONFIG_USB_R8A66597_HCD=m
+CONFIG_USB_SL811_HCD_ISO=y
+# CONFIG_USB_R8A66597_HCD is not set
 CONFIG_USB_WHCI_HCD=m
 CONFIG_USB_HWA_HCD=m
+CONFIG_USB_HCD_SSB=m
 
 #
 # USB Device Class drivers
@@ -3764,7 +4037,7 @@ CONFIG_USB_TMC=m
 #
 # also be needed; see USB_STORAGE Help for more info
 #
-CONFIG_USB_STORAGE=m
+CONFIG_USB_STORAGE=y
 # CONFIG_USB_STORAGE_DEBUG is not set
 CONFIG_USB_STORAGE_REALTEK=m
 CONFIG_REALTEK_AUTOPM=y
@@ -3780,20 +4053,24 @@ CONFIG_USB_STORAGE_ONETOUCH=m
 CONFIG_USB_STORAGE_KARMA=m
 CONFIG_USB_STORAGE_CYPRESS_ATACB=m
 CONFIG_USB_STORAGE_ENE_UB6250=m
-# CONFIG_USB_LIBUSUAL is not set
 
 #
 # USB Imaging devices
 #
-# CONFIG_USB_MDC800 is not set
-# CONFIG_USB_MICROTEK is not set
+CONFIG_USB_MDC800=m
+CONFIG_USB_MICROTEK=m
+CONFIG_USB_DWC3=m
+CONFIG_USB_DWC3_HOST=y
+# CONFIG_USB_DWC3_DEBUG is not set
+CONFIG_USB_CHIPIDEA=m
+# CONFIG_USB_CHIPIDEA_HOST is not set
+# CONFIG_USB_CHIPIDEA_DEBUG is not set
 
 #
 # USB port drivers
 #
 CONFIG_USB_USS720=m
 CONFIG_USB_SERIAL=m
-CONFIG_USB_EZUSB=y
 CONFIG_USB_SERIAL_GENERIC=y
 CONFIG_USB_SERIAL_AIRCABLE=m
 CONFIG_USB_SERIAL_ARK3116=m
@@ -3803,7 +4080,7 @@ CONFIG_USB_SERIAL_WHITEHEAT=m
 CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
 CONFIG_USB_SERIAL_CP210X=m
 CONFIG_USB_SERIAL_CYPRESS_M8=m
-CONFIG_USB_SERIAL_EMPEG=m
+# CONFIG_USB_SERIAL_EMPEG is not set
 CONFIG_USB_SERIAL_FTDI_SIO=m
 CONFIG_USB_SERIAL_FUNSOFT=m
 CONFIG_USB_SERIAL_VISOR=m
@@ -3811,16 +4088,18 @@ CONFIG_USB_SERIAL_IPAQ=m
 CONFIG_USB_SERIAL_IR=m
 CONFIG_USB_SERIAL_EDGEPORT=m
 CONFIG_USB_SERIAL_EDGEPORT_TI=m
-CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_F81232=m
+# CONFIG_USB_SERIAL_GARMIN is not set
 CONFIG_USB_SERIAL_IPW=m
 CONFIG_USB_SERIAL_IUU=m
-# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
 # CONFIG_USB_SERIAL_KEYSPAN is not set
-# CONFIG_USB_SERIAL_KLSI is not set
+CONFIG_USB_SERIAL_KLSI=m
 # CONFIG_USB_SERIAL_KOBIL_SCT is not set
 CONFIG_USB_SERIAL_MCT_U232=m
+# CONFIG_USB_SERIAL_METRO is not set
 CONFIG_USB_SERIAL_MOS7720=m
-# CONFIG_USB_SERIAL_MOS7715_PARPORT is not set
+CONFIG_USB_SERIAL_MOS7715_PARPORT=y
 CONFIG_USB_SERIAL_MOS7840=m
 CONFIG_USB_SERIAL_MOTOROLA=m
 # CONFIG_USB_SERIAL_NAVMAN is not set
@@ -3829,22 +4108,25 @@ CONFIG_USB_SERIAL_OTI6858=m
 CONFIG_USB_SERIAL_QCAUX=m
 CONFIG_USB_SERIAL_QUALCOMM=m
 CONFIG_USB_SERIAL_SPCP8X5=m
-CONFIG_USB_SERIAL_HP4X=m
-CONFIG_USB_SERIAL_SAFE=m
-# CONFIG_USB_SERIAL_SAFE_PADDED is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
 CONFIG_USB_SERIAL_SIEMENS_MPI=m
 CONFIG_USB_SERIAL_SIERRAWIRELESS=m
-CONFIG_USB_SERIAL_SYMBOL=m
-CONFIG_USB_SERIAL_TI=m
+# CONFIG_USB_SERIAL_SYMBOL is not set
+# CONFIG_USB_SERIAL_TI is not set
 # CONFIG_USB_SERIAL_CYBERJACK is not set
 CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_WWAN=m
 CONFIG_USB_SERIAL_OPTION=m
 # CONFIG_USB_SERIAL_OMNINET is not set
-CONFIG_USB_SERIAL_OPTICON=m
-CONFIG_USB_SERIAL_VIVOPAY_SERIAL=m
-CONFIG_USB_SERIAL_ZIO=m
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set
+# CONFIG_USB_SERIAL_XSENS_MT is not set
+# CONFIG_USB_SERIAL_ZIO is not set
+# CONFIG_USB_SERIAL_WISHBONE is not set
+CONFIG_USB_SERIAL_ZTE=m
 CONFIG_USB_SERIAL_SSU100=m
+CONFIG_USB_SERIAL_QT2=m
 # CONFIG_USB_SERIAL_DEBUG is not set
 
 #
@@ -3852,14 +4134,14 @@ CONFIG_USB_SERIAL_SSU100=m
 #
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
-# CONFIG_USB_ADUTUX is not set
+CONFIG_USB_ADUTUX=m
 CONFIG_USB_SEVSEG=m
 # CONFIG_USB_RIO500 is not set
 # CONFIG_USB_LEGOTOWER is not set
 CONFIG_USB_LCD=m
 CONFIG_USB_LED=m
-CONFIG_USB_CYPRESS_CY7C63=m
-CONFIG_USB_CYTHERM=m
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
 # CONFIG_USB_IDMOUSE is not set
 CONFIG_USB_FTDI_ELAN=m
 # CONFIG_USB_APPLEDISPLAY is not set
@@ -3869,21 +4151,17 @@ CONFIG_USB_SISUSBVGA_CON=y
 # CONFIG_USB_TRANCEVIBRATOR is not set
 CONFIG_USB_IOWARRIOR=m
 # CONFIG_USB_TEST is not set
-CONFIG_USB_ISIGHTFW=m
+# CONFIG_USB_ISIGHTFW is not set
 CONFIG_USB_YUREX=m
+CONFIG_USB_EZUSB_FX2=m
+CONFIG_USB_HSIC_USB3503=m
 CONFIG_USB_ATM=m
 CONFIG_USB_SPEEDTOUCH=m
 CONFIG_USB_CXACRU=m
 CONFIG_USB_UEAGLEATM=m
 CONFIG_USB_XUSBATM=m
+# CONFIG_USB_PHY is not set
 # CONFIG_USB_GADGET is not set
-
-#
-# OTG and related infrastructure
-#
-CONFIG_USB_OTG_UTILS=y
-CONFIG_USB_GPIO_VBUS=m
-CONFIG_NOP_USB_XCEIV=m
 CONFIG_UWB=m
 CONFIG_UWB_HWA=m
 CONFIG_UWB_WHCI=m
@@ -3899,23 +4177,26 @@ CONFIG_MMC=m
 CONFIG_MMC_BLOCK=m
 CONFIG_MMC_BLOCK_MINORS=8
 CONFIG_MMC_BLOCK_BOUNCE=y
-CONFIG_SDIO_UART=m
-CONFIG_MMC_TEST=m
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
 
 #
 # MMC/SD/SDIO Host Controller Drivers
 #
 CONFIG_MMC_SDHCI=m
 CONFIG_MMC_SDHCI_PCI=m
-# CONFIG_MMC_RICOH_MMC is not set
+CONFIG_MMC_RICOH_MMC=y
+CONFIG_MMC_SDHCI_ACPI=m
 CONFIG_MMC_SDHCI_PLTFM=m
+CONFIG_MMC_SDHCI_PXAV3=m
+CONFIG_MMC_SDHCI_PXAV2=m
 CONFIG_MMC_WBSD=m
 CONFIG_MMC_TIFM_SD=m
-CONFIG_MMC_SDRICOH_CS=m
 CONFIG_MMC_CB710=m
 CONFIG_MMC_VIA_SDMMC=m
 CONFIG_MMC_VUB300=m
 CONFIG_MMC_USHC=m
+CONFIG_MMC_REALTEK_PCI=m
 CONFIG_MEMSTICK=m
 # CONFIG_MEMSTICK_DEBUG is not set
 
@@ -3931,6 +4212,7 @@ CONFIG_MSPRO_BLOCK=m
 CONFIG_MEMSTICK_TIFM_MS=m
 CONFIG_MEMSTICK_JMICRON_38X=m
 CONFIG_MEMSTICK_R592=m
+CONFIG_MEMSTICK_REALTEK_PCI=m
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 
@@ -3938,43 +4220,73 @@ CONFIG_LEDS_CLASS=y
 # LED drivers
 #
 CONFIG_LEDS_LM3530=m
-CONFIG_LEDS_NET48XX=m
-CONFIG_LEDS_NET5501=m
-CONFIG_LEDS_WRAP=m
-CONFIG_LEDS_PCA9532=m
-CONFIG_LEDS_PCA9532_GPIO=y
-CONFIG_LEDS_GPIO=m
+CONFIG_LEDS_LM3533=m
+CONFIG_LEDS_LM3642=m
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_GPIO is not set
 CONFIG_LEDS_LP3944=m
+CONFIG_LEDS_LP55XX_COMMON=m
 CONFIG_LEDS_LP5521=m
 CONFIG_LEDS_LP5523=m
+CONFIG_LEDS_LP5562=m
 CONFIG_LEDS_CLEVO_MAIL=m
-CONFIG_LEDS_PCA955X=m
-CONFIG_LEDS_REGULATOR=m
-CONFIG_LEDS_BD2802=m
+# CONFIG_LEDS_PCA955X is not set
+CONFIG_LEDS_PCA9633=m
+CONFIG_LEDS_PWM=m
+# CONFIG_LEDS_BD2802 is not set
 CONFIG_LEDS_INTEL_SS4200=m
 CONFIG_LEDS_LT3593=m
 CONFIG_LEDS_DELL_NETBOOKS=m
-CONFIG_LEDS_TRIGGERS=y
+# CONFIG_LEDS_RENESAS_TPU is not set
+CONFIG_LEDS_TCA6507=m
+CONFIG_LEDS_LM355x=m
+CONFIG_LEDS_OT200=m
+CONFIG_LEDS_BLINKM=m
 
 #
 # LED Triggers
 #
+CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_ONESHOT=m
 CONFIG_LEDS_TRIGGER_HEARTBEAT=m
 CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+# CONFIG_LEDS_TRIGGER_CPU is not set
 CONFIG_LEDS_TRIGGER_GPIO=m
 CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
 
 #
 # iptables trigger is under Netfilter config (LED target)
 #
-CONFIG_LEDS_TRIGGER_NETDEV=m
+CONFIG_LEDS_TRIGGER_TRANSIENT=m
+CONFIG_LEDS_TRIGGER_CAMERA=m
 # CONFIG_ACCESSIBILITY is not set
 # CONFIG_INFINIBAND is not set
-# CONFIG_EDAC is not set
+CONFIG_EDAC=y
+# CONFIG_EDAC_LEGACY_SYSFS is not set
+# CONFIG_EDAC_DEBUG is not set
+CONFIG_EDAC_DECODE_MCE=m
+CONFIG_EDAC_MCE_INJ=m
+CONFIG_EDAC_MM_EDAC=m
+# CONFIG_EDAC_AMD76X is not set
+# CONFIG_EDAC_E7XXX is not set
+CONFIG_EDAC_E752X=m
+# CONFIG_EDAC_I82875P is not set
+CONFIG_EDAC_I82975X=m
+CONFIG_EDAC_I3000=m
+CONFIG_EDAC_I3200=m
+CONFIG_EDAC_X38=m
+CONFIG_EDAC_I5400=m
+CONFIG_EDAC_I7CORE=m
+# CONFIG_EDAC_I82860 is not set
+# CONFIG_EDAC_R82600 is not set
+CONFIG_EDAC_I5000=m
+CONFIG_EDAC_I5100=m
+CONFIG_EDAC_I7300=m
 CONFIG_RTC_LIB=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_SYSTOHC=y
 CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
 # CONFIG_RTC_DEBUG is not set
 
@@ -3999,12 +4311,13 @@ CONFIG_RTC_DRV_RS5C372=m
 CONFIG_RTC_DRV_ISL1208=m
 CONFIG_RTC_DRV_ISL12022=m
 CONFIG_RTC_DRV_X1205=m
+CONFIG_RTC_DRV_PCF8523=m
 CONFIG_RTC_DRV_PCF8563=m
 CONFIG_RTC_DRV_PCF8583=m
 CONFIG_RTC_DRV_M41T80=m
 CONFIG_RTC_DRV_M41T80_WDT=y
 CONFIG_RTC_DRV_BQ32K=m
-CONFIG_RTC_DRV_S35390A=m
+# CONFIG_RTC_DRV_S35390A is not set
 CONFIG_RTC_DRV_FM3130=m
 CONFIG_RTC_DRV_RX8581=m
 CONFIG_RTC_DRV_RX8025=m
@@ -4024,106 +4337,174 @@ CONFIG_RTC_DRV_DS1511=m
 CONFIG_RTC_DRV_DS1553=m
 CONFIG_RTC_DRV_DS1742=m
 CONFIG_RTC_DRV_STK17TA8=m
-CONFIG_RTC_DRV_M48T86=m
+# CONFIG_RTC_DRV_M48T86 is not set
 CONFIG_RTC_DRV_M48T35=m
 CONFIG_RTC_DRV_M48T59=m
 CONFIG_RTC_DRV_MSM6242=m
 CONFIG_RTC_DRV_BQ4802=m
 CONFIG_RTC_DRV_RP5C01=m
 CONFIG_RTC_DRV_V3020=m
-CONFIG_RTC_DRV_PCF50633=m
+CONFIG_RTC_DRV_DS2404=m
 
 #
 # on-CPU RTC drivers
 #
+CONFIG_RTC_DRV_SNVS=m
+
+#
+# HID Sensor RTC drivers
+#
+CONFIG_RTC_DRV_HID_SENSOR_TIME=m
 CONFIG_DMADEVICES=y
 # CONFIG_DMADEVICES_DEBUG is not set
 
 #
 # DMA Devices
 #
-CONFIG_INTEL_MID_DMAC=m
+# CONFIG_INTEL_MID_DMAC is not set
 CONFIG_INTEL_IOATDMA=m
+CONFIG_DW_DMAC=m
+# CONFIG_DW_DMAC_BIG_ENDIAN_IO is not set
 CONFIG_TIMB_DMA=m
 CONFIG_PCH_DMA=m
 CONFIG_DMA_ENGINE=y
+CONFIG_DMA_ACPI=y
+CONFIG_DMA_OF=y
 
 #
 # DMA Clients
 #
 CONFIG_NET_DMA=y
 CONFIG_ASYNC_TX_DMA=y
-CONFIG_DMATEST=m
+# CONFIG_DMATEST is not set
 CONFIG_DCA=m
 # CONFIG_AUXDISPLAY is not set
 CONFIG_UIO=m
-CONFIG_UIO_CIF=m
-CONFIG_UIO_PDRV=m
-CONFIG_UIO_PDRV_GENIRQ=m
+# CONFIG_UIO_CIF is not set
+# CONFIG_UIO_PDRV is not set
+# CONFIG_UIO_PDRV_GENIRQ is not set
+# CONFIG_UIO_DMEM_GENIRQ is not set
 CONFIG_UIO_AEC=m
 CONFIG_UIO_SERCOS3=m
 CONFIG_UIO_PCI_GENERIC=m
-CONFIG_UIO_NETX=m
+# CONFIG_UIO_NETX is not set
+# CONFIG_VFIO is not set
+CONFIG_VIRT_DRIVERS=y
 CONFIG_VIRTIO=y
-CONFIG_VIRTIO_RING=y
 
 #
 # Virtio drivers
 #
-CONFIG_VIRTIO_PCI=m
+CONFIG_VIRTIO_PCI=y
 CONFIG_VIRTIO_BALLOON=m
-CONFIG_VIRTIO_MMIO=m
+# CONFIG_VIRTIO_MMIO is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+# CONFIG_HYPERV is not set
 CONFIG_STAGING=y
 CONFIG_ET131X=m
-CONFIG_SLICOSS=m
-CONFIG_USBIP_CORE=m
-CONFIG_USBIP_VHCI_HCD=m
-CONFIG_USBIP_HOST=m
-# CONFIG_USBIP_DEBUG is not set
+# CONFIG_SLICOSS is not set
+# CONFIG_USBIP_CORE is not set
 # CONFIG_W35UND is not set
 # CONFIG_PRISM2_USB is not set
 # CONFIG_ECHO is not set
 # CONFIG_COMEDI is not set
+# CONFIG_FB_OLPC_DCON is not set
 # CONFIG_ASUS_OLED is not set
 # CONFIG_PANEL is not set
 # CONFIG_R8187SE is not set
 # CONFIG_RTL8192U is not set
+CONFIG_RTLLIB=m
+CONFIG_RTLLIB_CRYPTO_CCMP=m
+CONFIG_RTLLIB_CRYPTO_TKIP=m
+CONFIG_RTLLIB_CRYPTO_WEP=m
 # CONFIG_RTL8192E is not set
 # CONFIG_R8712U is not set
-# CONFIG_RTS_PSTOR is not set
 # CONFIG_RTS5139 is not set
 # CONFIG_TRANZPORT is not set
-# CONFIG_POHMELFS is not set
-CONFIG_IDE_PHISON=m
+# CONFIG_IDE_PHISON is not set
 # CONFIG_LINE6_USB is not set
-CONFIG_DRM_NOUVEAU=m
-CONFIG_DRM_NOUVEAU_BACKLIGHT=y
-
-#
-# I2C encoder or helper chips
-#
-# CONFIG_DRM_I2C_CH7006 is not set
-# CONFIG_DRM_I2C_SIL164 is not set
 # CONFIG_USB_SERIAL_QUATECH2 is not set
-# CONFIG_USB_SERIAL_QUATECH_USB2 is not set
 # CONFIG_VT6655 is not set
 # CONFIG_VT6656 is not set
-# CONFIG_HYPERV_STORAGE is not set
-CONFIG_HYPERV_NET=m
-CONFIG_HYPERV_MOUSE=m
-# CONFIG_VME_BUS is not set
 # CONFIG_DX_SEP is not set
-# CONFIG_IIO is not set
-# CONFIG_XVMALLOC is not set
-# CONFIG_ZRAM is not set
-# CONFIG_WLAGS49_H2 is not set
-# CONFIG_WLAGS49_H25 is not set
+
+#
+# IIO staging drivers
+#
+
+#
+# Accelerometers
+#
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD7291 is not set
+# CONFIG_AD7606 is not set
+# CONFIG_AD799X is not set
+
+#
+# Analog digital bi-direction converters
+#
+# CONFIG_ADT7316 is not set
+
+#
+# Capacitance to digital converters
+#
+# CONFIG_AD7150 is not set
+# CONFIG_AD7152 is not set
+# CONFIG_AD7746 is not set
+
+#
+# Direct Digital Synthesis
+#
+
+#
+# Digital gyroscope sensors
+#
+
+#
+# Network Analyzer, Impedance Converters
+#
+# CONFIG_AD5933 is not set
+
+#
+# Light sensors
+#
+# CONFIG_SENSORS_ISL29018 is not set
+# CONFIG_SENSORS_ISL29028 is not set
+# CONFIG_TSL2583 is not set
+# CONFIG_TSL2x7x is not set
+
+#
+# Magnetometer sensors
+#
+# CONFIG_SENSORS_HMC5843 is not set
+
+#
+# Active energy metering IC
+#
+# CONFIG_ADE7854 is not set
+
+#
+# Resolver to digital converters
+#
+
+#
+# Triggers - standalone
+#
+# CONFIG_IIO_PERIODIC_RTC_TRIGGER is not set
+# CONFIG_IIO_GPIO_TRIGGER is not set
+# CONFIG_IIO_SYSFS_TRIGGER is not set
+# CONFIG_IIO_SIMPLE_DUMMY is not set
+# CONFIG_ZSMALLOC is not set
 # CONFIG_FB_SM7XX is not set
 # CONFIG_CRYSTALHD is not set
-# CONFIG_CXT1E1 is not set
 # CONFIG_FB_XGI is not set
 # CONFIG_ACPI_QUICKSTART is not set
-# CONFIG_SBE_2T3E3 is not set
 # CONFIG_USB_ENESTORAGE is not set
 # CONFIG_BCM_WIMAX is not set
 # CONFIG_FT1000 is not set
@@ -4134,17 +4515,36 @@ CONFIG_HYPERV_MOUSE=m
 # CONFIG_SPEAKUP is not set
 # CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set
 # CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set
-# CONFIG_DRM_PSB is not set
-CONFIG_INTEL_MEI=m
 # CONFIG_STAGING_MEDIA is not set
+
+#
+# Android
+#
+# CONFIG_ANDROID is not set
+# CONFIG_USB_WPAN_HCD is not set
+# CONFIG_WIMAX_GDM72XX is not set
+# CONFIG_CSR_WIFI is not set
+CONFIG_NET_VENDOR_SILICOM=y
+CONFIG_SBYPASS=m
+CONFIG_BPCTL=m
+CONFIG_CED1401=m
+# CONFIG_DGRP is not set
+CONFIG_FIREWIRE_SERIAL=m
+CONFIG_USB_DWC2=m
+# CONFIG_USB_DWC2_DEBUG is not set
+# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
 CONFIG_X86_PLATFORM_DEVICES=y
 CONFIG_ACER_WMI=m
 CONFIG_ACERHDF=m
 CONFIG_ASUS_LAPTOP=m
+CONFIG_CHROMEOS_LAPTOP=m
+CONFIG_DELL_LAPTOP=m
 CONFIG_DELL_WMI=m
 CONFIG_DELL_WMI_AIO=m
 CONFIG_FUJITSU_LAPTOP=m
-CONFIG_FUJITSU_LAPTOP_DEBUG=y
+# CONFIG_FUJITSU_LAPTOP_DEBUG is not set
+# CONFIG_FUJITSU_TABLET is not set
+CONFIG_AMILO_RFKILL=m
 CONFIG_TC1100_WMI=m
 CONFIG_HP_ACCEL=m
 CONFIG_HP_WMI=m
@@ -4152,7 +4552,7 @@ CONFIG_MSI_LAPTOP=m
 CONFIG_PANASONIC_LAPTOP=m
 CONFIG_COMPAL_LAPTOP=m
 CONFIG_SONY_LAPTOP=m
-# CONFIG_SONYPI_COMPAT is not set
+CONFIG_SONYPI_COMPAT=y
 CONFIG_IDEAPAD_LAPTOP=m
 CONFIG_THINKPAD_ACPI=m
 CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y
@@ -4162,21 +4562,36 @@ CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y
 CONFIG_THINKPAD_ACPI_VIDEO=y
 CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
 CONFIG_SENSORS_HDAPS=m
-CONFIG_INTEL_MENLOW=m
+# CONFIG_INTEL_MENLOW is not set
+CONFIG_EEEPC_LAPTOP=m
+CONFIG_ASUS_WMI=m
+CONFIG_ASUS_NB_WMI=m
+CONFIG_EEEPC_WMI=m
 CONFIG_ACPI_WMI=m
 CONFIG_MSI_WMI=m
-CONFIG_ACPI_ASUS=m
 CONFIG_TOPSTAR_LAPTOP=m
 CONFIG_ACPI_TOSHIBA=m
 CONFIG_TOSHIBA_BT_RFKILL=m
 CONFIG_ACPI_CMPC=m
 CONFIG_INTEL_IPS=m
-CONFIG_IBM_RTL=m
-CONFIG_XO15_EBOOK=m
+# CONFIG_IBM_RTL is not set
+CONFIG_XO1_RFKILL=m
+# CONFIG_XO15_EBOOK is not set
 CONFIG_SAMSUNG_LAPTOP=m
 CONFIG_MXM_WMI=m
 CONFIG_INTEL_OAKTRAIL=m
 CONFIG_SAMSUNG_Q10=m
+# CONFIG_APPLE_GMUX is not set
+CONFIG_PVPANIC=m
+CONFIG_CLKDEV_LOOKUP=y
+CONFIG_HAVE_CLK_PREPARE=y
+CONFIG_COMMON_CLK=y
+
+#
+# Common Clock Framework
+#
+# CONFIG_COMMON_CLK_DEBUG is not set
+# CONFIG_COMMON_CLK_SI5351 is not set
 
 #
 # Hardware Spinlock drivers
@@ -4185,55 +4600,146 @@ CONFIG_CLKSRC_I8253=y
 CONFIG_CLKEVT_I8253=y
 CONFIG_I8253_LOCK=y
 CONFIG_CLKBLD_I8253=y
+CONFIG_MAILBOX=y
 CONFIG_IOMMU_API=y
 CONFIG_IOMMU_SUPPORT=y
+CONFIG_OF_IOMMU=y
 CONFIG_DMAR_TABLE=y
 CONFIG_INTEL_IOMMU=y
-CONFIG_INTEL_IOMMU_DEFAULT_ON=y
+# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
 CONFIG_INTEL_IOMMU_FLOPPY_WA=y
-CONFIG_VIRT_DRIVERS=y
-CONFIG_HYPERV=m
-CONFIG_HYPERV_UTILS=m
-CONFIG_PM_DEVFREQ=y
 
 #
-# DEVFREQ Governors
+# Remoteproc drivers
+#
+# CONFIG_STE_MODEM_RPROC is not set
+
+#
+# Rpmsg drivers
+#
+# CONFIG_PM_DEVFREQ is not set
+# CONFIG_EXTCON is not set
+CONFIG_MEMORY=y
+CONFIG_IIO=m
+CONFIG_IIO_BUFFER=y
+CONFIG_IIO_BUFFER_CB=y
+CONFIG_IIO_KFIFO_BUF=m
+CONFIG_IIO_TRIGGER=y
+CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
+
+#
+# Accelerometers
+#
+# CONFIG_HID_SENSOR_ACCEL_3D is not set
+# CONFIG_IIO_ST_ACCEL_3AXIS is not set
+
+#
+# Analog to digital converters
+#
+# CONFIG_EXYNOS_ADC is not set
+# CONFIG_MAX1363 is not set
+# CONFIG_TI_ADC081C is not set
+# CONFIG_VIPERBOARD_ADC is not set
+
+#
+# Amplifiers
+#
+
+#
+# Hid Sensor IIO Common
+#
+CONFIG_HID_SENSOR_IIO_COMMON=m
+CONFIG_HID_SENSOR_IIO_TRIGGER=m
+# CONFIG_HID_SENSOR_ENUM_BASE_QUIRKS is not set
+
+#
+# Digital to analog converters
+#
+# CONFIG_AD5064 is not set
+# CONFIG_AD5380 is not set
+# CONFIG_AD5446 is not set
+# CONFIG_MAX517 is not set
+# CONFIG_MCP4725 is not set
+
+#
+# Frequency Synthesizers DDS/PLL
+#
+
+#
+# Clock Generator/Distribution
+#
+
+#
+# Phase-Locked Loop (PLL) frequency synthesizers
+#
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_HID_SENSOR_GYRO_3D is not set
+# CONFIG_IIO_ST_GYRO_3AXIS is not set
+# CONFIG_ITG3200 is not set
+
+#
+# Inertial measurement units
 #
-CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
-CONFIG_DEVFREQ_GOV_PERFORMANCE=y
-CONFIG_DEVFREQ_GOV_POWERSAVE=y
-CONFIG_DEVFREQ_GOV_USERSPACE=y
+# CONFIG_INV_MPU6050_IIO is not set
 
 #
-# DEVFREQ Drivers
+# Light sensors
 #
+# CONFIG_ADJD_S311 is not set
+# CONFIG_SENSORS_LM3533 is not set
+# CONFIG_SENSORS_TSL2563 is not set
+# CONFIG_VCNL4000 is not set
+# CONFIG_HID_SENSOR_ALS is not set
+
+#
+# Magnetometer sensors
+#
+# CONFIG_AK8975 is not set
+# CONFIG_HID_SENSOR_MAGNETOMETER_3D is not set
+# CONFIG_IIO_ST_MAGN_3AXIS is not set
+# CONFIG_VME_BUS is not set
+CONFIG_PWM=y
+CONFIG_IRQCHIP=y
+# CONFIG_IPACK_BUS is not set
+# CONFIG_RESET_CONTROLLER is not set
 
 #
 # Firmware Drivers
 #
-# CONFIG_EDD is not set
+CONFIG_EDD=m
+# CONFIG_EDD_OFF is not set
 CONFIG_FIRMWARE_MEMMAP=y
-# CONFIG_DELL_RBU is not set
-# CONFIG_DCDBAS is not set
+CONFIG_DELL_RBU=m
+CONFIG_DCDBAS=m
 CONFIG_DMIID=y
-CONFIG_DMI_SYSFS=m
+CONFIG_DMI_SYSFS=y
 CONFIG_ISCSI_IBFT_FIND=y
 CONFIG_ISCSI_IBFT=m
-CONFIG_SIGMA=m
 # CONFIG_GOOGLE_FIRMWARE is not set
 
+#
+# EFI (Extensible Firmware Interface) Support
+#
+CONFIG_EFI_VARS=y
+CONFIG_EFI_VARS_PSTORE=y
+# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set
+
 #
 # File systems
 #
+CONFIG_DCACHE_WORD_ACCESS=y
 # CONFIG_EXT2_FS is not set
 # CONFIG_EXT3_FS is not set
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_USE_FOR_EXT23=y
-CONFIG_EXT4_FS_XATTR=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
 # CONFIG_EXT4_DEBUG is not set
 CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
 CONFIG_FS_MBCACHE=y
 CONFIG_REISERFS_FS=m
 # CONFIG_REISERFS_CHECK is not set
@@ -4241,43 +4747,47 @@ CONFIG_REISERFS_PROC_INFO=y
 CONFIG_REISERFS_FS_XATTR=y
 CONFIG_REISERFS_FS_POSIX_ACL=y
 CONFIG_REISERFS_FS_SECURITY=y
-CONFIG_JFS_FS=m
-CONFIG_JFS_POSIX_ACL=y
-CONFIG_JFS_SECURITY=y
-# CONFIG_JFS_DEBUG is not set
-CONFIG_JFS_STATISTICS=y
+# CONFIG_JFS_FS is not set
 CONFIG_XFS_FS=m
 CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
-# CONFIG_XFS_RT is not set
+CONFIG_XFS_RT=y
+# CONFIG_XFS_WARN is not set
 # CONFIG_XFS_DEBUG is not set
 # CONFIG_GFS2_FS is not set
-# CONFIG_BTRFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
+# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
+# CONFIG_BTRFS_DEBUG is not set
 # CONFIG_NILFS2_FS is not set
 CONFIG_FS_POSIX_ACL=y
-CONFIG_EXPORTFS=m
+CONFIG_EXPORTFS=y
 CONFIG_FILE_LOCKING=y
 CONFIG_FSNOTIFY=y
 CONFIG_DNOTIFY=y
 CONFIG_INOTIFY_USER=y
 CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
-CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
 # CONFIG_QUOTA_DEBUG is not set
-CONFIG_QUOTA_TREE=m
-CONFIG_QFMT_V1=m
-CONFIG_QFMT_V2=m
+CONFIG_QUOTA_TREE=y
+# CONFIG_QFMT_V1 is not set
+CONFIG_QFMT_V2=y
 CONFIG_QUOTACTL=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS4_FS=y
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
+CONFIG_GENERIC_ACL=y
 
 #
 # Caches
 #
 CONFIG_FSCACHE=m
-# CONFIG_FSCACHE_STATS is not set
+CONFIG_FSCACHE_STATS=y
 # CONFIG_FSCACHE_HISTOGRAM is not set
 # CONFIG_FSCACHE_DEBUG is not set
 # CONFIG_FSCACHE_OBJECT_LIST is not set
@@ -4288,7 +4798,7 @@ CONFIG_CACHEFILES=m
 #
 # CD-ROM/DVD Filesystems
 #
-CONFIG_ISO9660_FS=m
+CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
@@ -4298,10 +4808,10 @@ CONFIG_UDF_NLS=y
 # DOS/FAT/NT Filesystems
 #
 CONFIG_FAT_FS=m
-# CONFIG_MSDOS_FS is not set
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
-CONFIG_FAT_DEFAULT_CODEPAGE=850
-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
 # CONFIG_NTFS_FS is not set
 
 #
@@ -4310,39 +4820,22 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
 CONFIG_PROC_FS=y
 # CONFIG_PROC_KCORE is not set
 CONFIG_PROC_SYSCTL=y
-CONFIG_PROC_PAGE_MONITOR=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
-# CONFIG_TMPFS_POSIX_ACL is not set
+CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_TMPFS_XATTR=y
 # CONFIG_HUGETLBFS is not set
 # CONFIG_HUGETLB_PAGE is not set
-# CONFIG_CONFIGFS_FS is not set
+CONFIG_CONFIGFS_FS=m
 CONFIG_MISC_FILESYSTEMS=y
 # CONFIG_ADFS_FS is not set
 # CONFIG_AFFS_FS is not set
-CONFIG_ECRYPT_FS=m
+# CONFIG_ECRYPT_FS is not set
 # CONFIG_HFS_FS is not set
 # CONFIG_HFSPLUS_FS is not set
 # CONFIG_BEFS_FS is not set
 # CONFIG_BFS_FS is not set
 # CONFIG_EFS_FS is not set
-CONFIG_JFFS2_FS=m
-CONFIG_JFFS2_FS_DEBUG=0
-CONFIG_JFFS2_FS_WRITEBUFFER=y
-# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
-# CONFIG_JFFS2_SUMMARY is not set
-# CONFIG_JFFS2_FS_XATTR is not set
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_JFFS2_ZLIB=y
-# CONFIG_JFFS2_LZO is not set
-CONFIG_JFFS2_RTIME=y
-CONFIG_JFFS2_RUBIN=y
-# CONFIG_JFFS2_CMODE_NONE is not set
-CONFIG_JFFS2_CMODE_PRIORITY=y
-# CONFIG_JFFS2_CMODE_SIZE is not set
-# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
-# CONFIG_UBIFS_FS is not set
 # CONFIG_LOGFS is not set
 # CONFIG_CRAMFS is not set
 # CONFIG_SQUASHFS is not set
@@ -4351,113 +4844,117 @@ CONFIG_JFFS2_CMODE_PRIORITY=y
 # CONFIG_OMFS_FS is not set
 # CONFIG_HPFS_FS is not set
 # CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
 # CONFIG_ROMFS_FS is not set
-# CONFIG_PSTORE is not set
+CONFIG_PSTORE=y
+# CONFIG_PSTORE_CONSOLE is not set
+# CONFIG_PSTORE_FTRACE is not set
+# CONFIG_PSTORE_RAM is not set
 # CONFIG_SYSV_FS is not set
-CONFIG_UFS_FS=m
-# CONFIG_UFS_FS_WRITE is not set
-# CONFIG_UFS_DEBUG is not set
+# CONFIG_UFS_FS is not set
 # CONFIG_EXOFS_FS is not set
+# CONFIG_F2FS_FS is not set
+CONFIG_EFIVAR_FS=m
+CONFIG_ORE=m
 CONFIG_NETWORK_FILESYSTEMS=y
 CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
+CONFIG_NFS_V2=m
+CONFIG_NFS_V3=m
 CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-# CONFIG_NFS_V4_1 is not set
-# CONFIG_NFS_FSCACHE is not set
+CONFIG_NFS_V4=m
+# CONFIG_NFS_SWAP is not set
+CONFIG_NFS_V4_1=y
+CONFIG_PNFS_FILE_LAYOUT=m
+CONFIG_PNFS_BLOCK=m
+CONFIG_PNFS_OBJLAYOUT=m
+CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="ipfire.org"
+CONFIG_NFS_FSCACHE=y
 # CONFIG_NFS_USE_LEGACY_DNS is not set
 CONFIG_NFS_USE_KERNEL_DNS=y
-# CONFIG_NFS_USE_NEW_IDMAPPER is not set
 CONFIG_NFSD=m
 CONFIG_NFSD_V2_ACL=y
 CONFIG_NFSD_V3=y
 CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
+# CONFIG_NFSD_FAULT_INJECTION is not set
 CONFIG_LOCKD=m
 CONFIG_LOCKD_V4=y
 CONFIG_NFS_ACL_SUPPORT=m
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=m
 CONFIG_SUNRPC_GSS=m
+CONFIG_SUNRPC_BACKCHANNEL=y
 CONFIG_RPCSEC_GSS_KRB5=m
+# CONFIG_SUNRPC_DEBUG is not set
 # CONFIG_CEPH_FS is not set
 CONFIG_CIFS=m
 CONFIG_CIFS_STATS=y
-CONFIG_CIFS_STATS2=y
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_WEAK_PW_HASH is not set
 # CONFIG_CIFS_UPCALL is not set
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
-# CONFIG_CIFS_DEBUG2 is not set
-# CONFIG_CIFS_DFS_UPCALL is not set
-# CONFIG_CIFS_FSCACHE is not set
 CONFIG_CIFS_ACL=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_CIFS_SMB2=y
+CONFIG_CIFS_FSCACHE=y
 # CONFIG_NCP_FS is not set
 # CONFIG_CODA_FS is not set
 # CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_ACORN_PARTITION is not set
-# CONFIG_OSF_PARTITION is not set
-# CONFIG_AMIGA_PARTITION is not set
-# CONFIG_ATARI_PARTITION is not set
-# CONFIG_MAC_PARTITION is not set
-CONFIG_MSDOS_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-# CONFIG_MINIX_SUBPARTITION is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
-CONFIG_LDM_PARTITION=y
-# CONFIG_LDM_DEBUG is not set
-# CONFIG_SGI_PARTITION is not set
-# CONFIG_ULTRIX_PARTITION is not set
-# CONFIG_SUN_PARTITION is not set
-# CONFIG_KARMA_PARTITION is not set
-CONFIG_EFI_PARTITION=y
-# CONFIG_SYSV68_PARTITION is not set
 CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="cp850"
-# CONFIG_NLS_CODEPAGE_437 is not set
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-CONFIG_NLS_CODEPAGE_850=y
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-CONFIG_NLS_ISO8859_1=y
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-CONFIG_NLS_UTF8=y
+CONFIG_NLS_DEFAULT="utf-8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
+CONFIG_NLS_UTF8=m
+CONFIG_DLM=m
+# CONFIG_DLM_DEBUG is not set
 
 #
 # Kernel hacking
@@ -4467,38 +4964,40 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y
 CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 CONFIG_ENABLE_MUST_CHECK=y
-CONFIG_FRAME_WARN=1024
-# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_FRAME_WARN=2048
+CONFIG_MAGIC_SYSRQ=y
 CONFIG_STRIP_ASM_SYMS=y
+# CONFIG_READABLE_ASM is not set
 # CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
-# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_FS=y
+CONFIG_HEADERS_CHECK=y
 # CONFIG_DEBUG_SECTION_MISMATCH is not set
 CONFIG_DEBUG_KERNEL=y
-# CONFIG_DEBUG_SHIRQ is not set
-# CONFIG_LOCKUP_DETECTOR is not set
-# CONFIG_HARDLOCKUP_DETECTOR is not set
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
-# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
-CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
+# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+# CONFIG_PANIC_ON_OOPS is not set
+CONFIG_PANIC_ON_OOPS_VALUE=0
+# CONFIG_DETECT_HUNG_TASK is not set
 CONFIG_SCHED_DEBUG=y
-# CONFIG_SCHEDSTATS is not set
+CONFIG_SCHEDSTATS=y
 CONFIG_TIMER_STATS=y
 # CONFIG_DEBUG_OBJECTS is not set
 # CONFIG_SLUB_DEBUG_ON is not set
 # CONFIG_SLUB_STATS is not set
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
 # CONFIG_DEBUG_KMEMLEAK is not set
 # CONFIG_DEBUG_RT_MUTEXES is not set
 # CONFIG_RT_MUTEX_TESTER is not set
 # CONFIG_DEBUG_SPINLOCK is not set
 # CONFIG_DEBUG_MUTEXES is not set
-# CONFIG_DEBUG_LOCK_ALLOC is not set
-# CONFIG_PROVE_LOCKING is not set
-# CONFIG_SPARSE_RCU_POINTER is not set
-# CONFIG_LOCK_STAT is not set
 # CONFIG_DEBUG_ATOMIC_SLEEP is not set
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
 # CONFIG_DEBUG_STACK_USAGE is not set
 # CONFIG_DEBUG_KOBJECT is not set
 # CONFIG_DEBUG_HIGHMEM is not set
@@ -4508,38 +5007,79 @@ CONFIG_DEBUG_BUGVERBOSE=y
 # CONFIG_DEBUG_VIRTUAL is not set
 # CONFIG_DEBUG_WRITECOUNT is not set
 CONFIG_DEBUG_MEMORY_INIT=y
-# CONFIG_DEBUG_LIST is not set
+CONFIG_DEBUG_LIST=y
 # CONFIG_TEST_LIST_SORT is not set
 # CONFIG_DEBUG_SG is not set
 # CONFIG_DEBUG_NOTIFIERS is not set
 # CONFIG_DEBUG_CREDENTIALS is not set
 CONFIG_ARCH_WANT_FRAME_POINTERS=y
-# CONFIG_FRAME_POINTER is not set
+CONFIG_FRAME_POINTER=y
 # CONFIG_BOOT_PRINTK_DELAY is not set
+
+#
+# RCU Debugging
+#
+# CONFIG_SPARSE_RCU_POINTER is not set
 # CONFIG_RCU_TORTURE_TEST is not set
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_RCU_CPU_STALL_INFO is not set
+# CONFIG_RCU_TRACE is not set
 # CONFIG_BACKTRACE_SELF_TEST is not set
 # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
 # CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
 # CONFIG_DEBUG_PER_CPU_MAPS is not set
-# CONFIG_CPU_NOTIFIER_ERROR_INJECT is not set
+# CONFIG_LKDTM is not set
+# CONFIG_NOTIFIER_ERROR_INJECTION is not set
 # CONFIG_FAULT_INJECTION is not set
-# CONFIG_LATENCYTOP is not set
-# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS=y
+# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
 # CONFIG_DEBUG_PAGEALLOC is not set
 CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_NOP_TRACER=y
 CONFIG_HAVE_FUNCTION_TRACER=y
 CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
 CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y
 CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
 CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
 CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
 CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
 CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACER_MAX_TRACE=y
+CONFIG_TRACE_CLOCK=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
 CONFIG_TRACING_SUPPORT=y
-# CONFIG_FTRACE is not set
-# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
-# CONFIG_FIREWIRE_OHCI_REMOTE_DMA is not set
+CONFIG_FTRACE=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_FUNCTION_GRAPH_TRACER=y
+# CONFIG_IRQSOFF_TRACER is not set
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_TRACER_SNAPSHOT=y
+# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+CONFIG_STACK_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_UPROBE_EVENT is not set
+# CONFIG_PROBE_EVENTS is not set
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_FTRACE_MCOUNT_RECORD=y
+# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_MMIOTRACE is not set
+CONFIG_RING_BUFFER_BENCHMARK=m
+# CONFIG_RING_BUFFER_STARTUP_TEST is not set
+CONFIG_RBTREE_TEST=m
+# CONFIG_INTERVAL_TREE_TEST is not set
+# CONFIG_BUILD_DOCSRC is not set
+CONFIG_DYNAMIC_DEBUG=y
 # CONFIG_DMA_API_DEBUG is not set
 # CONFIG_ATOMIC64_SELFTEST is not set
 CONFIG_ASYNC_RAID6_TEST=m
@@ -4547,19 +5087,17 @@ CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_HAVE_ARCH_KGDB=y
 # CONFIG_KGDB is not set
 CONFIG_HAVE_ARCH_KMEMCHECK=y
-# CONFIG_KMEMCHECK is not set
+# CONFIG_TEST_STRING_HELPERS is not set
 # CONFIG_TEST_KSTRTOX is not set
-# CONFIG_STRICT_DEVMEM is not set
-CONFIG_X86_VERBOSE_BOOTUP=y
+CONFIG_STRICT_DEVMEM=y
+# CONFIG_X86_VERBOSE_BOOTUP is not set
 CONFIG_EARLY_PRINTK=y
 # CONFIG_EARLY_PRINTK_DBGP is not set
-# CONFIG_DEBUG_STACKOVERFLOW is not set
+CONFIG_DEBUG_STACKOVERFLOW=y
 # CONFIG_X86_PTDUMP is not set
-CONFIG_DEBUG_RODATA=y
-CONFIG_DEBUG_RODATA_TEST=y
-# CONFIG_DEBUG_SET_MODULE_RONX is not set
-# CONFIG_DEBUG_NX_TEST is not set
+CONFIG_DEBUG_NX_TEST=m
 CONFIG_DOUBLEFAULT=y
+# CONFIG_DEBUG_TLBFLUSH is not set
 # CONFIG_IOMMU_STRESS is not set
 CONFIG_HAVE_MMIOTRACE_SUPPORT=y
 CONFIG_IO_DELAY_TYPE_0X80=0
@@ -4571,20 +5109,186 @@ CONFIG_IO_DELAY_0X80=y
 # CONFIG_IO_DELAY_UDELAY is not set
 # CONFIG_IO_DELAY_NONE is not set
 CONFIG_DEFAULT_IO_DELAY_TYPE=0
+CONFIG_DEBUG_BOOT_PARAMS=y
 # CONFIG_CPA_DEBUG is not set
-# CONFIG_OPTIMIZE_INLINING is not set
-# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
+CONFIG_OPTIMIZE_INLINING=y
+# CONFIG_DEBUG_NMI_SELFTEST is not set
 
 #
 # Security options
 #
+
+#
+# Grsecurity
+#
+CONFIG_ARCH_TRACK_EXEC_LIMIT=y
+CONFIG_PAX_USERCOPY_SLABS=y
+CONFIG_GRKERNSEC=y
+# CONFIG_GRKERNSEC_CONFIG_AUTO is not set
+CONFIG_GRKERNSEC_CONFIG_CUSTOM=y
+
+#
+# Customize Configuration
+#
+
+#
+# PaX
+#
+CONFIG_PAX=y
+
+#
+# PaX Control
+#
+# CONFIG_PAX_SOFTMODE is not set
+CONFIG_PAX_EI_PAX=y
+CONFIG_PAX_PT_PAX_FLAGS=y
+# CONFIG_PAX_XATTR_PAX_FLAGS is not set
+# CONFIG_PAX_NO_ACL_FLAGS is not set
+CONFIG_PAX_HAVE_ACL_FLAGS=y
+# CONFIG_PAX_HOOK_ACL_FLAGS is not set
+
+#
+# Non-executable pages
+#
+CONFIG_PAX_NOEXEC=y
+CONFIG_PAX_PAGEEXEC=y
+CONFIG_PAX_SEGMEXEC=y
+CONFIG_PAX_EMUTRAMP=y
+CONFIG_PAX_MPROTECT=y
+# CONFIG_PAX_MPROTECT_COMPAT is not set
+CONFIG_PAX_ELFRELOCS=y
+CONFIG_PAX_KERNEXEC=y
+CONFIG_PAX_KERNEXEC_PLUGIN_METHOD=""
+CONFIG_PAX_KERNEXEC_MODULE_TEXT=4
+
+#
+# Address Space Layout Randomization
+#
+CONFIG_PAX_ASLR=y
+CONFIG_PAX_RANDKSTACK=y
+CONFIG_PAX_RANDUSTACK=y
+CONFIG_PAX_RANDMMAP=y
+
+#
+# Miscellaneous hardening features
+#
+CONFIG_PAX_MEMORY_STACKLEAK=y
+CONFIG_PAX_MEMORY_STRUCTLEAK=y
+CONFIG_PAX_MEMORY_UDEREF=y
+CONFIG_PAX_REFCOUNT=y
+CONFIG_PAX_CONSTIFY_PLUGIN=y
+CONFIG_PAX_USERCOPY=y
+# CONFIG_PAX_USERCOPY_DEBUG is not set
+# CONFIG_PAX_SIZE_OVERFLOW is not set
+# CONFIG_PAX_LATENT_ENTROPY is not set
+
+#
+# Memory Protections
+#
+CONFIG_GRKERNSEC_KMEM=y
+CONFIG_GRKERNSEC_VM86=y
+# CONFIG_GRKERNSEC_IO is not set
+# CONFIG_GRKERNSEC_PERF_HARDEN is not set
+CONFIG_GRKERNSEC_RAND_THREADSTACK=y
+CONFIG_GRKERNSEC_PROC_MEMMAP=y
+CONFIG_GRKERNSEC_BRUTE=y
+CONFIG_GRKERNSEC_MODHARDEN=y
+CONFIG_GRKERNSEC_HIDESYM=y
+CONFIG_GRKERNSEC_KERN_LOCKOUT=y
+
+#
+# Role Based Access Control Options
+#
+CONFIG_GRKERNSEC_NO_RBAC=y
+# CONFIG_GRKERNSEC_ACL_HIDEKERN is not set
+CONFIG_GRKERNSEC_ACL_MAXTRIES=3
+CONFIG_GRKERNSEC_ACL_TIMEOUT=30
+
+#
+# Filesystem Protections
+#
+# CONFIG_GRKERNSEC_PROC is not set
+CONFIG_GRKERNSEC_LINK=y
+# CONFIG_GRKERNSEC_SYMLINKOWN is not set
+CONFIG_GRKERNSEC_FIFO=y
+# CONFIG_GRKERNSEC_SYSFS_RESTRICT is not set
+# CONFIG_GRKERNSEC_ROFS is not set
+CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL=y
+CONFIG_GRKERNSEC_CHROOT=y
+CONFIG_GRKERNSEC_CHROOT_MOUNT=y
+CONFIG_GRKERNSEC_CHROOT_DOUBLE=y
+CONFIG_GRKERNSEC_CHROOT_PIVOT=y
+CONFIG_GRKERNSEC_CHROOT_CHDIR=y
+# CONFIG_GRKERNSEC_CHROOT_CHMOD is not set
+CONFIG_GRKERNSEC_CHROOT_FCHDIR=y
+# CONFIG_GRKERNSEC_CHROOT_MKNOD is not set
+CONFIG_GRKERNSEC_CHROOT_SHMAT=y
+CONFIG_GRKERNSEC_CHROOT_UNIX=y
+CONFIG_GRKERNSEC_CHROOT_FINDTASK=y
+CONFIG_GRKERNSEC_CHROOT_NICE=y
+CONFIG_GRKERNSEC_CHROOT_SYSCTL=y
+# CONFIG_GRKERNSEC_CHROOT_CAPS is not set
+CONFIG_GRKERNSEC_CHROOT_INITRD=y
+
+#
+# Kernel Auditing
+#
+# CONFIG_GRKERNSEC_AUDIT_GROUP is not set
+# CONFIG_GRKERNSEC_EXECLOG is not set
+CONFIG_GRKERNSEC_RESLOG=y
+# CONFIG_GRKERNSEC_CHROOT_EXECLOG is not set
+# CONFIG_GRKERNSEC_AUDIT_PTRACE is not set
+# CONFIG_GRKERNSEC_AUDIT_CHDIR is not set
+# CONFIG_GRKERNSEC_AUDIT_MOUNT is not set
+CONFIG_GRKERNSEC_SIGNAL=y
+CONFIG_GRKERNSEC_FORKFAIL=y
+# CONFIG_GRKERNSEC_TIME is not set
+CONFIG_GRKERNSEC_PROC_IPADDR=y
+# CONFIG_GRKERNSEC_RWXMAP_LOG is not set
+
+#
+# Executable Protections
+#
+CONFIG_GRKERNSEC_DMESG=y
+CONFIG_GRKERNSEC_HARDEN_PTRACE=y
+CONFIG_GRKERNSEC_PTRACE_READEXEC=y
+CONFIG_GRKERNSEC_SETXID=y
+# CONFIG_GRKERNSEC_TPE is not set
+
+#
+# Network Protections
+#
+CONFIG_GRKERNSEC_RANDNET=y
+CONFIG_GRKERNSEC_BLACKHOLE=y
+CONFIG_GRKERNSEC_NO_SIMULT_CONNECT=y
+# CONFIG_GRKERNSEC_SOCKET is not set
+
+#
+# Sysctl Support
+#
+# CONFIG_GRKERNSEC_SYSCTL is not set
+
+#
+# Logging Options
+#
+CONFIG_GRKERNSEC_FLOODTIME=10
+CONFIG_GRKERNSEC_FLOODBURST=6
 CONFIG_KEYS=y
 # CONFIG_ENCRYPTED_KEYS is not set
-# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
-# CONFIG_SECURITY_DMESG_RESTRICT is not set
-# CONFIG_SECURITY is not set
-# CONFIG_SECURITYFS is not set
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY_DMESG_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_NETWORK_XFRM=y
+# CONFIG_SECURITY_PATH is not set
 # CONFIG_INTEL_TXT is not set
+# CONFIG_SECURITY_SELINUX is not set
+# CONFIG_SECURITY_SMACK is not set
+# CONFIG_SECURITY_TOMOYO is not set
+# CONFIG_SECURITY_APPARMOR is not set
+# CONFIG_IMA is not set
+# CONFIG_EVM is not set
 CONFIG_DEFAULT_SECURITY_DAC=y
 CONFIG_DEFAULT_SECURITY=""
 CONFIG_XOR_BLOCKS=m
@@ -4593,58 +5297,60 @@ CONFIG_ASYNC_MEMCPY=m
 CONFIG_ASYNC_XOR=m
 CONFIG_ASYNC_PQ=m
 CONFIG_ASYNC_RAID6_RECOV=m
-CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA=y
-CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA=y
 CONFIG_CRYPTO=y
 
 #
 # Crypto core or helper
 #
+CONFIG_CRYPTO_FIPS=y
 CONFIG_CRYPTO_ALGAPI=y
 CONFIG_CRYPTO_ALGAPI2=y
-CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD=y
 CONFIG_CRYPTO_AEAD2=y
-CONFIG_CRYPTO_BLKCIPHER=m
+CONFIG_CRYPTO_BLKCIPHER=y
 CONFIG_CRYPTO_BLKCIPHER2=y
 CONFIG_CRYPTO_HASH=y
 CONFIG_CRYPTO_HASH2=y
-CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG=y
 CONFIG_CRYPTO_RNG2=y
 CONFIG_CRYPTO_PCOMP=m
 CONFIG_CRYPTO_PCOMP2=y
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_MANAGER2=y
-CONFIG_CRYPTO_USER=m
-CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
-CONFIG_CRYPTO_GF128MUL=m
-# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_USER is not set
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_GF128MUL=y
+CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_WORKQUEUE=y
-CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_CRYPTD=y
 CONFIG_CRYPTO_AUTHENC=m
-# CONFIG_CRYPTO_TEST is not set
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_ABLK_HELPER_X86=y
+CONFIG_CRYPTO_GLUE_HELPER_X86=m
 
 #
 # Authenticated Encryption with Associated Data
 #
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_SEQIV=m
+CONFIG_CRYPTO_SEQIV=y
 
 #
 # Block modes
 #
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_CTR=m
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=y
 CONFIG_CRYPTO_CTS=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=y
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_XTS=y
 
 #
 # Hash modes
 #
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -4652,18 +5358,20 @@ CONFIG_CRYPTO_VMAC=m
 #
 # Digest
 #
-CONFIG_CRYPTO_CRC32C=m
-CONFIG_CRYPTO_CRC32C_INTEL=m
+CONFIG_CRYPTO_CRC32C=y
+CONFIG_CRYPTO_CRC32C_INTEL=y
+CONFIG_CRYPTO_CRC32=y
+CONFIG_CRYPTO_CRC32_PCLMUL=m
 CONFIG_CRYPTO_GHASH=m
 CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA1=m
-CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA256=y
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
@@ -4671,23 +5379,25 @@ CONFIG_CRYPTO_WP512=m
 #
 # Ciphers
 #
-CONFIG_CRYPTO_AES=m
-CONFIG_CRYPTO_AES_586=m
-CONFIG_CRYPTO_AES_NI_INTEL=m
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_AES_586=y
+CONFIG_CRYPTO_AES_NI_INTEL=y
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_ARC4=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_BLOWFISH_COMMON=m
 CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST_COMMON=m
 CONFIG_CRYPTO_CAST5=m
 CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SALSA20=m
-CONFIG_CRYPTO_SALSA20_586=m
+# CONFIG_CRYPTO_SALSA20_586 is not set
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SERPENT_SSE2_586=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_TWOFISH_COMMON=m
@@ -4696,7 +5406,7 @@ CONFIG_CRYPTO_TWOFISH_586=m
 #
 # Compression
 #
-CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_DEFLATE=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 
@@ -4704,47 +5414,64 @@ CONFIG_CRYPTO_LZO=m
 # Random Number Generation
 #
 CONFIG_CRYPTO_ANSI_CPRNG=m
-CONFIG_CRYPTO_USER_API=m
-CONFIG_CRYPTO_USER_API_HASH=m
-CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
 CONFIG_CRYPTO_HW=y
 CONFIG_CRYPTO_DEV_PADLOCK=m
 CONFIG_CRYPTO_DEV_PADLOCK_AES=m
 CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
 CONFIG_CRYPTO_DEV_GEODE=m
-CONFIG_CRYPTO_DEV_HIFN_795X=m
-CONFIG_CRYPTO_DEV_HIFN_795X_RNG=y
+# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
+CONFIG_PUBLIC_KEY_ALGO_RSA=m
+CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_HAVE_KVM=y
 CONFIG_HAVE_KVM_IRQCHIP=y
+CONFIG_HAVE_KVM_IRQ_ROUTING=y
 CONFIG_HAVE_KVM_EVENTFD=y
 CONFIG_KVM_APIC_ARCHITECTURE=y
 CONFIG_KVM_MMIO=y
 CONFIG_KVM_ASYNC_PF=y
+CONFIG_HAVE_KVM_MSI=y
+CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y
 CONFIG_VIRTUALIZATION=y
 CONFIG_KVM=m
 CONFIG_KVM_INTEL=m
 CONFIG_KVM_AMD=m
-CONFIG_VHOST_NET=m
-CONFIG_LGUEST=m
-# CONFIG_BINARY_PRINTF is not set
+CONFIG_KVM_MMU_AUDIT=y
+CONFIG_KVM_DEVICE_ASSIGNMENT=y
+# CONFIG_LGUEST is not set
+CONFIG_BINARY_PRINTF=y
 
 #
 # Library routines
 #
 CONFIG_RAID6_PQ=m
 CONFIG_BITREVERSE=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
 CONFIG_GENERIC_FIND_FIRST_BIT=y
-CONFIG_CRC_CCITT=m
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_GENERIC_IO=y
+CONFIG_CRC_CCITT=y
 CONFIG_CRC16=y
-CONFIG_CRC_T10DIF=m
+CONFIG_CRC_T10DIF=y
 CONFIG_CRC_ITU_T=m
 CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+CONFIG_CRC32_SLICEBY8=y
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+# CONFIG_CRC32_BIT is not set
 CONFIG_CRC7=m
 CONFIG_LIBCRC32C=m
 CONFIG_CRC8=m
 CONFIG_AUDIT_GENERIC=y
 CONFIG_ZLIB_INFLATE=y
-CONFIG_ZLIB_DEFLATE=y
+CONFIG_ZLIB_DEFLATE=m
 CONFIG_LZO_COMPRESS=y
 CONFIG_LZO_DECOMPRESS=y
 CONFIG_XZ_DEC=y
@@ -4761,6 +5488,7 @@ CONFIG_DECOMPRESS_BZIP2=y
 CONFIG_DECOMPRESS_LZMA=y
 CONFIG_DECOMPRESS_XZ=y
 CONFIG_DECOMPRESS_LZO=y
+CONFIG_GENERIC_ALLOCATOR=y
 CONFIG_TEXTSEARCH=y
 CONFIG_TEXTSEARCH_KMP=m
 CONFIG_TEXTSEARCH_BM=m
@@ -4770,6 +5498,13 @@ CONFIG_HAS_IOPORT=y
 CONFIG_HAS_DMA=y
 CONFIG_CHECK_SIGNATURE=y
 CONFIG_CPU_RMAP=y
+CONFIG_DQL=y
 CONFIG_NLATTR=y
+CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
 CONFIG_AVERAGE=y
+CONFIG_CLZ_TAB=y
 CONFIG_CORDIC=m
+# CONFIG_DDR is not set
+CONFIG_MPILIB=m
+CONFIG_OID_REGISTRY=m
+CONFIG_UCS2_STRING=y
index b9fd2aac964f3a233cb8fbe37f9d05a5709958eb..18bd29eae1278c0b6a1f6aab415ff921862c06c3 100644
@@ -1,46 +1,34 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/i386 3.2.47 Kernel Configuration
+# Linux/x86 3.10.9 Kernel Configuration
 #
 # CONFIG_64BIT is not set
 CONFIG_X86_32=y
-# CONFIG_X86_64 is not set
 CONFIG_X86=y
 CONFIG_INSTRUCTION_DECODER=y
 CONFIG_OUTPUT_FORMAT="elf32-i386"
 CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
-CONFIG_GENERIC_CMOS_UPDATE=y
-CONFIG_CLOCKSOURCE_WATCHDOG=y
-CONFIG_GENERIC_CLOCKEVENTS=y
-CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
 CONFIG_LOCKDEP_SUPPORT=y
 CONFIG_STACKTRACE_SUPPORT=y
 CONFIG_HAVE_LATENCYTOP_SUPPORT=y
 CONFIG_MMU=y
-CONFIG_ZONE_DMA=y
 CONFIG_NEED_DMA_MAP_STATE=y
 CONFIG_NEED_SG_DMA_LENGTH=y
 CONFIG_GENERIC_ISA_DMA=y
-CONFIG_GENERIC_IOMAP=y
 CONFIG_GENERIC_BUG=y
 CONFIG_GENERIC_HWEIGHT=y
-CONFIG_GENERIC_GPIO=y
 CONFIG_ARCH_MAY_HAVE_PC_FDC=y
-# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
 CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
-# CONFIG_GENERIC_TIME_VSYSCALL is not set
 CONFIG_ARCH_HAS_CPU_RELAX=y
-CONFIG_ARCH_HAS_DEFAULT_IDLE=y
 CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_ARCH_HAS_CPU_AUTOPROBE=y
 CONFIG_HAVE_SETUP_PER_CPU_AREA=y
 CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
 CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
 CONFIG_ARCH_HIBERNATION_POSSIBLE=y
 CONFIG_ARCH_SUSPEND_POSSIBLE=y
 # CONFIG_ZONE_DMA32 is not set
-CONFIG_ARCH_POPULATES_NODE_MAP=y
 # CONFIG_AUDIT_ARCH is not set
 CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
 CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
@@ -48,16 +36,15 @@ CONFIG_HAVE_INTEL_TXT=y
 CONFIG_X86_32_SMP=y
 CONFIG_X86_HT=y
 CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-ecx -fcall-saved-edx"
-CONFIG_KTIME_SCALAR=y
 CONFIG_ARCH_CPU_PROBE_RELEASE=y
+CONFIG_ARCH_SUPPORTS_UPROBES=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
-CONFIG_HAVE_IRQ_WORK=y
 CONFIG_IRQ_WORK=y
+CONFIG_BUILDTIME_EXTABLE_SORT=y
 
 #
 # General setup
 #
-CONFIG_EXPERIMENTAL=y
 CONFIG_INIT_ENV_ARG_LIMIT=32
 CONFIG_CROSS_COMPILE=""
 CONFIG_LOCALVERSION=""
@@ -69,8 +56,8 @@ CONFIG_HAVE_KERNEL_XZ=y
 CONFIG_HAVE_KERNEL_LZO=y
 # CONFIG_KERNEL_GZIP is not set
 # CONFIG_KERNEL_BZIP2 is not set
-CONFIG_KERNEL_LZMA=y
-# CONFIG_KERNEL_XZ is not set
+# CONFIG_KERNEL_LZMA is not set
+CONFIG_KERNEL_XZ=y
 # CONFIG_KERNEL_LZO is not set
 CONFIG_DEFAULT_HOSTNAME="(none)"
 CONFIG_SWAP=y
@@ -78,54 +65,99 @@ CONFIG_SYSVIPC=y
 CONFIG_SYSVIPC_SYSCTL=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_POSIX_MQUEUE_SYSCTL=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-# CONFIG_FHANDLE is not set
-CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
-# CONFIG_TASK_XACCT is not set
+CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
 CONFIG_AUDITSYSCALL=y
 CONFIG_AUDIT_WATCH=y
 CONFIG_AUDIT_TREE=y
+CONFIG_AUDIT_LOGINUID_IMMUTABLE=y
 CONFIG_HAVE_GENERIC_HARDIRQS=y
 
 #
 # IRQ subsystem
 #
 CONFIG_GENERIC_HARDIRQS=y
-CONFIG_HAVE_SPARSE_IRQ=y
 CONFIG_GENERIC_IRQ_PROBE=y
 CONFIG_GENERIC_IRQ_SHOW=y
 CONFIG_GENERIC_PENDING_IRQ=y
-CONFIG_GENERIC_IRQ_CHIP=y
+CONFIG_IRQ_DOMAIN=y
+# CONFIG_IRQ_DOMAIN_DEBUG is not set
 CONFIG_IRQ_FORCED_THREADING=y
 CONFIG_SPARSE_IRQ=y
+CONFIG_CLOCKSOURCE_WATCHDOG=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+
+#
+# Timers subsystem
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ_COMMON=y
+# CONFIG_HZ_PERIODIC is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+
+#
+# CPU/Task time and stats accounting
+#
+# CONFIG_TICK_CPU_ACCOUNTING is not set
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
 
 #
 # RCU Subsystem
 #
 CONFIG_TREE_RCU=y
 # CONFIG_PREEMPT_RCU is not set
-# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_STALL_COMMON=y
 CONFIG_RCU_FANOUT=32
+CONFIG_RCU_FANOUT_LEAF=16
 # CONFIG_RCU_FANOUT_EXACT is not set
-# CONFIG_RCU_FAST_NO_HZ is not set
+CONFIG_RCU_FAST_NO_HZ=y
 # CONFIG_TREE_RCU_TRACE is not set
+CONFIG_RCU_NOCB_CPU=y
+# CONFIG_RCU_NOCB_CPU_NONE is not set
+# CONFIG_RCU_NOCB_CPU_ZERO is not set
+CONFIG_RCU_NOCB_CPU_ALL=y
 # CONFIG_IKCONFIG is not set
-CONFIG_LOG_BUF_SHIFT=17
+CONFIG_LOG_BUF_SHIFT=18
 CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
-# CONFIG_CGROUPS is not set
+CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
+CONFIG_ARCH_WANTS_PROT_NUMA_PROT_NONE=y
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_PROC_PID_CPUSET=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+# CONFIG_MEMCG is not set
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_CFS_BANDWIDTH is not set
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_CGROUP=y
+# CONFIG_DEBUG_BLK_CGROUP is not set
 CONFIG_NAMESPACES=y
 CONFIG_UTS_NS=y
 CONFIG_IPC_NS=y
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
-# CONFIG_NET_NS is not set
-# CONFIG_SCHED_AUTOGROUP is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
-# CONFIG_RELAY is not set
+CONFIG_PID_NS=y
+CONFIG_NET_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+# CONFIG_SYSFS_DEPRECATED is not set
+CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_RD_GZIP=y
@@ -136,17 +168,19 @@ CONFIG_RD_LZO=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SYSCTL=y
 CONFIG_ANON_INODES=y
-# CONFIG_EXPERT is not set
+CONFIG_HAVE_UID16=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_HOTPLUG=y
+CONFIG_HAVE_PCSPKR_PLATFORM=y
+CONFIG_EXPERT=y
 CONFIG_UID16=y
 # CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-CONFIG_HOTPLUG=y
+CONFIG_KALLSYMS_ALL=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
 CONFIG_ELF_CORE=y
 CONFIG_PCSPKR_PLATFORM=y
-CONFIG_HAVE_PCSPKR_PLATFORM=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_EPOLL=y
@@ -155,46 +189,68 @@ CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
 CONFIG_AIO=y
-# CONFIG_EMBEDDED is not set
+CONFIG_PCI_QUIRKS=y
+CONFIG_EMBEDDED=y
 CONFIG_HAVE_PERF_EVENTS=y
 
 #
 # Kernel Performance Events And Counters
 #
 CONFIG_PERF_EVENTS=y
-# CONFIG_PERF_COUNTERS is not set
 # CONFIG_DEBUG_PERF_USE_VMALLOC is not set
 CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_PCI_QUIRKS=y
 CONFIG_SLUB_DEBUG=y
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_SLAB is not set
 CONFIG_SLUB=y
+# CONFIG_SLOB is not set
 # CONFIG_PROFILING is not set
+CONFIG_TRACEPOINTS=y
 CONFIG_HAVE_OPROFILE=y
+CONFIG_OPROFILE_NMI_TIMER=y
 # CONFIG_KPROBES is not set
-# CONFIG_JUMP_LABEL is not set
+CONFIG_JUMP_LABEL=y
+# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
 CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_ARCH_USE_BUILTIN_BSWAP=y
 CONFIG_USER_RETURN_NOTIFIER=y
 CONFIG_HAVE_IOREMAP_PROT=y
 CONFIG_HAVE_KPROBES=y
 CONFIG_HAVE_KRETPROBES=y
 CONFIG_HAVE_OPTPROBES=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
 CONFIG_HAVE_ARCH_TRACEHOOK=y
 CONFIG_HAVE_DMA_ATTRS=y
 CONFIG_USE_GENERIC_SMP_HELPERS=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
 CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_CLK=y
 CONFIG_HAVE_DMA_API_DEBUG=y
 CONFIG_HAVE_HW_BREAKPOINT=y
 CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
 CONFIG_HAVE_USER_RETURN_NOTIFIER=y
 CONFIG_HAVE_PERF_EVENTS_NMI=y
+CONFIG_HAVE_PERF_REGS=y
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y
 CONFIG_HAVE_ARCH_JUMP_LABEL=y
 CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
+CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
+CONFIG_HAVE_CMPXCHG_LOCAL=y
+CONFIG_HAVE_CMPXCHG_DOUBLE=y
+CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y
+CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_SECCOMP_FILTER=y
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+CONFIG_MODULES_USE_ELF_REL=y
+CONFIG_CLONE_BACKWARDS=y
+CONFIG_OLD_SIGSUSPEND3=y
+CONFIG_OLD_SIGACTION=y
 
 #
 # GCOV-based kernel profiling
 #
+# CONFIG_GCOV_KERNEL is not set
 CONFIG_HAVE_GENERIC_DMA_COHERENT=y
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
@@ -202,15 +258,40 @@ CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
 # CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
 # CONFIG_MODVERSIONS is not set
-# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_MODULE_SIG is not set
 CONFIG_STOP_MACHINE=y
 CONFIG_BLOCK=y
 CONFIG_LBDAF=y
 CONFIG_BLK_DEV_BSG=y
 CONFIG_BLK_DEV_BSGLIB=y
-# CONFIG_BLK_DEV_INTEGRITY is not set
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLK_DEV_THROTTLING=y
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+CONFIG_LDM_PARTITION=y
+# CONFIG_LDM_DEBUG is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
 
 #
 # IO Schedulers
@@ -218,84 +299,64 @@ CONFIG_BLK_DEV_BSGLIB=y
 CONFIG_IOSCHED_NOOP=y
 CONFIG_IOSCHED_DEADLINE=y
 CONFIG_IOSCHED_CFQ=y
+CONFIG_CFQ_GROUP_IOSCHED=y
 # CONFIG_DEFAULT_DEADLINE is not set
 CONFIG_DEFAULT_CFQ=y
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="cfq"
 CONFIG_PREEMPT_NOTIFIERS=y
 CONFIG_PADATA=y
-# CONFIG_INLINE_SPIN_TRYLOCK is not set
-# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
-# CONFIG_INLINE_SPIN_LOCK is not set
-# CONFIG_INLINE_SPIN_LOCK_BH is not set
-# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
-# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
-CONFIG_INLINE_SPIN_UNLOCK=y
-# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_ASN1=m
 CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
-# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
-# CONFIG_INLINE_READ_TRYLOCK is not set
-# CONFIG_INLINE_READ_LOCK is not set
-# CONFIG_INLINE_READ_LOCK_BH is not set
-# CONFIG_INLINE_READ_LOCK_IRQ is not set
-# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
 CONFIG_INLINE_READ_UNLOCK=y
-# CONFIG_INLINE_READ_UNLOCK_BH is not set
 CONFIG_INLINE_READ_UNLOCK_IRQ=y
-# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
-# CONFIG_INLINE_WRITE_TRYLOCK is not set
-# CONFIG_INLINE_WRITE_LOCK is not set
-# CONFIG_INLINE_WRITE_LOCK_BH is not set
-# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
-# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
 CONFIG_INLINE_WRITE_UNLOCK=y
-# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
 CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
-# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
 CONFIG_MUTEX_SPIN_ON_OWNER=y
 CONFIG_FREEZER=y
 
 #
 # Processor type and features
 #
-CONFIG_TICK_ONESHOT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
-CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
+CONFIG_ZONE_DMA=y
 CONFIG_SMP=y
 CONFIG_X86_MPPARSE=y
 CONFIG_X86_BIGSMP=y
 CONFIG_X86_EXTENDED_PLATFORM=y
+# CONFIG_X86_GOLDFISH is not set
 # CONFIG_X86_WANT_INTEL_MID is not set
+CONFIG_X86_INTEL_LPSS=y
 # CONFIG_X86_RDC321X is not set
-# CONFIG_X86_32_NON_STANDARD is not set
+CONFIG_X86_32_NON_STANDARD=y
+# CONFIG_X86_NUMAQ is not set
 CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
-# CONFIG_X86_32_IRIS is not set
+# CONFIG_STA2X11 is not set
+# CONFIG_X86_SUMMIT is not set
+# CONFIG_X86_ES7000 is not set
+CONFIG_X86_32_IRIS=m
 CONFIG_SCHED_OMIT_FRAME_POINTER=y
-CONFIG_PARAVIRT_GUEST=y
-# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_PARAVIRT=y
+# CONFIG_PARAVIRT_DEBUG is not set
+# CONFIG_PARAVIRT_SPINLOCKS is not set
 CONFIG_XEN=y
 CONFIG_XEN_DOM0=y
 CONFIG_XEN_PRIVILEGED_GUEST=y
 CONFIG_XEN_PVHVM=y
-CONFIG_XEN_MAX_DOMAIN_MEMORY=128
+CONFIG_XEN_MAX_DOMAIN_MEMORY=64
 CONFIG_XEN_SAVE_RESTORE=y
-CONFIG_KVM_CLOCK=y
+CONFIG_XEN_DEBUG_FS=y
 CONFIG_KVM_GUEST=y
-CONFIG_LGUEST_GUEST=y
-CONFIG_PARAVIRT=y
-CONFIG_PARAVIRT_SPINLOCKS=y
+# CONFIG_LGUEST_GUEST is not set
+CONFIG_PARAVIRT_TIME_ACCOUNTING=y
 CONFIG_PARAVIRT_CLOCK=y
-# CONFIG_PARAVIRT_DEBUG is not set
 CONFIG_NO_BOOTMEM=y
 # CONFIG_MEMTEST is not set
-# CONFIG_M386 is not set
 # CONFIG_M486 is not set
 # CONFIG_M586 is not set
-CONFIG_M586TSC=y
+# CONFIG_M586TSC is not set
 # CONFIG_M586MMX is not set
-# CONFIG_M686 is not set
+CONFIG_M686=y
 # CONFIG_MPENTIUMII is not set
 # CONFIG_MPENTIUMIII is not set
 # CONFIG_MPENTIUMM is not set
@@ -317,22 +378,17 @@ CONFIG_M586TSC=y
 # CONFIG_MATOM is not set
 CONFIG_X86_GENERIC=y
 CONFIG_X86_INTERNODE_CACHE_SHIFT=6
-CONFIG_X86_CMPXCHG=y
-CONFIG_CMPXCHG_LOCAL=y
-CONFIG_CMPXCHG_DOUBLE=y
 CONFIG_X86_L1_CACHE_SHIFT=6
-CONFIG_X86_XADD=y
-CONFIG_X86_PPRO_FENCE=y
-CONFIG_X86_F00F_BUG=y
-CONFIG_X86_WP_WORKS_OK=y
-CONFIG_X86_INVLPG=y
-CONFIG_X86_BSWAP=y
-CONFIG_X86_POPAD_OK=y
+# CONFIG_X86_PPRO_FENCE is not set
 CONFIG_X86_ALIGNMENT_16=y
 CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
 CONFIG_X86_TSC=y
 CONFIG_X86_CMPXCHG64=y
+CONFIG_X86_CMOV=y
 CONFIG_X86_MINIMUM_CPU_FAMILY=5
+CONFIG_X86_DEBUGCTLMSR=y
+# CONFIG_PROCESSOR_SELECT is not set
 CONFIG_CPU_SUP_INTEL=y
 CONFIG_CPU_SUP_CYRIX_32=y
 CONFIG_CPU_SUP_AMD=y
@@ -344,10 +400,9 @@ CONFIG_HPET_EMULATE_RTC=y
 CONFIG_DMI=y
 CONFIG_SWIOTLB=y
 CONFIG_IOMMU_HELPER=y
-CONFIG_NR_CPUS=256
+CONFIG_NR_CPUS=32
 CONFIG_SCHED_SMT=y
 CONFIG_SCHED_MC=y
-CONFIG_IRQ_TIME_ACCOUNTING=y
 CONFIG_PREEMPT_NONE=y
 # CONFIG_PREEMPT_VOLUNTARY is not set
 # CONFIG_PREEMPT is not set
@@ -357,28 +412,34 @@ CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
 CONFIG_X86_MCE=y
 CONFIG_X86_MCE_INTEL=y
 CONFIG_X86_MCE_AMD=y
-CONFIG_X86_ANCIENT_MCE=y
+# CONFIG_X86_ANCIENT_MCE is not set
 CONFIG_X86_MCE_THRESHOLD=y
-CONFIG_X86_MCE_INJECT=m
+# CONFIG_X86_MCE_INJECT is not set
 CONFIG_X86_THERMAL_VECTOR=y
 CONFIG_VM86=y
 CONFIG_TOSHIBA=m
 CONFIG_I8K=m
-CONFIG_X86_REBOOTFIXUPS=y
+# CONFIG_X86_REBOOTFIXUPS is not set
 CONFIG_MICROCODE=m
 CONFIG_MICROCODE_INTEL=y
 CONFIG_MICROCODE_AMD=y
 CONFIG_MICROCODE_OLD_INTERFACE=y
-CONFIG_X86_MSR=m
+CONFIG_MICROCODE_INTEL_LIB=y
+CONFIG_MICROCODE_INTEL_EARLY=y
+CONFIG_MICROCODE_EARLY=y
 CONFIG_X86_CPUID=y
 # CONFIG_NOHIGHMEM is not set
 # CONFIG_HIGHMEM4G is not set
 CONFIG_HIGHMEM64G=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
 CONFIG_PAGE_OFFSET=0xC0000000
 CONFIG_HIGHMEM=y
 CONFIG_X86_PAE=y
 CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
 CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+# CONFIG_NUMA is not set
 CONFIG_ARCH_FLATMEM_ENABLE=y
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
 CONFIG_ARCH_SELECT_MEMORY_MODEL=y
@@ -390,9 +451,15 @@ CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
 CONFIG_SPARSEMEM_STATIC=y
 CONFIG_HAVE_MEMBLOCK=y
+CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
+CONFIG_ARCH_DISCARD_MEMBLOCK=y
+CONFIG_MEMORY_ISOLATION=y
+# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
 CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_COMPACTION is not set
+CONFIG_BALLOON_COMPACTION=y
+CONFIG_COMPACTION=y
+CONFIG_MIGRATION=y
 CONFIG_PHYS_ADDR_T_64BIT=y
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_BOUNCE=y
@@ -401,19 +468,28 @@ CONFIG_MMU_NOTIFIER=y
 CONFIG_KSM=y
 CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
 CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
-# CONFIG_MEMORY_FAILURE is not set
-# CONFIG_TRANSPARENT_HUGEPAGE is not set
-# CONFIG_CLEANCACHE is not set
-# CONFIG_HIGHPTE is not set
-# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
+CONFIG_MEMORY_FAILURE=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
+# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set
+CONFIG_CROSS_MEMORY_ATTACH=y
+CONFIG_CLEANCACHE=y
+# CONFIG_FRONTSWAP is not set
+CONFIG_HIGHPTE=y
+CONFIG_X86_CHECK_BIOS_CORRUPTION=y
+CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y
 CONFIG_X86_RESERVE_LOW=64
-CONFIG_MATH_EMULATION=y
+# CONFIG_MATH_EMULATION is not set
 CONFIG_MTRR=y
-# CONFIG_MTRR_SANITIZER is not set
+CONFIG_MTRR_SANITIZER=y
+CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
+CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
 CONFIG_X86_PAT=y
 CONFIG_ARCH_USES_PG_UNCACHED=y
 CONFIG_ARCH_RANDOM=y
-# CONFIG_EFI is not set
+CONFIG_X86_SMAP=y
+CONFIG_EFI=y
+CONFIG_EFI_STUB=y
 CONFIG_SECCOMP=y
 CONFIG_CC_STACKPROTECTOR=y
 # CONFIG_HZ_100 is not set
@@ -423,12 +499,14 @@ CONFIG_HZ_300=y
 CONFIG_HZ=300
 CONFIG_SCHED_HRTICK=y
 # CONFIG_KEXEC is not set
-# CONFIG_CRASH_DUMP is not set
-CONFIG_PHYSICAL_START=0x1000000
-# CONFIG_RELOCATABLE is not set
-CONFIG_PHYSICAL_ALIGN=0x100000
+CONFIG_CRASH_DUMP=y
+CONFIG_PHYSICAL_START=0x400000
+CONFIG_RELOCATABLE=y
+CONFIG_X86_NEED_RELOCS=y
+CONFIG_PHYSICAL_ALIGN=0x400000
 CONFIG_HOTPLUG_CPU=y
-CONFIG_COMPAT_VDSO=y
+# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
+# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
 # CONFIG_CMDLINE_BOOL is not set
 CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
 
@@ -438,43 +516,55 @@ CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
 CONFIG_SUSPEND=y
 CONFIG_SUSPEND_FREEZER=y
 CONFIG_HIBERNATE_CALLBACKS=y
-# CONFIG_HIBERNATION is not set
+CONFIG_HIBERNATION=y
+CONFIG_PM_STD_PARTITION=""
 CONFIG_PM_SLEEP=y
 CONFIG_PM_SLEEP_SMP=y
+# CONFIG_PM_AUTOSLEEP is not set
+# CONFIG_PM_WAKELOCKS is not set
 CONFIG_PM_RUNTIME=y
 CONFIG_PM=y
 # CONFIG_PM_DEBUG is not set
+CONFIG_PM_CLK=y
 CONFIG_ACPI=y
 CONFIG_ACPI_SLEEP=y
-# CONFIG_ACPI_PROCFS is not set
-CONFIG_ACPI_PROCFS_POWER=y
+CONFIG_ACPI_PROCFS=y
+# CONFIG_ACPI_PROCFS_POWER is not set
 CONFIG_ACPI_EC_DEBUGFS=m
-CONFIG_ACPI_PROC_EVENT=y
-CONFIG_ACPI_AC=m
-CONFIG_ACPI_BATTERY=m
-CONFIG_ACPI_BUTTON=m
+# CONFIG_ACPI_PROC_EVENT is not set
+CONFIG_ACPI_AC=y
+CONFIG_ACPI_BATTERY=y
+CONFIG_ACPI_BUTTON=y
 CONFIG_ACPI_VIDEO=m
-CONFIG_ACPI_FAN=m
+CONFIG_ACPI_FAN=y
 CONFIG_ACPI_DOCK=y
-CONFIG_ACPI_PROCESSOR=m
+CONFIG_ACPI_I2C=m
+CONFIG_ACPI_PROCESSOR=y
 CONFIG_ACPI_IPMI=m
 CONFIG_ACPI_HOTPLUG_CPU=y
 CONFIG_ACPI_PROCESSOR_AGGREGATOR=m
-CONFIG_ACPI_THERMAL=m
-CONFIG_ACPI_CUSTOM_DSDT_FILE=""
+CONFIG_ACPI_THERMAL=y
 # CONFIG_ACPI_CUSTOM_DSDT is not set
-CONFIG_ACPI_BLACKLIST_YEAR=0
+CONFIG_ACPI_INITRD_TABLE_OVERRIDE=y
+CONFIG_ACPI_BLACKLIST_YEAR=1999
 # CONFIG_ACPI_DEBUG is not set
-CONFIG_ACPI_PCI_SLOT=m
+CONFIG_ACPI_PCI_SLOT=y
 CONFIG_X86_PM_TIMER=y
-CONFIG_ACPI_CONTAINER=m
+CONFIG_ACPI_CONTAINER=y
 CONFIG_ACPI_SBS=m
-# CONFIG_ACPI_HED is not set
-# CONFIG_ACPI_APEI is not set
-# CONFIG_SFI is not set
+CONFIG_ACPI_HED=y
+CONFIG_ACPI_CUSTOM_METHOD=m
+# CONFIG_ACPI_BGRT is not set
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_PCIEAER=y
+CONFIG_ACPI_APEI_MEMORY_FAILURE=y
+# CONFIG_ACPI_APEI_EINJ is not set
+# CONFIG_ACPI_APEI_ERST_DEBUG is not set
+CONFIG_SFI=y
 CONFIG_X86_APM_BOOT=y
-CONFIG_APM=m
-CONFIG_APM_IGNORE_USER_SUSPEND=y
+CONFIG_APM=y
+# CONFIG_APM_IGNORE_USER_SUSPEND is not set
 # CONFIG_APM_DO_ENABLE is not set
 CONFIG_APM_CPU_IDLE=y
 # CONFIG_APM_DISPLAY_BLANK is not set
@@ -485,46 +575,52 @@ CONFIG_APM_CPU_IDLE=y
 #
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_TABLE=y
-CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=m
 CONFIG_CPU_FREQ_STAT_DETAILS=y
-CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
-# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
 # CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
 # CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
 CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=m
-CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=m
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
 
 #
 # x86 CPU frequency scaling drivers
 #
-CONFIG_X86_PCC_CPUFREQ=m
-CONFIG_X86_ACPI_CPUFREQ=m
-CONFIG_X86_POWERNOW_K6=m
-CONFIG_X86_POWERNOW_K7=m
+CONFIG_X86_INTEL_PSTATE=y
+CONFIG_X86_PCC_CPUFREQ=y
+CONFIG_X86_ACPI_CPUFREQ=y
+# CONFIG_X86_ACPI_CPUFREQ_CPB is not set
+# CONFIG_X86_POWERNOW_K6 is not set
+CONFIG_X86_POWERNOW_K7=y
 CONFIG_X86_POWERNOW_K7_ACPI=y
-CONFIG_X86_POWERNOW_K8=m
-CONFIG_X86_GX_SUSPMOD=m
-CONFIG_X86_SPEEDSTEP_CENTRINO=m
-CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE=y
-CONFIG_X86_SPEEDSTEP_ICH=m
-CONFIG_X86_SPEEDSTEP_SMI=m
-CONFIG_X86_P4_CLOCKMOD=m
-CONFIG_X86_CPUFREQ_NFORCE2=m
-CONFIG_X86_LONGRUN=m
-CONFIG_X86_LONGHAUL=m
-CONFIG_X86_E_POWERSAVER=m
+CONFIG_X86_POWERNOW_K8=y
+# CONFIG_X86_AMD_FREQ_SENSITIVITY is not set
+# CONFIG_X86_GX_SUSPMOD is not set
+# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
+CONFIG_X86_SPEEDSTEP_ICH=y
+CONFIG_X86_SPEEDSTEP_SMI=y
+CONFIG_X86_P4_CLOCKMOD=y
+# CONFIG_X86_CPUFREQ_NFORCE2 is not set
+CONFIG_X86_LONGRUN=y
+# CONFIG_X86_LONGHAUL is not set
+# CONFIG_X86_E_POWERSAVER is not set
 
 #
 # shared options
 #
-CONFIG_X86_SPEEDSTEP_LIB=m
+CONFIG_X86_SPEEDSTEP_LIB=y
 # CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK is not set
 CONFIG_CPU_IDLE=y
+# CONFIG_CPU_IDLE_MULTIPLE_DRIVERS is not set
 CONFIG_CPU_IDLE_GOV_LADDER=y
 CONFIG_CPU_IDLE_GOV_MENU=y
+# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
 CONFIG_INTEL_IDLE=y
 
 #
@@ -542,17 +638,22 @@ CONFIG_PCI_XEN=y
 CONFIG_PCI_DOMAINS=y
 # CONFIG_PCI_CNB20LE_QUIRK is not set
 CONFIG_PCIEPORTBUS=y
+CONFIG_HOTPLUG_PCI_PCIE=y
 CONFIG_PCIEAER=y
-# CONFIG_PCIE_ECRC is not set
-# CONFIG_PCIEAER_INJECT is not set
+CONFIG_PCIE_ECRC=y
+CONFIG_PCIEAER_INJECT=m
 CONFIG_PCIEASPM=y
 # CONFIG_PCIEASPM_DEBUG is not set
+# CONFIG_PCIEASPM_DEFAULT is not set
+CONFIG_PCIEASPM_POWERSAVE=y
+# CONFIG_PCIEASPM_PERFORMANCE is not set
 CONFIG_PCIE_PME=y
 CONFIG_ARCH_SUPPORTS_MSI=y
 CONFIG_PCI_MSI=y
 # CONFIG_PCI_DEBUG is not set
-CONFIG_PCI_STUB=m
-CONFIG_XEN_PCIDEV_FRONTEND=y
+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
+CONFIG_PCI_STUB=y
+CONFIG_XEN_PCIDEV_FRONTEND=m
 CONFIG_HT_IRQ=y
 CONFIG_PCI_ATS=y
 CONFIG_PCI_IOV=y
@@ -561,22 +662,14 @@ CONFIG_PCI_IOV=y
 CONFIG_PCI_IOAPIC=y
 CONFIG_PCI_LABEL=y
 CONFIG_ISA_DMA_API=y
-CONFIG_ISA=y
-CONFIG_EISA=y
-CONFIG_EISA_VLB_PRIMING=y
-CONFIG_EISA_PCI_EISA=y
-CONFIG_EISA_VIRTUAL_ROOT=y
-CONFIG_EISA_NAMES=y
-CONFIG_MCA=y
-CONFIG_MCA_LEGACY=y
-# CONFIG_MCA_PROC_FS is not set
-CONFIG_SCx200=m
-CONFIG_SCx200HR_TIMER=m
-CONFIG_ALIX=y
+# CONFIG_ISA is not set
+# CONFIG_SCx200 is not set
+# CONFIG_ALIX is not set
+# CONFIG_NET5501 is not set
+# CONFIG_GEOS is not set
 CONFIG_AMD_NB=y
 CONFIG_PCCARD=m
-CONFIG_PCMCIA=m
-CONFIG_PCMCIA_LOAD_CIS=y
+# CONFIG_PCMCIA is not set
 CONFIG_CARDBUS=y
 
 #
@@ -588,23 +681,25 @@ CONFIG_YENTA_RICOH=y
 CONFIG_YENTA_TI=y
 CONFIG_YENTA_ENE_TUNE=y
 CONFIG_YENTA_TOSHIBA=y
-CONFIG_PD6729=m
-CONFIG_I82092=m
-CONFIG_I82365=m
-CONFIG_TCIC=m
-CONFIG_PCMCIA_PROBE=y
-CONFIG_PCCARD_NONSTATIC=y
-# CONFIG_HOTPLUG_PCI is not set
+CONFIG_HOTPLUG_PCI=y
+# CONFIG_HOTPLUG_PCI_COMPAQ is not set
+# CONFIG_HOTPLUG_PCI_IBM is not set
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_HOTPLUG_PCI_ACPI_IBM=m
+# CONFIG_HOTPLUG_PCI_CPCI is not set
+# CONFIG_HOTPLUG_PCI_SHPC is not set
 # CONFIG_RAPIDIO is not set
 
 #
 # Executable file formats / Emulations
 #
 CONFIG_BINFMT_ELF=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_BINFMT_SCRIPT=y
 CONFIG_HAVE_AOUT=y
-# CONFIG_BINFMT_AOUT is not set
-# CONFIG_BINFMT_MISC is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_COREDUMP=y
 CONFIG_HAVE_ATOMIC_IOMAP=y
 CONFIG_HAVE_TEXT_POKE_SMP=y
 CONFIG_NET=y
@@ -613,10 +708,13 @@ CONFIG_NET=y
 # Networking options
 #
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
 CONFIG_XFRM=y
-CONFIG_XFRM_USER=m
-# CONFIG_XFRM_SUB_POLICY is not set
+CONFIG_XFRM_ALGO=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_SUB_POLICY=y
 CONFIG_XFRM_MIGRATE=y
 CONFIG_XFRM_STATISTICS=y
 CONFIG_XFRM_IPCOMP=m
@@ -625,7 +723,7 @@ CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
-# CONFIG_IP_FIB_TRIE_STATS is not set
+CONFIG_IP_FIB_TRIE_STATS=y
 CONFIG_IP_MULTIPLE_TABLES=y
 CONFIG_IP_ROUTE_MULTIPATH=y
 CONFIG_IP_ROUTE_VERBOSE=y
@@ -633,28 +731,31 @@ CONFIG_IP_ROUTE_CLASSID=y
 # CONFIG_IP_PNP is not set
 CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IP_TUNNEL=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPGRE_BROADCAST=y
 CONFIG_IP_MROUTE=y
-# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
 CONFIG_IP_PIMSM_V1=y
 CONFIG_IP_PIMSM_V2=y
-CONFIG_ARPD=y
+# CONFIG_ARPD is not set
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_TUNNEL=m
 CONFIG_INET_TUNNEL=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_INET_LRO=y
 CONFIG_INET_DIAG=m
 CONFIG_INET_TCP_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_TCP_CONG_ADVANCED=y
 CONFIG_TCP_CONG_BIC=m
-CONFIG_TCP_CONG_CUBIC=m
+CONFIG_TCP_CONG_CUBIC=y
 CONFIG_TCP_CONG_WESTWOOD=m
 CONFIG_TCP_CONG_HTCP=m
 CONFIG_TCP_CONG_HSTCP=m
@@ -665,13 +766,15 @@ CONFIG_TCP_CONG_LP=m
 CONFIG_TCP_CONG_VENO=m
 CONFIG_TCP_CONG_YEAH=m
 CONFIG_TCP_CONG_ILLINOIS=m
-CONFIG_DEFAULT_RENO=y
-CONFIG_DEFAULT_TCP_CONG="reno"
-# CONFIG_TCP_MD5SIG is not set
+CONFIG_DEFAULT_CUBIC=y
+# CONFIG_DEFAULT_RENO is not set
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
 CONFIG_IPV6=y
-# CONFIG_IPV6_PRIVACY is not set
-# CONFIG_IPV6_ROUTER_PREF is not set
-# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
@@ -683,14 +786,18 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=m
 CONFIG_INET6_XFRM_MODE_BEET=m
 CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
 CONFIG_IPV6_SIT=m
-# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_SIT_6RD=y
 CONFIG_IPV6_NDISC_NODETYPE=y
 CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_IPV6_MULTIPLE_TABLES=y
-# CONFIG_IPV6_SUBTREES is not set
-# CONFIG_IPV6_MROUTE is not set
-# CONFIG_NETWORK_SECMARK is not set
-CONFIG_NETWORK_PHY_TIMESTAMPING=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+# CONFIG_NETLABEL is not set
+CONFIG_NETWORK_SECMARK=y
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
 CONFIG_NETFILTER=y
 # CONFIG_NETFILTER_DEBUG is not set
 CONFIG_NETFILTER_ADVANCED=y
@@ -700,13 +807,18 @@ CONFIG_BRIDGE_NETFILTER=y
 # Core Netfilter Configuration
 #
 CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_NETLINK_ACCT=m
 CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NETFILTER_NETLINK_LOG=m
-CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK=y
 CONFIG_NF_CONNTRACK_MARK=y
-# CONFIG_NF_CONNTRACK_ZONES is not set
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_CONNTRACK_PROCFS=y
 CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
 CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CONNTRACK_LABELS=y
 CONFIG_NF_CT_PROTO_DCCP=m
 CONFIG_NF_CT_PROTO_GRE=m
 CONFIG_NF_CT_PROTO_SCTP=m
@@ -723,14 +835,28 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NF_CT_NETLINK_HELPER=m
+CONFIG_NETFILTER_NETLINK_QUEUE_CT=y
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_NEEDED=y
+CONFIG_NF_NAT_PROTO_DCCP=m
+CONFIG_NF_NAT_PROTO_UDPLITE=m
+CONFIG_NF_NAT_PROTO_SCTP=m
+CONFIG_NF_NAT_AMANDA=m
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+CONFIG_NF_NAT_SIP=m
+CONFIG_NF_NAT_TFTP=m
 CONFIG_NETFILTER_TPROXY=m
-CONFIG_NETFILTER_XTABLES=m
+CONFIG_NETFILTER_XTABLES=y
 
 #
 # Xtables combined modules
 #
 CONFIG_NETFILTER_XT_MARK=m
 CONFIG_NETFILTER_XT_CONNMARK=m
+CONFIG_NETFILTER_XT_SET=m
 
 #
 # Xtables targets
@@ -739,20 +865,26 @@ CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
 CONFIG_NETFILTER_XT_TARGET_CT=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_HL=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
 CONFIG_NETFILTER_XT_TARGET_LED=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_IMQ=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NETMAP=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
 CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_RATEEST=m
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
 CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
 
@@ -760,9 +892,11 @@ CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
 # Xtables matches
 #
 CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
@@ -770,17 +904,19 @@ CONFIG_NETFILTER_XT_MATCH_CPU=m
 CONFIG_NETFILTER_XT_MATCH_DCCP=m
 CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ECN=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_HELPER=m
 CONFIG_NETFILTER_XT_MATCH_HL=m
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
-CONFIG_NETFILTER_XT_MATCH_IPVS=m
+# CONFIG_NETFILTER_XT_MATCH_IPVS is not set
 CONFIG_NETFILTER_XT_MATCH_LENGTH=m
 CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
@@ -800,7 +936,19 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
-# CONFIG_IP_SET is not set
+CONFIG_IP_SET=m
+CONFIG_IP_SET_MAX=256
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_IP_VS=m
 CONFIG_IP_VS_IPV6=y
 # CONFIG_IP_VS_DEBUG is not set
@@ -830,6 +978,11 @@ CONFIG_IP_VS_SH=m
 CONFIG_IP_VS_SED=m
 CONFIG_IP_VS_NQ=m
 
+#
+# IPVS SH scheduler
+#
+CONFIG_IP_VS_SH_TAB_BITS=8
+
 #
 # IPVS application helper
 #
@@ -840,40 +993,31 @@ CONFIG_IP_VS_PE_SIP=m
 #
 # IP: Netfilter Configuration
 #
-CONFIG_NF_DEFRAG_IPV4=m
-CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_CONNTRACK_PROC_COMPAT=y
-CONFIG_IP_NF_QUEUE=m
-CONFIG_IP_NF_IPTABLES=m
+CONFIG_NF_DEFRAG_IPV4=y
+CONFIG_NF_CONNTRACK_IPV4=y
+# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_IP_NF_IPTABLES=y
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
-CONFIG_NF_NAT_NEEDED=y
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
 CONFIG_NF_NAT_SNMP_BASIC=m
-CONFIG_NF_NAT_PROTO_DCCP=m
 CONFIG_NF_NAT_PROTO_GRE=m
-CONFIG_NF_NAT_PROTO_UDPLITE=m
-CONFIG_NF_NAT_PROTO_SCTP=m
-CONFIG_NF_NAT_FTP=m
-CONFIG_NF_NAT_IRC=m
-CONFIG_NF_NAT_TFTP=m
-CONFIG_NF_NAT_AMANDA=m
 CONFIG_NF_NAT_PPTP=m
 CONFIG_NF_NAT_H323=m
-CONFIG_NF_NAT_SIP=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
 CONFIG_IP_NF_TARGET_TTL=m
 CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
 CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
@@ -882,10 +1026,9 @@ CONFIG_IP_NF_MATCH_IPP2P=m
 #
 # IPv6: Netfilter Configuration
 #
-CONFIG_NF_DEFRAG_IPV6=m
-CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
-CONFIG_IP6_NF_IPTABLES=m
+CONFIG_NF_DEFRAG_IPV6=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
 CONFIG_IP6_NF_MATCH_FRAG=m
@@ -893,13 +1036,17 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_BRIDGE_NF_EBTABLES=m
 CONFIG_BRIDGE_EBT_BROUTE=m
 CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -920,49 +1067,52 @@ CONFIG_BRIDGE_EBT_MARK_T=m
 CONFIG_BRIDGE_EBT_REDIRECT=m
 CONFIG_BRIDGE_EBT_SNAT=m
 CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_ULOG=m
+# CONFIG_BRIDGE_EBT_ULOG is not set
 CONFIG_BRIDGE_EBT_NFLOG=m
 # CONFIG_IP_DCCP is not set
-# CONFIG_IP_SCTP is not set
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set
+CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y
+# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set
+CONFIG_SCTP_COOKIE_HMAC_MD5=y
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 # CONFIG_RDS is not set
 # CONFIG_TIPC is not set
 CONFIG_ATM=m
 CONFIG_ATM_CLIP=m
-CONFIG_ATM_CLIP_NO_ICMP=y
+# CONFIG_ATM_CLIP_NO_ICMP is not set
 # CONFIG_ATM_LANE is not set
 CONFIG_ATM_BR2684=m
 # CONFIG_ATM_BR2684_IPFILTER is not set
 CONFIG_L2TP=m
+# CONFIG_L2TP_DEBUGFS is not set
 CONFIG_L2TP_V3=y
 CONFIG_L2TP_IP=m
 CONFIG_L2TP_ETH=m
-CONFIG_STP=m
-CONFIG_GARP=m
-CONFIG_BRIDGE=m
+CONFIG_STP=y
+CONFIG_GARP=y
+CONFIG_BRIDGE=y
 CONFIG_BRIDGE_IGMP_SNOOPING=y
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_HAVE_NET_DSA=y
 CONFIG_NET_DSA=y
 CONFIG_NET_DSA_TAG_DSA=y
 CONFIG_NET_DSA_TAG_EDSA=y
 CONFIG_NET_DSA_TAG_TRAILER=y
-CONFIG_NET_DSA_MV88E6XXX=y
-CONFIG_NET_DSA_MV88E6060=y
-CONFIG_NET_DSA_MV88E6XXX_NEED_PPU=y
-CONFIG_NET_DSA_MV88E6131=y
-CONFIG_NET_DSA_MV88E6123_61_65=y
-CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q=y
 CONFIG_VLAN_8021Q_GVRP=y
+# CONFIG_VLAN_8021Q_MVRP is not set
 # CONFIG_DECNET is not set
-CONFIG_LLC=m
+CONFIG_LLC=y
 # CONFIG_LLC2 is not set
 # CONFIG_IPX is not set
 # CONFIG_ATALK is not set
 # CONFIG_X25 is not set
 # CONFIG_LAPB is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
 # CONFIG_PHONET is not set
-CONFIG_IEEE802154=m
-CONFIG_IEEE802154_6LOWPAN=m
+# CONFIG_IEEE802154 is not set
 CONFIG_NET_SCHED=y
 
 #
@@ -986,7 +1136,10 @@ CONFIG_NET_SCH_DRR=m
 CONFIG_NET_SCH_MQPRIO=m
 CONFIG_NET_SCH_CHOKE=m
 CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
 CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
 
 #
 # Classification
@@ -1002,6 +1155,7 @@ CONFIG_CLS_U32_MARK=y
 CONFIG_NET_CLS_RSVP=m
 CONFIG_NET_CLS_RSVP6=m
 CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=m
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_STACK=32
 CONFIG_NET_EMATCH_CMP=m
@@ -1009,6 +1163,7 @@ CONFIG_NET_EMATCH_NBYTE=m
 CONFIG_NET_EMATCH_U32=m
 CONFIG_NET_EMATCH_META=m
 CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_EMATCH_IPSET=m
 CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=m
 CONFIG_NET_ACT_GACT=m
@@ -1025,67 +1180,30 @@ CONFIG_NET_SCH_FIFO=y
 # CONFIG_DCB is not set
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_BLA=y
+CONFIG_BATMAN_ADV_DAT=y
+CONFIG_BATMAN_ADV_NC=y
 # CONFIG_BATMAN_ADV_DEBUG is not set
+CONFIG_OPENVSWITCH=m
+CONFIG_VSOCKETS=m
+CONFIG_VMWARE_VMCI_VSOCKETS=m
+CONFIG_NETLINK_MMAP=y
+CONFIG_NETLINK_DIAG=m
 CONFIG_RPS=y
 CONFIG_RFS_ACCEL=y
 CONFIG_XPS=y
+CONFIG_NETPRIO_CGROUP=m
+CONFIG_BQL=y
 
 #
 # Network testing
 #
 # CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_DROP_MONITOR is not set
 # CONFIG_HAMRADIO is not set
 # CONFIG_CAN is not set
-CONFIG_IRDA=m
-
-#
-# IrDA protocols
-#
-CONFIG_IRLAN=m
-CONFIG_IRNET=m
-CONFIG_IRCOMM=m
-# CONFIG_IRDA_ULTRA is not set
-
-#
-# IrDA options
-#
-# CONFIG_IRDA_CACHE_LAST_LSAP is not set
-# CONFIG_IRDA_FAST_RR is not set
-# CONFIG_IRDA_DEBUG is not set
-
-#
-# Infrared-port device drivers
-#
-
-#
-# SIR device drivers
-#
-CONFIG_IRTTY_SIR=m
-
-#
-# Dongle support
-#
-# CONFIG_DONGLE is not set
-CONFIG_KINGSUN_DONGLE=m
-CONFIG_KSDAZZLE_DONGLE=m
-CONFIG_KS959_DONGLE=m
-
-#
-# FIR device drivers
-#
-CONFIG_USB_IRDA=m
-CONFIG_SIGMATEL_FIR=m
-CONFIG_NSC_FIR=m
-CONFIG_WINBOND_FIR=m
-CONFIG_TOSHIBA_FIR=m
-CONFIG_SMC_IRCC_FIR=m
-CONFIG_ALI_FIR=m
-CONFIG_VLSI_FIR=m
-CONFIG_VIA_FIR=m
-CONFIG_MCS_FIR=m
+# CONFIG_IRDA is not set
 CONFIG_BT=m
-CONFIG_BT_L2CAP=y
-CONFIG_BT_SCO=y
 CONFIG_BT_RFCOMM=m
 CONFIG_BT_RFCOMM_TTY=y
 CONFIG_BT_BNEP=m
@@ -1104,21 +1222,16 @@ CONFIG_BT_HCIUART_H4=y
 CONFIG_BT_HCIUART_BCSP=y
 CONFIG_BT_HCIUART_ATH3K=y
 CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_3WIRE=y
 CONFIG_BT_HCIBCM203X=m
 CONFIG_BT_HCIBPA10X=m
 CONFIG_BT_HCIBFUSB=m
-CONFIG_BT_HCIDTL1=m
-CONFIG_BT_HCIBT3C=m
-CONFIG_BT_HCIBLUECARD=m
-CONFIG_BT_HCIBTUART=m
 CONFIG_BT_HCIVHCI=m
 CONFIG_BT_MRVL=m
 CONFIG_BT_MRVL_SDIO=m
 CONFIG_BT_ATH3K=m
 CONFIG_BT_WILINK=m
-CONFIG_AF_RXRPC=m
-# CONFIG_AF_RXRPC_DEBUG is not set
-CONFIG_RXKAD=m
+# CONFIG_AF_RXRPC is not set
 CONFIG_FIB_RULES=y
 CONFIG_WIRELESS=y
 CONFIG_WIRELESS_EXT=y
@@ -1130,10 +1243,11 @@ CONFIG_CFG80211=m
 # CONFIG_NL80211_TESTMODE is not set
 # CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
 # CONFIG_CFG80211_REG_DEBUG is not set
-# CONFIG_CFG80211_DEFAULT_PS is not set
+# CONFIG_CFG80211_CERTIFICATION_ONUS is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
 # CONFIG_CFG80211_INTERNAL_REGDB is not set
 CONFIG_CFG80211_WEXT=y
-CONFIG_WIRELESS_EXT_SYSFS=y
 CONFIG_LIB80211=m
 CONFIG_LIB80211_CRYPT_WEP=m
 CONFIG_LIB80211_CRYPT_CCMP=m
@@ -1141,23 +1255,24 @@ CONFIG_LIB80211_CRYPT_TKIP=m
 # CONFIG_LIB80211_DEBUG is not set
 CONFIG_MAC80211=m
 CONFIG_MAC80211_HAS_RC=y
+# CONFIG_MAC80211_RC_PID is not set
 CONFIG_MAC80211_RC_MINSTREL=y
 CONFIG_MAC80211_RC_MINSTREL_HT=y
 CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
 CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
 CONFIG_MAC80211_MESH=y
 CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_MESSAGE_TRACING is not set
 # CONFIG_MAC80211_DEBUG_MENU is not set
 # CONFIG_WIMAX is not set
 CONFIG_RFKILL=m
 CONFIG_RFKILL_LEDS=y
 CONFIG_RFKILL_INPUT=y
-CONFIG_RFKILL_REGULATOR=m
+CONFIG_RFKILL_GPIO=m
 # CONFIG_NET_9P is not set
 # CONFIG_CAIF is not set
-CONFIG_CEPH_LIB=m
-# CONFIG_CEPH_LIB_PRETTYDEBUG is not set
-# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set
+# CONFIG_CEPH_LIB is not set
 # CONFIG_NFC is not set
 
 #
@@ -1167,108 +1282,37 @@ CONFIG_CEPH_LIB=m
 #
 # Generic Driver Options
 #
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_DEVTMPFS is not set
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_UEVENT_HELPER_PATH=""
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
 CONFIG_FW_LOADER=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
 CONFIG_EXTRA_FIRMWARE=""
+CONFIG_FW_LOADER_USER_HELPER=y
 # CONFIG_DEBUG_DRIVER is not set
 # CONFIG_DEBUG_DEVRES is not set
 CONFIG_SYS_HYPERVISOR=y
+# CONFIG_GENERIC_CPU_DEVICES is not set
 CONFIG_REGMAP=y
 CONFIG_REGMAP_I2C=m
-CONFIG_CONNECTOR=m
-CONFIG_MTD=m
-CONFIG_MTD_TESTS=m
-# CONFIG_MTD_REDBOOT_PARTS is not set
-CONFIG_MTD_AR7_PARTS=m
-
-#
-# User Modules And Translation Layers
-#
-CONFIG_MTD_CHAR=m
-CONFIG_MTD_BLKDEVS=m
-CONFIG_MTD_BLOCK=m
-# CONFIG_MTD_BLOCK_RO is not set
-# CONFIG_FTL is not set
-# CONFIG_NFTL is not set
-# CONFIG_INFTL is not set
-# CONFIG_RFD_FTL is not set
-# CONFIG_SSFDC is not set
-CONFIG_SM_FTL=m
-# CONFIG_MTD_OOPS is not set
-# CONFIG_MTD_SWAP is not set
-
-#
-# RAM/ROM/Flash chip drivers
-#
-# CONFIG_MTD_CFI is not set
-# CONFIG_MTD_JEDECPROBE is not set
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
-# CONFIG_MTD_CFI_I4 is not set
-# CONFIG_MTD_CFI_I8 is not set
-# CONFIG_MTD_RAM is not set
-# CONFIG_MTD_ROM is not set
-# CONFIG_MTD_ABSENT is not set
-
-#
-# Mapping drivers for chip access
-#
-# CONFIG_MTD_COMPLEX_MAPPINGS is not set
-CONFIG_MTD_PHYSMAP=m
-# CONFIG_MTD_PHYSMAP_COMPAT is not set
-# CONFIG_MTD_TS5500 is not set
-# CONFIG_MTD_INTEL_VR_NOR is not set
-# CONFIG_MTD_PLATRAM is not set
-
-#
-# Self-contained MTD device drivers
-#
-# CONFIG_MTD_PMC551 is not set
-# CONFIG_MTD_SLRAM is not set
-# CONFIG_MTD_PHRAM is not set
-# CONFIG_MTD_MTDRAM is not set
-# CONFIG_MTD_BLOCK2MTD is not set
-
-#
-# Disk-On-Chip Device Drivers
-#
-# CONFIG_MTD_DOC2000 is not set
-# CONFIG_MTD_DOC2001 is not set
-# CONFIG_MTD_DOC2001PLUS is not set
-# CONFIG_MTD_DOCG3 is not set
-CONFIG_MTD_NAND_ECC=m
-# CONFIG_MTD_NAND_ECC_SMC is not set
-# CONFIG_MTD_NAND is not set
-# CONFIG_MTD_ONENAND is not set
-
-#
-# LPDDR flash memory drivers
-#
-CONFIG_MTD_LPDDR=m
-CONFIG_MTD_QINFO_PROBE=m
-CONFIG_MTD_UBI=m
-CONFIG_MTD_UBI_WL_THRESHOLD=4096
-CONFIG_MTD_UBI_BEB_RESERVE=1
-# CONFIG_MTD_UBI_GLUEBI is not set
-# CONFIG_MTD_UBI_DEBUG is not set
+CONFIG_REGMAP_IRQ=y
+CONFIG_DMA_SHARED_BUFFER=y
+
+#
+# Bus devices
+#
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+# CONFIG_MTD is not set
 CONFIG_PARPORT=m
 CONFIG_PARPORT_PC=m
 CONFIG_PARPORT_SERIAL=m
-CONFIG_PARPORT_PC_FIFO=y
-CONFIG_PARPORT_PC_SUPERIO=y
-CONFIG_PARPORT_PC_PCMCIA=m
+# CONFIG_PARPORT_PC_FIFO is not set
+# CONFIG_PARPORT_PC_SUPERIO is not set
 # CONFIG_PARPORT_GSC is not set
-CONFIG_PARPORT_AX88796=m
+# CONFIG_PARPORT_AX88796 is not set
 CONFIG_PARPORT_1284=y
 CONFIG_PARPORT_NOT_PC=y
 CONFIG_PNP=y
@@ -1277,69 +1321,74 @@ CONFIG_PNP=y
 #
 # Protocols
 #
-CONFIG_ISAPNP=y
-CONFIG_PNPBIOS=y
-CONFIG_PNPBIOS_PROC_FS=y
 CONFIG_PNPACPI=y
 CONFIG_BLK_DEV=y
 CONFIG_BLK_DEV_FD=m
-# CONFIG_BLK_DEV_XD is not set
 # CONFIG_PARIDE is not set
+CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m
 CONFIG_BLK_CPQ_DA=m
 CONFIG_BLK_CPQ_CISS_DA=m
 # CONFIG_CISS_SCSI_TAPE is not set
 CONFIG_BLK_DEV_DAC960=m
-# CONFIG_BLK_DEV_UMEM is not set
+CONFIG_BLK_DEV_UMEM=m
 # CONFIG_BLK_DEV_COW_COMMON is not set
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
 CONFIG_BLK_DEV_CRYPTOLOOP=m
 # CONFIG_BLK_DEV_DRBD is not set
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_OSD=m
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_NVME=m
+# CONFIG_BLK_DEV_OSD is not set
 CONFIG_BLK_DEV_SX8=m
-# CONFIG_BLK_DEV_UB is not set
 CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=8
+CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=16384
 # CONFIG_BLK_DEV_XIP is not set
 # CONFIG_CDROM_PKTCDVD is not set
-CONFIG_ATA_OVER_ETH=m
-CONFIG_XEN_BLKDEV_FRONTEND=y
-CONFIG_XEN_BLKDEV_BACKEND=y
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_XEN_BLKDEV_FRONTEND=m
+CONFIG_XEN_BLKDEV_BACKEND=m
 CONFIG_VIRTIO_BLK=m
 # CONFIG_BLK_DEV_HD is not set
-CONFIG_BLK_DEV_RBD=m
+# CONFIG_BLK_DEV_RBD is not set
+CONFIG_BLK_DEV_RSXX=m
+
+#
+# Misc devices
+#
 CONFIG_SENSORS_LIS3LV02D=m
-CONFIG_MISC_DEVICES=y
 # CONFIG_AD525X_DPOT is not set
-# CONFIG_IBM_ASM is not set
+# CONFIG_ATMEL_PWM is not set
+CONFIG_DUMMY_IRQ=m
+CONFIG_IBM_ASM=m
 # CONFIG_PHANTOM is not set
 # CONFIG_INTEL_MID_PTI is not set
 # CONFIG_SGI_IOC4 is not set
 CONFIG_TIFM_CORE=m
 CONFIG_TIFM_7XX1=m
 CONFIG_ICS932S401=m
+# CONFIG_ATMEL_SSC is not set
 CONFIG_ENCLOSURE_SERVICES=m
 CONFIG_CS5535_MFGPT=m
 CONFIG_CS5535_MFGPT_DEFAULT_IRQ=7
 CONFIG_CS5535_CLOCK_EVENT_SRC=m
 CONFIG_HP_ILO=m
-CONFIG_APDS9802ALS=m
-CONFIG_ISL29003=m
-CONFIG_ISL29020=m
-CONFIG_SENSORS_TSL2550=m
-CONFIG_SENSORS_BH1780=m
-CONFIG_SENSORS_BH1770=m
-CONFIG_SENSORS_APDS990X=m
-CONFIG_HMC6352=m
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_BH1780 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set
 CONFIG_DS1682=m
 CONFIG_VMWARE_BALLOON=m
-CONFIG_BMP085=m
+CONFIG_BMP085=y
+CONFIG_BMP085_I2C=m
 CONFIG_PCH_PHUB=m
 CONFIG_USB_SWITCH_FSA9480=m
-CONFIG_C2PORT=m
-CONFIG_C2PORT_DURAMAR_2150=m
+# CONFIG_SRAM is not set
+# CONFIG_C2PORT is not set
 
 #
 # EEPROM support
@@ -1351,62 +1400,63 @@ CONFIG_EEPROM_93CX6=m
 CONFIG_CB710_CORE=m
 # CONFIG_CB710_DEBUG is not set
 CONFIG_CB710_DEBUG_ASSUMPTIONS=y
-CONFIG_IWMC3200TOP=m
-# CONFIG_IWMC3200TOP_DEBUG is not set
-# CONFIG_IWMC3200TOP_DEBUGFS is not set
 
 #
 # Texas Instruments shared transport line discipline
 #
 CONFIG_TI_ST=m
-CONFIG_SENSORS_LIS3_I2C=m
+# CONFIG_SENSORS_LIS3_I2C is not set
 
 #
 # Altera FPGA firmware download module
 #
-# CONFIG_ALTERA_STAPL is not set
+CONFIG_ALTERA_STAPL=m
+CONFIG_INTEL_MEI=m
+CONFIG_INTEL_MEI_ME=m
+CONFIG_VMWARE_VMCI=m
 CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
 #
 # SCSI device support
 #
-CONFIG_SCSI_MOD=m
+CONFIG_SCSI_MOD=y
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=m
+CONFIG_SCSI=y
 CONFIG_SCSI_DMA=y
-# CONFIG_SCSI_TGT is not set
+CONFIG_SCSI_TGT=m
 CONFIG_SCSI_NETLINK=y
 CONFIG_SCSI_PROC_FS=y
 
 #
 # SCSI support type (disk, tape, CD-ROM)
 #
-CONFIG_BLK_DEV_SD=m
-# CONFIG_CHR_DEV_ST is not set
-# CONFIG_CHR_DEV_OSST is not set
-CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
-CONFIG_CHR_DEV_SG=m
+CONFIG_CHR_DEV_SG=y
 CONFIG_CHR_DEV_SCH=m
 CONFIG_SCSI_ENCLOSURE=m
 CONFIG_SCSI_MULTI_LUN=y
-# CONFIG_SCSI_CONSTANTS is not set
-# CONFIG_SCSI_LOGGING is not set
-# CONFIG_SCSI_SCAN_ASYNC is not set
-CONFIG_SCSI_WAIT_SCAN=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
 
 #
 # SCSI Transports
 #
 CONFIG_SCSI_SPI_ATTRS=m
 CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_FC_TGT_ATTRS=y
 CONFIG_SCSI_ISCSI_ATTRS=m
 CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_SCSI_SAS_LIBSAS=m
 CONFIG_SCSI_SAS_ATA=y
 CONFIG_SCSI_SAS_HOST_SMP=y
 CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_SCSI_SRP_TGT_ATTRS=y
 CONFIG_SCSI_LOWLEVEL=y
 CONFIG_ISCSI_TCP=m
 CONFIG_ISCSI_BOOT_SYSFS=m
@@ -1419,36 +1469,29 @@ CONFIG_BLK_DEV_3W_XXXX_RAID=m
 CONFIG_SCSI_HPSA=m
 CONFIG_SCSI_3W_9XXX=m
 CONFIG_SCSI_3W_SAS=m
-CONFIG_SCSI_7000FASST=m
 CONFIG_SCSI_ACARD=m
-CONFIG_SCSI_AHA152X=m
-CONFIG_SCSI_AHA1542=m
-CONFIG_SCSI_AHA1740=m
 CONFIG_SCSI_AACRAID=m
 CONFIG_SCSI_AIC7XXX=m
-CONFIG_AIC7XXX_CMDS_PER_DEVICE=253
-CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-# CONFIG_AIC7XXX_BUILD_FIRMWARE is not set
-# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
+CONFIG_AIC7XXX_RESET_DELAY_MS=5000
+CONFIG_AIC7XXX_DEBUG_ENABLE=y
 CONFIG_AIC7XXX_DEBUG_MASK=0
-# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set
+CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
 # CONFIG_SCSI_AIC7XXX_OLD is not set
 CONFIG_SCSI_AIC79XX=m
 CONFIG_AIC79XX_CMDS_PER_DEVICE=32
-CONFIG_AIC79XX_RESET_DELAY_MS=15000
-# CONFIG_AIC79XX_BUILD_FIRMWARE is not set
+CONFIG_AIC79XX_RESET_DELAY_MS=4000
 # CONFIG_AIC79XX_DEBUG_ENABLE is not set
 CONFIG_AIC79XX_DEBUG_MASK=0
 # CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
 CONFIG_SCSI_AIC94XX=m
 # CONFIG_AIC94XX_DEBUG is not set
 CONFIG_SCSI_MVSAS=m
-CONFIG_SCSI_MVSAS_DEBUG=y
-# CONFIG_SCSI_MVSAS_TASKLET is not set
-CONFIG_SCSI_MVUMI=m
+# CONFIG_SCSI_MVSAS_DEBUG is not set
+CONFIG_SCSI_MVSAS_TASKLET=y
+# CONFIG_SCSI_MVUMI is not set
 CONFIG_SCSI_DPT_I2O=m
 CONFIG_SCSI_ADVANSYS=m
-CONFIG_SCSI_IN2000=m
 CONFIG_SCSI_ARCMSR=m
 CONFIG_MEGARAID_NEWGEN=y
 CONFIG_MEGARAID_MM=m
@@ -1458,30 +1501,28 @@ CONFIG_MEGARAID_SAS=m
 CONFIG_SCSI_MPT2SAS=m
 CONFIG_SCSI_MPT2SAS_MAX_SGE=128
 # CONFIG_SCSI_MPT2SAS_LOGGING is not set
+CONFIG_SCSI_MPT3SAS=m
+CONFIG_SCSI_MPT3SAS_MAX_SGE=128
+# CONFIG_SCSI_MPT3SAS_LOGGING is not set
+CONFIG_SCSI_UFSHCD=m
+CONFIG_SCSI_UFSHCD_PCI=m
+CONFIG_SCSI_UFSHCD_PLATFORM=m
 CONFIG_SCSI_HPTIOP=m
 CONFIG_SCSI_BUSLOGIC=m
-CONFIG_SCSI_FLASHPOINT=y
+# CONFIG_SCSI_FLASHPOINT is not set
 CONFIG_VMWARE_PVSCSI=m
 CONFIG_LIBFC=m
 CONFIG_LIBFCOE=m
 CONFIG_FCOE=m
 CONFIG_FCOE_FNIC=m
 CONFIG_SCSI_DMX3191D=m
-CONFIG_SCSI_DTC3280=m
 CONFIG_SCSI_EATA=m
 CONFIG_SCSI_EATA_TAGGED_QUEUE=y
-CONFIG_SCSI_EATA_LINKED_COMMANDS=y
+# CONFIG_SCSI_EATA_LINKED_COMMANDS is not set
 CONFIG_SCSI_EATA_MAX_TAGS=16
 CONFIG_SCSI_FUTURE_DOMAIN=m
-CONFIG_SCSI_FD_MCS=m
 CONFIG_SCSI_GDTH=m
 CONFIG_SCSI_ISCI=m
-CONFIG_SCSI_GENERIC_NCR5380=m
-CONFIG_SCSI_GENERIC_NCR5380_MMIO=m
-CONFIG_SCSI_GENERIC_NCR53C400=y
-CONFIG_SCSI_IBMMCA=m
-# CONFIG_IBMMCA_SCSI_ORDER_STANDARD is not set
-# CONFIG_IBMMCA_SCSI_DEV_RESET is not set
 CONFIG_SCSI_IPS=m
 CONFIG_SCSI_INITIO=m
 CONFIG_SCSI_INIA100=m
@@ -1489,8 +1530,6 @@ CONFIG_SCSI_PPA=m
 CONFIG_SCSI_IMM=m
 # CONFIG_SCSI_IZIP_EPP16 is not set
 # CONFIG_SCSI_IZIP_SLOW_CTR is not set
-CONFIG_SCSI_NCR53C406A=m
-CONFIG_SCSI_NCR_D700=m
 CONFIG_SCSI_STEX=m
 CONFIG_SCSI_SYM53C8XX_2=m
 CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
@@ -1500,39 +1539,22 @@ CONFIG_SCSI_SYM53C8XX_MMIO=y
 CONFIG_SCSI_IPR=m
 CONFIG_SCSI_IPR_TRACE=y
 CONFIG_SCSI_IPR_DUMP=y
-CONFIG_SCSI_NCR_Q720=m
-CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS=8
-CONFIG_SCSI_NCR53C8XX_MAX_TAGS=32
-CONFIG_SCSI_NCR53C8XX_SYNC=20
-CONFIG_SCSI_PAS16=m
-CONFIG_SCSI_QLOGIC_FAS=m
 CONFIG_SCSI_QLOGIC_1280=m
 CONFIG_SCSI_QLA_FC=m
 CONFIG_SCSI_QLA_ISCSI=m
 CONFIG_SCSI_LPFC=m
-CONFIG_SCSI_SIM710=m
-CONFIG_SCSI_SYM53C416=m
+# CONFIG_SCSI_LPFC_DEBUG_FS is not set
 CONFIG_SCSI_DC395x=m
 CONFIG_SCSI_DC390T=m
-CONFIG_SCSI_T128=m
-CONFIG_SCSI_U14_34F=m
-CONFIG_SCSI_U14_34F_TAGGED_QUEUE=y
-CONFIG_SCSI_U14_34F_LINKED_COMMANDS=y
-CONFIG_SCSI_U14_34F_MAX_TAGS=8
-CONFIG_SCSI_ULTRASTOR=m
-CONFIG_SCSI_NSP32=m
+# CONFIG_SCSI_NSP32 is not set
 # CONFIG_SCSI_DEBUG is not set
 CONFIG_SCSI_PMCRAID=m
 CONFIG_SCSI_PM8001=m
 # CONFIG_SCSI_SRP is not set
 CONFIG_SCSI_BFA_FC=m
-CONFIG_SCSI_LOWLEVEL_PCMCIA=y
-CONFIG_PCMCIA_AHA152X=m
-CONFIG_PCMCIA_FDOMAIN=m
-CONFIG_PCMCIA_NINJA_SCSI=m
-CONFIG_PCMCIA_QLOGIC=m
-CONFIG_PCMCIA_SYM53C500=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_CHELSIO_FCOE=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
@@ -1541,16 +1563,17 @@ CONFIG_SCSI_OSD_INITIATOR=m
 CONFIG_SCSI_OSD_ULD=m
 CONFIG_SCSI_OSD_DPRINT_SENSE=1
 # CONFIG_SCSI_OSD_DEBUG is not set
-CONFIG_ATA=m
+CONFIG_ATA=y
 # CONFIG_ATA_NONSTANDARD is not set
 CONFIG_ATA_VERBOSE_ERROR=y
 CONFIG_ATA_ACPI=y
+# CONFIG_SATA_ZPODD is not set
 CONFIG_SATA_PMP=y
 
 #
 # Controllers with non-SFF native interface
 #
-CONFIG_SATA_AHCI=m
+CONFIG_SATA_AHCI=y
 CONFIG_SATA_AHCI_PLATFORM=m
 CONFIG_SATA_INIC162X=m
 CONFIG_SATA_ACARD_AHCI=m
@@ -1568,7 +1591,8 @@ CONFIG_ATA_BMDMA=y
 #
 # SATA SFF controllers with BMDMA
 #
-CONFIG_ATA_PIIX=m
+CONFIG_ATA_PIIX=y
+CONFIG_SATA_HIGHBANK=m
 CONFIG_SATA_MV=m
 CONFIG_SATA_NV=m
 CONFIG_SATA_PROMISE=m
@@ -1591,7 +1615,7 @@ CONFIG_PATA_ATP867X=m
 CONFIG_PATA_CMD64X=m
 CONFIG_PATA_CS5520=m
 CONFIG_PATA_CS5530=m
-CONFIG_PATA_CS5535=m
+# CONFIG_PATA_CS5535 is not set
 CONFIG_PATA_CS5536=m
 CONFIG_PATA_CYPRESS=m
 CONFIG_PATA_EFAR=m
@@ -1599,7 +1623,7 @@ CONFIG_PATA_HPT366=m
 CONFIG_PATA_HPT37X=m
 CONFIG_PATA_HPT3X2N=m
 CONFIG_PATA_HPT3X3=m
-CONFIG_PATA_HPT3X3_DMA=y
+# CONFIG_PATA_HPT3X3_DMA is not set
 CONFIG_PATA_IT8213=m
 CONFIG_PATA_IT821X=m
 CONFIG_PATA_JMICRON=m
@@ -1611,9 +1635,9 @@ CONFIG_PATA_OLDPIIX=m
 CONFIG_PATA_OPTIDMA=m
 CONFIG_PATA_PDC2027X=m
 CONFIG_PATA_PDC_OLD=m
-CONFIG_PATA_RADISYS=m
+# CONFIG_PATA_RADISYS is not set
 CONFIG_PATA_RDC=m
-CONFIG_PATA_SC1200=m
+# CONFIG_PATA_SC1200 is not set
 CONFIG_PATA_SCH=m
 CONFIG_PATA_SERVERWORKS=m
 CONFIG_PATA_SIL680=m
@@ -1627,91 +1651,106 @@ CONFIG_PATA_WINBOND=m
 # PIO-only SFF controllers
 #
 CONFIG_PATA_CMD640_PCI=m
-CONFIG_PATA_ISAPNP=m
 CONFIG_PATA_MPIIX=m
 CONFIG_PATA_NS87410=m
 CONFIG_PATA_OPTI=m
-CONFIG_PATA_PCMCIA=m
-CONFIG_PATA_QDI=m
+# CONFIG_PATA_PLATFORM is not set
 CONFIG_PATA_RZ1000=m
-CONFIG_PATA_WINBOND_VLB=m
 
 #
 # Generic fallback / legacy drivers
 #
 CONFIG_PATA_ACPI=m
 CONFIG_ATA_GENERIC=m
-CONFIG_PATA_LEGACY=m
+# CONFIG_PATA_LEGACY is not set
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_AUTODETECT=y
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
 CONFIG_MD_RAID1=m
 CONFIG_MD_RAID10=m
 CONFIG_MD_RAID456=m
-# CONFIG_MULTICORE_RAID456 is not set
 CONFIG_MD_MULTIPATH=m
-# CONFIG_MD_FAULTY is not set
-CONFIG_BLK_DEV_DM=m
+CONFIG_MD_FAULTY=m
+CONFIG_BCACHE=m
+# CONFIG_BCACHE_DEBUG is not set
+# CONFIG_BCACHE_EDEBUG is not set
+# CONFIG_BCACHE_CLOSURES_DEBUG is not set
+CONFIG_BLK_DEV_DM=y
 # CONFIG_DM_DEBUG is not set
 CONFIG_DM_BUFIO=m
+CONFIG_DM_BIO_PRISON=m
 CONFIG_DM_PERSISTENT_DATA=m
 CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_THIN_PROVISIONING=m
-# CONFIG_DM_DEBUG_BLOCK_STACK_TRACING is not set
-# CONFIG_DM_DEBUG_SPACE_MAPS is not set
-CONFIG_DM_MIRROR=m
+CONFIG_DM_SNAPSHOT=y
+# CONFIG_DM_THIN_PROVISIONING is not set
+CONFIG_DM_CACHE=m
+CONFIG_DM_CACHE_MQ=m
+CONFIG_DM_CACHE_CLEANER=m
+CONFIG_DM_MIRROR=y
 CONFIG_DM_RAID=m
 CONFIG_DM_LOG_USERSPACE=m
-CONFIG_DM_ZERO=m
+CONFIG_DM_ZERO=y
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_MULTIPATH_QL=m
 CONFIG_DM_MULTIPATH_ST=m
-CONFIG_DM_DELAY=m
+# CONFIG_DM_DELAY is not set
 CONFIG_DM_UEVENT=y
-CONFIG_DM_FLAKEY=m
+# CONFIG_DM_FLAKEY is not set
+CONFIG_DM_VERITY=m
 # CONFIG_TARGET_CORE is not set
 CONFIG_FUSION=y
 CONFIG_FUSION_SPI=m
 CONFIG_FUSION_FC=m
 CONFIG_FUSION_SAS=m
-CONFIG_FUSION_MAX_SGE=128
+CONFIG_FUSION_MAX_SGE=40
 CONFIG_FUSION_CTL=m
-# CONFIG_FUSION_LOGGING is not set
+CONFIG_FUSION_LOGGING=y
 
 #
 # IEEE 1394 (FireWire) support
 #
 CONFIG_FIREWIRE=m
 CONFIG_FIREWIRE_OHCI=m
-CONFIG_FIREWIRE_OHCI_DEBUG=y
 CONFIG_FIREWIRE_SBP2=m
-CONFIG_FIREWIRE_NET=m
+# CONFIG_FIREWIRE_NET is not set
 # CONFIG_FIREWIRE_NOSY is not set
-# CONFIG_I2O is not set
-# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_I2O=m
+# CONFIG_I2O_LCT_NOTIFY_ON_CHANGES is not set
+CONFIG_I2O_EXT_ADAPTEC=y
+CONFIG_I2O_EXT_ADAPTEC_DMA64=y
+CONFIG_I2O_CONFIG=m
+CONFIG_I2O_CONFIG_OLD_IOCTL=y
+CONFIG_I2O_BUS=m
+CONFIG_I2O_BLOCK=m
+CONFIG_I2O_SCSI=m
+CONFIG_I2O_PROC=m
+CONFIG_MACINTOSH_DRIVERS=y
+# CONFIG_MAC_EMUMOUSEBTN is not set
 CONFIG_NETDEVICES=y
 CONFIG_NET_CORE=y
 CONFIG_BONDING=m
 CONFIG_DUMMY=m
-CONFIG_EQUALIZER=m
+# CONFIG_EQUALIZER is not set
+# CONFIG_NET_FC is not set
+CONFIG_MII=m
+CONFIG_IFB=m
+# CONFIG_NET_TEAM is not set
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_NETPOLL=y
+CONFIG_NETPOLL_TRAP=y
+CONFIG_NET_POLL_CONTROLLER=y
 CONFIG_IMQ=m
 # CONFIG_IMQ_BEHAVIOR_AA is not set
 CONFIG_IMQ_BEHAVIOR_AB=y
 # CONFIG_IMQ_BEHAVIOR_BA is not set
 # CONFIG_IMQ_BEHAVIOR_BB is not set
 CONFIG_IMQ_NUM_DEVS=2
-# CONFIG_NET_FC is not set
-CONFIG_MII=m
-CONFIG_IEEE802154_DRIVERS=m
-CONFIG_IEEE802154_FAKEHARD=m
-CONFIG_IFB=m
-CONFIG_MACVLAN=m
-CONFIG_MACVTAP=m
-# CONFIG_NETCONSOLE is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
 CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
@@ -1722,51 +1761,42 @@ CONFIG_ATM_DRIVERS=y
 CONFIG_ATM_TCP=m
 CONFIG_ATM_LANAI=m
 CONFIG_ATM_ENI=m
-CONFIG_ATM_ENI_DEBUG=y
-CONFIG_ATM_ENI_TUNE_BURST=y
-CONFIG_ATM_ENI_BURST_TX_16W=y
-CONFIG_ATM_ENI_BURST_TX_8W=y
-CONFIG_ATM_ENI_BURST_TX_4W=y
-CONFIG_ATM_ENI_BURST_TX_2W=y
-CONFIG_ATM_ENI_BURST_RX_16W=y
-CONFIG_ATM_ENI_BURST_RX_8W=y
-CONFIG_ATM_ENI_BURST_RX_4W=y
-CONFIG_ATM_ENI_BURST_RX_2W=y
+# CONFIG_ATM_ENI_DEBUG is not set
+# CONFIG_ATM_ENI_TUNE_BURST is not set
 CONFIG_ATM_FIRESTREAM=m
-CONFIG_ATM_ZATM=m
-# CONFIG_ATM_ZATM_DEBUG is not set
+# CONFIG_ATM_ZATM is not set
 CONFIG_ATM_NICSTAR=m
-CONFIG_ATM_NICSTAR_USE_SUNI=y
-CONFIG_ATM_NICSTAR_USE_IDT77105=y
+# CONFIG_ATM_NICSTAR_USE_SUNI is not set
+# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set
 CONFIG_ATM_IDT77252=m
 # CONFIG_ATM_IDT77252_DEBUG is not set
 # CONFIG_ATM_IDT77252_RCV_ALL is not set
 CONFIG_ATM_IDT77252_USE_SUNI=y
-CONFIG_ATM_AMBASSADOR=m
-# CONFIG_ATM_AMBASSADOR_DEBUG is not set
-CONFIG_ATM_HORIZON=m
-# CONFIG_ATM_HORIZON_DEBUG is not set
-CONFIG_ATM_IA=m
-# CONFIG_ATM_IA_DEBUG is not set
-CONFIG_ATM_FORE200E=m
-CONFIG_ATM_FORE200E_USE_TASKLET=y
-CONFIG_ATM_FORE200E_TX_RETRY=16
-CONFIG_ATM_FORE200E_DEBUG=0
+# CONFIG_ATM_AMBASSADOR is not set
+# CONFIG_ATM_HORIZON is not set
+# CONFIG_ATM_IA is not set
+# CONFIG_ATM_FORE200E is not set
 CONFIG_ATM_HE=m
-CONFIG_ATM_HE_USE_SUNI=y
+# CONFIG_ATM_HE_USE_SUNI is not set
 CONFIG_ATM_SOLOS=m
 
 #
 # CAIF transport drivers
 #
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_RING=m
+
+#
+# Distributed Switch Architecture drivers
+#
+CONFIG_NET_DSA_MV88E6XXX=y
+CONFIG_NET_DSA_MV88E6060=y
+CONFIG_NET_DSA_MV88E6XXX_NEED_PPU=y
+CONFIG_NET_DSA_MV88E6131=y
+CONFIG_NET_DSA_MV88E6123_61_65=y
 CONFIG_ETHERNET=y
 CONFIG_MDIO=m
 CONFIG_NET_VENDOR_3COM=y
-CONFIG_EL1=m
-CONFIG_EL3=m
-CONFIG_3C515=m
-CONFIG_PCMCIA_3C574=m
-CONFIG_PCMCIA_3C589=m
 CONFIG_VORTEX=m
 CONFIG_TYPHOON=m
 CONFIG_NET_VENDOR_ADAPTEC=y
@@ -1776,16 +1806,16 @@ CONFIG_ACENIC=m
 # CONFIG_ACENIC_OMIT_TIGON_I is not set
 CONFIG_NET_VENDOR_AMD=y
 CONFIG_AMD8111_ETH=m
-CONFIG_LANCE=m
 CONFIG_PCNET32=m
-CONFIG_DEPCA=m
-CONFIG_PCMCIA_NMCLAN=m
-CONFIG_NI65=m
 CONFIG_NET_VENDOR_ATHEROS=y
 CONFIG_ATL2=m
 CONFIG_ATL1=m
 CONFIG_ATL1E=m
 CONFIG_ATL1C=m
+CONFIG_ALX=m
+CONFIG_NET_CADENCE=y
+CONFIG_ARM_AT91_ETHER=m
+CONFIG_MACB=m
 CONFIG_NET_VENDOR_BROADCOM=y
 CONFIG_B44=m
 CONFIG_B44_PCI_AUTOSELECT=y
@@ -1795,36 +1825,34 @@ CONFIG_BNX2=m
 CONFIG_CNIC=m
 CONFIG_TIGON3=m
 CONFIG_BNX2X=m
+CONFIG_BNX2X_SRIOV=y
 CONFIG_NET_VENDOR_BROCADE=y
 CONFIG_BNA=m
+CONFIG_NET_CALXEDA_XGMAC=m
 CONFIG_NET_VENDOR_CHELSIO=y
 CONFIG_CHELSIO_T1=m
 CONFIG_CHELSIO_T1_1G=y
 CONFIG_CHELSIO_T3=m
 CONFIG_CHELSIO_T4=m
 CONFIG_CHELSIO_T4VF=m
-CONFIG_NET_VENDOR_CIRRUS=y
-CONFIG_CS89x0=m
 CONFIG_NET_VENDOR_CISCO=y
 CONFIG_ENIC=m
 CONFIG_DNET=m
 CONFIG_NET_VENDOR_DEC=y
-CONFIG_EWRK3=m
 CONFIG_NET_TULIP=y
 CONFIG_DE2104X=m
 CONFIG_DE2104X_DSL=0
 CONFIG_TULIP=m
 # CONFIG_TULIP_MWI is not set
-# CONFIG_TULIP_MMIO is not set
-# CONFIG_TULIP_NAPI is not set
+CONFIG_TULIP_MMIO=y
+CONFIG_TULIP_NAPI=y
+CONFIG_TULIP_NAPI_HW_MITIGATION=y
 CONFIG_DE4X5=m
 CONFIG_WINBOND_840=m
 CONFIG_DM9102=m
 CONFIG_ULI526X=m
 CONFIG_PCMCIA_XIRCOM=m
 CONFIG_NET_VENDOR_DLINK=y
-CONFIG_DE600=m
-CONFIG_DE620=m
 CONFIG_DL2K=m
 CONFIG_SUNDANCE=m
 # CONFIG_SUNDANCE_MMIO is not set
@@ -1834,126 +1862,89 @@ CONFIG_NET_VENDOR_EXAR=y
 CONFIG_S2IO=m
 CONFIG_VXGE=m
 # CONFIG_VXGE_DEBUG_TRACE_ALL is not set
-CONFIG_NET_VENDOR_FUJITSU=y
-CONFIG_AT1700=m
-CONFIG_PCMCIA_FMVJ18X=m
-CONFIG_ETH16I=m
 CONFIG_NET_VENDOR_HP=y
 CONFIG_HP100=m
-CONFIG_NET_VENDOR_IBM=y
-# CONFIG_IBM_EMAC_ZMII is not set
-# CONFIG_IBM_EMAC_RGMII is not set
-# CONFIG_IBM_EMAC_TAH is not set
-# CONFIG_IBM_EMAC_EMAC4 is not set
-# CONFIG_IBM_EMAC_NO_FLOW_CTRL is not set
-# CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT is not set
-# CONFIG_IBM_EMAC_MAL_COMMON_ERR is not set
 CONFIG_NET_VENDOR_INTEL=y
 CONFIG_E100=m
 CONFIG_E1000=m
 CONFIG_E1000E=m
 CONFIG_IGB=m
+CONFIG_IGB_HWMON=y
 CONFIG_IGB_DCA=y
 CONFIG_IGBVF=m
 CONFIG_IXGB=m
 CONFIG_IXGBE=m
+CONFIG_IXGBE_HWMON=y
 CONFIG_IXGBE_DCA=y
 CONFIG_IXGBEVF=m
 CONFIG_NET_VENDOR_I825XX=y
-CONFIG_ELPLUS=m
-CONFIG_EL16=m
-CONFIG_ELMC=m
-CONFIG_ELMC_II=m
-CONFIG_APRICOT=m
-CONFIG_EEXPRESS=m
-CONFIG_EEXPRESS_PRO=m
-CONFIG_LP486E=m
-CONFIG_NI52=m
-CONFIG_ZNET=m
 CONFIG_IP1000=m
 CONFIG_JME=m
 CONFIG_NET_VENDOR_MARVELL=y
+CONFIG_MVMDIO=m
 CONFIG_SKGE=m
+# CONFIG_SKGE_DEBUG is not set
 CONFIG_SKGE_GENESIS=y
 CONFIG_SKY2=m
+# CONFIG_SKY2_DEBUG is not set
 CONFIG_NET_VENDOR_MELLANOX=y
 CONFIG_MLX4_EN=m
 CONFIG_MLX4_CORE=m
 CONFIG_MLX4_DEBUG=y
 CONFIG_NET_VENDOR_MICREL=y
-CONFIG_KS8842=m
-CONFIG_KS8851_MLL=m
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
 CONFIG_KSZ884X_PCI=m
 CONFIG_NET_VENDOR_MYRI=y
 CONFIG_MYRI10GE=m
 CONFIG_MYRI10GE_DCA=y
 CONFIG_FEALNX=m
 CONFIG_NET_VENDOR_NATSEMI=y
-CONFIG_IBMLANA=m
 CONFIG_NATSEMI=m
 CONFIG_NS83820=m
 CONFIG_NET_VENDOR_8390=y
-CONFIG_EL2=m
-CONFIG_AC3200=m
-CONFIG_PCMCIA_AXNET=m
-CONFIG_E2100=m
-CONFIG_ES3210=m
-CONFIG_HPLAN_PLUS=m
-CONFIG_HPLAN=m
-CONFIG_LNE390=m
-CONFIG_NE2000=m
-CONFIG_NE2_MCA=m
 CONFIG_NE2K_PCI=m
-CONFIG_NE3210=m
-CONFIG_PCMCIA_PCNET=m
-CONFIG_ULTRAMCA=m
-CONFIG_ULTRA=m
-CONFIG_ULTRA32=m
-CONFIG_WD80x3=m
 CONFIG_NET_VENDOR_NVIDIA=y
 CONFIG_FORCEDETH=m
 CONFIG_NET_VENDOR_OKI=y
 CONFIG_PCH_GBE=m
 CONFIG_ETHOC=m
-CONFIG_NET_PACKET_ENGINE=y
-CONFIG_HAMACHI=m
-CONFIG_YELLOWFIN=m
+# CONFIG_NET_PACKET_ENGINE is not set
 CONFIG_NET_VENDOR_QLOGIC=y
 CONFIG_QLA3XXX=m
 CONFIG_QLCNIC=m
+CONFIG_QLCNIC_SRIOV=y
 CONFIG_QLGE=m
 CONFIG_NETXEN_NIC=m
-CONFIG_NET_VENDOR_RACAL=y
 CONFIG_NET_VENDOR_REALTEK=y
 CONFIG_ATP=m
 CONFIG_8139CP=m
 CONFIG_8139TOO=m
 # CONFIG_8139TOO_PIO is not set
-CONFIG_8139TOO_TUNE_TWISTER=y
+# CONFIG_8139TOO_TUNE_TWISTER is not set
 CONFIG_8139TOO_8129=y
 # CONFIG_8139_OLD_RX_RESET is not set
 CONFIG_R8169=m
 CONFIG_NET_VENDOR_RDC=y
 CONFIG_R6040=m
 CONFIG_NET_VENDOR_SEEQ=y
-CONFIG_SEEQ8005=m
 CONFIG_NET_VENDOR_SILAN=y
 CONFIG_SC92031=m
 CONFIG_NET_VENDOR_SIS=y
 CONFIG_SIS900=m
 CONFIG_SIS190=m
 CONFIG_SFC=m
-CONFIG_SFC_MTD=y
+CONFIG_SFC_MCDI_MON=y
+CONFIG_SFC_SRIOV=y
 CONFIG_NET_VENDOR_SMSC=y
-CONFIG_SMC9194=m
-CONFIG_PCMCIA_SMC91C92=m
 CONFIG_EPIC100=m
 CONFIG_SMSC9420=m
 CONFIG_NET_VENDOR_STMICRO=y
 CONFIG_STMMAC_ETH=m
+CONFIG_STMMAC_PLATFORM=y
+CONFIG_STMMAC_PCI=y
+# CONFIG_STMMAC_DEBUG_FS is not set
 # CONFIG_STMMAC_DA is not set
-CONFIG_STMMAC_RING=y
-# CONFIG_STMMAC_CHAINED is not set
 CONFIG_NET_VENDOR_SUN=y
 CONFIG_HAPPYMEAL=m
 CONFIG_SUNGEM=m
@@ -1967,16 +1958,22 @@ CONFIG_NET_VENDOR_VIA=y
 CONFIG_VIA_RHINE=m
 CONFIG_VIA_RHINE_MMIO=y
 CONFIG_VIA_VELOCITY=m
-CONFIG_NET_VENDOR_XIRCOM=y
-CONFIG_PCMCIA_XIRC2PS=m
+CONFIG_NET_VENDOR_WIZNET=y
+CONFIG_WIZNET_W5100=m
+CONFIG_WIZNET_W5300=m
+# CONFIG_WIZNET_BUS_DIRECT is not set
+# CONFIG_WIZNET_BUS_INDIRECT is not set
+CONFIG_WIZNET_BUS_ANY=y
 # CONFIG_FDDI is not set
 # CONFIG_HIPPI is not set
-CONFIG_NET_SB1000=m
+# CONFIG_NET_SB1000 is not set
 CONFIG_PHYLIB=y
 
 #
 # MII PHY device drivers
 #
+CONFIG_AT803X_PHY=m
+CONFIG_AMD_PHY=m
 CONFIG_MARVELL_PHY=m
 CONFIG_DAVICOM_PHY=m
 CONFIG_QSEMI_PHY=m
@@ -1985,15 +1982,16 @@ CONFIG_CICADA_PHY=m
 CONFIG_VITESSE_PHY=m
 CONFIG_SMSC_PHY=m
 CONFIG_BROADCOM_PHY=m
+CONFIG_BCM87XX_PHY=m
 CONFIG_ICPLUS_PHY=m
 CONFIG_REALTEK_PHY=m
 CONFIG_NATIONAL_PHY=m
 CONFIG_STE10XP=m
 CONFIG_LSI_ET1011C_PHY=m
 CONFIG_MICREL_PHY=m
-# CONFIG_FIXED_PHY is not set
+CONFIG_FIXED_PHY=y
 CONFIG_MDIO_BITBANG=m
-CONFIG_MDIO_GPIO=m
+# CONFIG_MDIO_GPIO is not set
 # CONFIG_PLIP is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -2007,12 +2005,8 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-CONFIG_SLIP=m
+# CONFIG_SLIP is not set
 CONFIG_SLHC=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_SMART=y
-CONFIG_SLIP_MODE_SLIP6=y
-# CONFIG_TR is not set
 
 #
 # USB Network Adapters
@@ -2021,11 +2015,14 @@ CONFIG_USB_CATC=m
 CONFIG_USB_KAWETH=m
 CONFIG_USB_PEGASUS=m
 CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
 CONFIG_USB_USBNET=m
 CONFIG_USB_NET_AX8817X=m
+CONFIG_USB_NET_AX88179_178A=m
 CONFIG_USB_NET_CDCETHER=m
 CONFIG_USB_NET_CDC_EEM=m
 CONFIG_USB_NET_CDC_NCM=m
+CONFIG_USB_NET_CDC_MBIM=m
 CONFIG_USB_NET_DM9601=m
 CONFIG_USB_NET_SMSC75XX=m
 CONFIG_USB_NET_SMSC95XX=m
@@ -2044,91 +2041,172 @@ CONFIG_USB_KC2190=y
 CONFIG_USB_NET_ZAURUS=m
 CONFIG_USB_NET_CX82310_ETH=m
 CONFIG_USB_NET_KALMIA=m
+CONFIG_USB_NET_QMI_WWAN=m
 CONFIG_USB_HSO=m
 CONFIG_USB_NET_INT51X1=m
-# CONFIG_USB_IPHETH is not set
+CONFIG_USB_IPHETH=m
 CONFIG_USB_SIERRA_NET=m
 CONFIG_USB_VL600=m
 CONFIG_WLAN=y
-CONFIG_PCMCIA_RAYCS=m
-# CONFIG_LIBERTAS_THINFIRM is not set
+CONFIG_LIBERTAS_THINFIRM=m
+# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set
+CONFIG_LIBERTAS_THINFIRM_USB=m
 CONFIG_AIRO=m
 CONFIG_ATMEL=m
 CONFIG_PCI_ATMEL=m
-CONFIG_PCMCIA_ATMEL=m
 CONFIG_AT76C50X_USB=m
-CONFIG_AIRO_CS=m
-CONFIG_PCMCIA_WL3501=m
 # CONFIG_PRISM54 is not set
 CONFIG_USB_ZD1201=m
 CONFIG_USB_NET_RNDIS_WLAN=m
-# CONFIG_RTL8180 is not set
-# CONFIG_RTL8187 is not set
-# CONFIG_ADM8211 is not set
-# CONFIG_MAC80211_HWSIM is not set
-# CONFIG_MWL8K is not set
-# CONFIG_ATH_COMMON is not set
-# CONFIG_B43 is not set
-# CONFIG_B43LEGACY is not set
+CONFIG_RTL8180=m
+CONFIG_RTL8187=m
+CONFIG_RTL8187_LEDS=y
+CONFIG_ADM8211=m
+CONFIG_MAC80211_HWSIM=m
+CONFIG_MWL8K=m
+CONFIG_ATH_COMMON=m
+CONFIG_ATH_CARDS=m
+# CONFIG_ATH_DEBUG is not set
+CONFIG_ATH5K=m
+CONFIG_ATH5K_DEBUG=y
+# CONFIG_ATH5K_TRACER is not set
+CONFIG_ATH5K_PCI=y
+CONFIG_ATH9K_HW=m
+CONFIG_ATH9K_COMMON=m
+CONFIG_ATH9K_BTCOEX_SUPPORT=y
+CONFIG_ATH9K=m
+CONFIG_ATH9K_PCI=y
+CONFIG_ATH9K_AHB=y
+# CONFIG_ATH9K_DEBUGFS is not set
+# CONFIG_ATH9K_LEGACY_RATE_CONTROL is not set
+CONFIG_ATH9K_HTC=m
+# CONFIG_ATH9K_HTC_DEBUGFS is not set
+CONFIG_CARL9170=m
+CONFIG_CARL9170_LEDS=y
+CONFIG_CARL9170_WPC=y
+# CONFIG_CARL9170_HWRNG is not set
+# CONFIG_ATH6KL is not set
+CONFIG_AR5523=m
+CONFIG_WIL6210=m
+CONFIG_WIL6210_ISR_COR=y
+CONFIG_B43=m
+CONFIG_B43_SSB=y
+CONFIG_B43_PCI_AUTOSELECT=y
+CONFIG_B43_PCICORE_AUTOSELECT=y
+CONFIG_B43_SDIO=y
+CONFIG_B43_PIO=y
+CONFIG_B43_PHY_N=y
+CONFIG_B43_PHY_LP=y
+CONFIG_B43_LEDS=y
+CONFIG_B43_HWRNG=y
+# CONFIG_B43_DEBUG is not set
+CONFIG_B43LEGACY=m
+CONFIG_B43LEGACY_PCI_AUTOSELECT=y
+CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y
+CONFIG_B43LEGACY_LEDS=y
+CONFIG_B43LEGACY_HWRNG=y
+# CONFIG_B43LEGACY_DEBUG is not set
+CONFIG_B43LEGACY_DMA=y
+CONFIG_B43LEGACY_PIO=y
+CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
+# CONFIG_B43LEGACY_DMA_MODE is not set
+# CONFIG_B43LEGACY_PIO_MODE is not set
 # CONFIG_BRCMFMAC is not set
 CONFIG_HOSTAP=m
 CONFIG_HOSTAP_FIRMWARE=y
-# CONFIG_HOSTAP_FIRMWARE_NVRAM is not set
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
 CONFIG_HOSTAP_PLX=m
 CONFIG_HOSTAP_PCI=m
-CONFIG_HOSTAP_CS=m
-# CONFIG_IPW2100 is not set
-# CONFIG_IPW2200 is not set
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+# CONFIG_IPW2100_DEBUG is not set
+CONFIG_IPW2200=m
+CONFIG_IPW2200_MONITOR=y
+CONFIG_IPW2200_RADIOTAP=y
+CONFIG_IPW2200_PROMISCUOUS=y
+CONFIG_IPW2200_QOS=y
+# CONFIG_IPW2200_DEBUG is not set
+CONFIG_LIBIPW=m
+# CONFIG_LIBIPW_DEBUG is not set
 # CONFIG_IWLWIFI is not set
-# CONFIG_IWL4965 is not set
-# CONFIG_IWL3945 is not set
-# CONFIG_IWM is not set
-# CONFIG_LIBERTAS is not set
-# CONFIG_HERMES is not set
-# CONFIG_P54_COMMON is not set
-# CONFIG_RT2X00 is not set
-# CONFIG_RTL8192CE is not set
-# CONFIG_RTL8192SE is not set
-# CONFIG_RTL8192DE is not set
-# CONFIG_RTL8192CU is not set
-# CONFIG_WL1251 is not set
-# CONFIG_WL12XX_MENU is not set
-# CONFIG_ZD1211RW is not set
-# CONFIG_MWIFIEX is not set
+CONFIG_IWLEGACY=m
+CONFIG_IWL4965=m
+CONFIG_IWL3945=m
+
+#
+# iwl3945 / iwl4965 Debugging Options
+#
+# CONFIG_IWLEGACY_DEBUG is not set
+CONFIG_LIBERTAS=m
+CONFIG_LIBERTAS_USB=m
+CONFIG_LIBERTAS_SDIO=m
+# CONFIG_LIBERTAS_DEBUG is not set
+CONFIG_LIBERTAS_MESH=y
+CONFIG_HERMES=m
+# CONFIG_HERMES_PRISM is not set
+CONFIG_HERMES_CACHE_FW_ON_INIT=y
+CONFIG_PLX_HERMES=m
+CONFIG_TMD_HERMES=m
+CONFIG_NORTEL_HERMES=m
+CONFIG_ORINOCO_USB=m
+CONFIG_P54_COMMON=m
+CONFIG_P54_USB=m
+CONFIG_P54_PCI=m
+CONFIG_P54_LEDS=y
+CONFIG_RT2X00=m
+CONFIG_RT2400PCI=m
+CONFIG_RT2500PCI=m
+CONFIG_RT61PCI=m
+CONFIG_RT2800PCI=m
+CONFIG_RT2800PCI_RT33XX=y
+CONFIG_RT2800PCI_RT35XX=y
+CONFIG_RT2800PCI_RT53XX=y
+CONFIG_RT2800PCI_RT3290=y
+CONFIG_RT2500USB=m
+CONFIG_RT73USB=m
+CONFIG_RT2800USB=m
+CONFIG_RT2800USB_RT33XX=y
+CONFIG_RT2800USB_RT35XX=y
+CONFIG_RT2800USB_RT53XX=y
+CONFIG_RT2800USB_RT55XX=y
+CONFIG_RT2800USB_UNKNOWN=y
+CONFIG_RT2800_LIB=m
+CONFIG_RT2X00_LIB_MMIO=m
+CONFIG_RT2X00_LIB_PCI=m
+CONFIG_RT2X00_LIB_USB=m
+CONFIG_RT2X00_LIB=m
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_CRYPTO=y
+CONFIG_RT2X00_LIB_LEDS=y
+# CONFIG_RT2X00_DEBUG is not set
+CONFIG_RTLWIFI=m
+# CONFIG_RTLWIFI_DEBUG is not set
+CONFIG_RTL8192CE=m
+CONFIG_RTL8192SE=m
+CONFIG_RTL8192DE=m
+CONFIG_RTL8723AE=m
+CONFIG_RTL8188EE=m
+CONFIG_RTL8192CU=m
+CONFIG_RTL8192C_COMMON=m
+CONFIG_WL_TI=y
+CONFIG_WL1251=m
+CONFIG_WL1251_SDIO=m
+CONFIG_WL12XX=m
+CONFIG_WL18XX=m
+CONFIG_WLCORE=m
+CONFIG_WLCORE_SDIO=m
+CONFIG_WILINK_PLATFORM_DATA=y
+CONFIG_ZD1211RW=m
+# CONFIG_ZD1211RW_DEBUG is not set
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_SDIO=m
+# CONFIG_MWIFIEX_PCIE is not set
+CONFIG_MWIFIEX_USB=m
 
 #
 # Enable WiMAX (Networking options) to see the WiMAX drivers
 #
-CONFIG_WAN=y
-CONFIG_HOSTESS_SV11=m
-CONFIG_COSA=m
-CONFIG_LANMEDIA=m
-CONFIG_SEALEVEL_4021=m
-CONFIG_HDLC=m
-CONFIG_HDLC_RAW=m
-CONFIG_HDLC_RAW_ETH=m
-CONFIG_HDLC_CISCO=m
-CONFIG_HDLC_FR=m
-CONFIG_HDLC_PPP=m
-
-#
-# X.25/LAPB support is disabled
-#
-CONFIG_PCI200SYN=m
-CONFIG_WANXL=m
-# CONFIG_WANXL_BUILD_FIRMWARE is not set
-CONFIG_PC300TOO=m
-CONFIG_N2=m
-CONFIG_C101=m
-CONFIG_FARSYNC=m
-CONFIG_DSCC4=m
-CONFIG_DSCC4_PCISYNC=y
-CONFIG_DSCC4_PCI_RST=y
-CONFIG_DLCI=m
-CONFIG_DLCI_MAX=8
-CONFIG_SDLA=m
-CONFIG_SBNI=m
-CONFIG_SBNI_MULTILINE=y
+# CONFIG_WAN is not set
 CONFIG_XEN_NETDEV_FRONTEND=m
 CONFIG_XEN_NETDEV_BACKEND=m
 CONFIG_VMXNET3=m
@@ -2161,9 +2239,9 @@ CONFIG_ISDN_DRV_HISAX=m
 #
 CONFIG_HISAX_EURO=y
 CONFIG_DE_AOC=y
-# CONFIG_HISAX_NO_SENDCOMPLETE is not set
-# CONFIG_HISAX_NO_LLC is not set
-# CONFIG_HISAX_NO_KEYPAD is not set
+CONFIG_HISAX_NO_SENDCOMPLETE=y
+CONFIG_HISAX_NO_LLC=y
+CONFIG_HISAX_NO_KEYPAD=y
 CONFIG_HISAX_1TR6=y
 CONFIG_HISAX_NI1=y
 CONFIG_HISAX_MAX_CARDS=8
@@ -2171,27 +2249,17 @@ CONFIG_HISAX_MAX_CARDS=8
 #
 # HiSax supported cards
 #
-CONFIG_HISAX_16_0=y
 CONFIG_HISAX_16_3=y
 CONFIG_HISAX_TELESPCI=y
 CONFIG_HISAX_S0BOX=y
-CONFIG_HISAX_AVM_A1=y
 CONFIG_HISAX_FRITZPCI=y
 CONFIG_HISAX_AVM_A1_PCMCIA=y
 CONFIG_HISAX_ELSA=y
-CONFIG_HISAX_IX1MICROR2=y
 CONFIG_HISAX_DIEHLDIVA=y
-CONFIG_HISAX_ASUSCOM=y
-CONFIG_HISAX_TELEINT=y
-CONFIG_HISAX_HFCS=y
 CONFIG_HISAX_SEDLBAUER=y
-CONFIG_HISAX_SPORTSTER=y
-CONFIG_HISAX_MIC=y
 CONFIG_HISAX_NETJET=y
 CONFIG_HISAX_NETJET_U=y
 CONFIG_HISAX_NICCY=y
-CONFIG_HISAX_ISURF=y
-CONFIG_HISAX_HSTSAPHIR=y
 CONFIG_HISAX_BKM_A4T=y
 CONFIG_HISAX_SCT_QUADRO=y
 CONFIG_HISAX_GAZEL=y
@@ -2199,15 +2267,11 @@ CONFIG_HISAX_HFC_PCI=y
 CONFIG_HISAX_W6692=y
 CONFIG_HISAX_HFC_SX=y
 CONFIG_HISAX_ENTERNOW_PCI=y
-CONFIG_HISAX_DEBUG=y
+# CONFIG_HISAX_DEBUG is not set
 
 #
 # HiSax PCMCIA card service modules
 #
-CONFIG_HISAX_SEDLBAUER_CS=m
-CONFIG_HISAX_ELSA_CS=m
-CONFIG_HISAX_AVM_A1_CS=m
-CONFIG_HISAX_TELES_CS=m
 
 #
 # HiSax sub driver modules
@@ -2220,10 +2284,6 @@ CONFIG_HISAX_FRITZ_PCIPNP=m
 #
 # Active cards
 #
-CONFIG_ISDN_DRV_ICN=m
-CONFIG_ISDN_DRV_PCBIT=m
-CONFIG_ISDN_DRV_SC=m
-CONFIG_ISDN_DRV_ACT2000=m
 CONFIG_ISDN_CAPI=m
 CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
 CONFIG_CAPI_TRACE=y
@@ -2235,12 +2295,8 @@ CONFIG_ISDN_CAPI_CAPIDRV=m
 # CAPI hardware drivers
 #
 CONFIG_CAPI_AVM=y
-CONFIG_ISDN_DRV_AVMB1_B1ISA=m
 CONFIG_ISDN_DRV_AVMB1_B1PCI=m
 CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
-CONFIG_ISDN_DRV_AVMB1_T1ISA=m
-CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
-CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
 CONFIG_ISDN_DRV_AVMB1_T1PCI=m
 CONFIG_ISDN_DRV_AVMB1_C4=m
 CONFIG_CAPI_EICON=y
@@ -2250,43 +2306,20 @@ CONFIG_ISDN_DIVAS_PRIPCI=y
 CONFIG_ISDN_DIVAS_DIVACAPI=m
 CONFIG_ISDN_DIVAS_USERIDI=m
 CONFIG_ISDN_DIVAS_MAINT=m
-CONFIG_ISDN_DRV_GIGASET=m
-CONFIG_GIGASET_CAPI=y
-# CONFIG_GIGASET_I4L is not set
-# CONFIG_GIGASET_DUMMYLL is not set
-CONFIG_GIGASET_BASE=m
-CONFIG_GIGASET_M105=m
-CONFIG_GIGASET_M101=m
-# CONFIG_GIGASET_DEBUG is not set
+# CONFIG_ISDN_DRV_GIGASET is not set
 CONFIG_HYSDN=m
 CONFIG_HYSDN_CAPI=y
-CONFIG_MISDN=m
-CONFIG_MISDN_DSP=m
-CONFIG_MISDN_L1OIP=m
-
-#
-# mISDN hardware drivers
-#
-CONFIG_MISDN_HFCPCI=m
-CONFIG_MISDN_HFCMULTI=m
-CONFIG_MISDN_HFCUSB=m
-CONFIG_MISDN_AVMFRITZ=m
-CONFIG_MISDN_SPEEDFAX=m
-CONFIG_MISDN_INFINEON=m
-CONFIG_MISDN_W6692=m
-CONFIG_MISDN_NETJET=m
-CONFIG_MISDN_IPAC=m
-CONFIG_MISDN_ISAR=m
+# CONFIG_MISDN is not set
 CONFIG_ISDN_HDLC=m
-# CONFIG_PHONE is not set
 
 #
 # Input device support
 #
 CONFIG_INPUT=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_INPUT_FF_MEMLESS is not set
 CONFIG_INPUT_POLLDEV=m
 CONFIG_INPUT_SPARSEKMAP=m
+CONFIG_INPUT_MATRIXKMAP=m
 
 #
 # Userland interfaces
@@ -2296,59 +2329,82 @@ CONFIG_INPUT_MOUSEDEV=y
 CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
 CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
 # CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_EVDEV is not set
+CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_EVBUG is not set
 
 #
 # Input Device Drivers
 #
 CONFIG_INPUT_KEYBOARD=y
-CONFIG_KEYBOARD_ADP5588=m
-CONFIG_KEYBOARD_ADP5589=m
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
 CONFIG_KEYBOARD_ATKBD=y
-CONFIG_KEYBOARD_QT1070=m
-CONFIG_KEYBOARD_QT2160=m
-CONFIG_KEYBOARD_LKKBD=m
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
 CONFIG_KEYBOARD_GPIO=m
 CONFIG_KEYBOARD_GPIO_POLLED=m
-CONFIG_KEYBOARD_TCA6416=m
-CONFIG_KEYBOARD_MATRIX=m
-CONFIG_KEYBOARD_LM8323=m
-CONFIG_KEYBOARD_MAX7359=m
-CONFIG_KEYBOARD_MCS=m
-CONFIG_KEYBOARD_MPR121=m
-CONFIG_KEYBOARD_NEWTON=m
-CONFIG_KEYBOARD_OPENCORES=m
-CONFIG_KEYBOARD_STOWAWAY=m
-CONFIG_KEYBOARD_SUNKBD=m
-CONFIG_KEYBOARD_XTKBD=m
-# CONFIG_INPUT_MOUSE is not set
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_TCA8418 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+CONFIG_KEYBOARD_LM8333=m
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_SAMSUNG is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_CYPRESS=y
+CONFIG_MOUSE_PS2_LIFEBOOK=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+CONFIG_MOUSE_PS2_ELANTECH=y
+CONFIG_MOUSE_PS2_SENTELIC=y
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+CONFIG_MOUSE_SERIAL=m
+CONFIG_MOUSE_APPLETOUCH=m
+CONFIG_MOUSE_BCM5974=m
+CONFIG_MOUSE_CYAPA=m
+CONFIG_MOUSE_VSXXXAA=m
+# CONFIG_MOUSE_GPIO is not set
+CONFIG_MOUSE_SYNAPTICS_I2C=m
+CONFIG_MOUSE_SYNAPTICS_USB=m
 # CONFIG_INPUT_JOYSTICK is not set
 # CONFIG_INPUT_TABLET is not set
 # CONFIG_INPUT_TOUCHSCREEN is not set
 CONFIG_INPUT_MISC=y
-CONFIG_INPUT_AD714X=m
-CONFIG_INPUT_AD714X_I2C=m
-CONFIG_INPUT_BMA150=m
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_BMA150 is not set
 CONFIG_INPUT_PCSPKR=m
 # CONFIG_INPUT_MMA8450 is not set
 # CONFIG_INPUT_MPU3050 is not set
 CONFIG_INPUT_APANEL=m
+# CONFIG_INPUT_GP2A is not set
+# CONFIG_INPUT_GPIO_TILT_POLLED is not set
 # CONFIG_INPUT_WISTRON_BTNS is not set
-# CONFIG_INPUT_ATLAS_BTNS is not set
+CONFIG_INPUT_ATLAS_BTNS=m
 CONFIG_INPUT_ATI_REMOTE2=m
 CONFIG_INPUT_KEYSPAN_REMOTE=m
 # CONFIG_INPUT_KXTJ9 is not set
 CONFIG_INPUT_POWERMATE=m
 CONFIG_INPUT_YEALINK=m
 CONFIG_INPUT_CM109=m
-# CONFIG_INPUT_UINPUT is not set
-CONFIG_INPUT_PCF50633_PMU=m
-CONFIG_INPUT_PCF8574=m
+CONFIG_INPUT_RETU_PWRBUTTON=m
+CONFIG_INPUT_UINPUT=m
+# CONFIG_INPUT_PCF8574 is not set
 CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
 # CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_IMS_PCU is not set
 # CONFIG_INPUT_CMA3000 is not set
-CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m
+CONFIG_INPUT_XEN_KBDDEV_FRONTEND=y
 
 #
 # Hardware I/O ports
@@ -2360,125 +2416,119 @@ CONFIG_SERIO_SERPORT=y
 # CONFIG_SERIO_PARKBD is not set
 # CONFIG_SERIO_PCIPS2 is not set
 CONFIG_SERIO_LIBPS2=y
-# CONFIG_SERIO_RAW is not set
+CONFIG_SERIO_RAW=m
 CONFIG_SERIO_ALTERA_PS2=m
-CONFIG_SERIO_PS2MULT=m
+# CONFIG_SERIO_PS2MULT is not set
+CONFIG_SERIO_ARC_PS2=m
 # CONFIG_GAMEPORT is not set
 
 #
 # Character devices
 #
+CONFIG_TTY=y
 CONFIG_VT=y
 CONFIG_CONSOLE_TRANSLATIONS=y
 CONFIG_VT_CONSOLE=y
 CONFIG_VT_CONSOLE_SLEEP=y
 CONFIG_HW_CONSOLE=y
-# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
 CONFIG_UNIX98_PTYS=y
-# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_SERIAL_NONSTANDARD is not set
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_ROCKETPORT=m
+CONFIG_CYCLADES=m
+# CONFIG_CYZ_INTR is not set
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
+CONFIG_SYNCLINK=m
+CONFIG_SYNCLINKMP=m
+CONFIG_SYNCLINK_GT=m
 CONFIG_NOZOMI=m
+# CONFIG_ISI is not set
+CONFIG_N_HDLC=m
 CONFIG_N_GSM=m
 # CONFIG_TRACE_SINK is not set
-CONFIG_DEVKMEM=y
+# CONFIG_STALDRV is not set
 
 #
 # Serial drivers
 #
 CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y
+CONFIG_SERIAL_8250_PNP=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_SERIAL_8250_DMA=y
 CONFIG_SERIAL_8250_PCI=y
-CONFIG_SERIAL_8250_PNP=y
-CONFIG_SERIAL_8250_CS=m
 CONFIG_SERIAL_8250_NR_UARTS=4
 CONFIG_SERIAL_8250_RUNTIME_UARTS=4
 CONFIG_SERIAL_8250_EXTENDED=y
 CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_FOURPORT=m
-CONFIG_SERIAL_8250_ACCENT=m
-CONFIG_SERIAL_8250_BOCA=m
-CONFIG_SERIAL_8250_EXAR_ST16C554=m
-CONFIG_SERIAL_8250_HUB6=m
 CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
 CONFIG_SERIAL_8250_RSA=y
-# CONFIG_SERIAL_8250_MCA is not set
+CONFIG_SERIAL_8250_DW=m
 
 #
 # Non-8250 serial port support
 #
-CONFIG_SERIAL_MFD_HSU=m
-CONFIG_SERIAL_UARTLITE=m
+# CONFIG_SERIAL_MFD_HSU is not set
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
-# CONFIG_SERIAL_JSM is not set
-CONFIG_SERIAL_TIMBERDALE=m
-CONFIG_SERIAL_ALTERA_JTAGUART=m
-CONFIG_SERIAL_ALTERA_UART=m
-CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4
-CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200
-CONFIG_SERIAL_PCH_UART=m
-CONFIG_SERIAL_XILINX_PS_UART=m
+CONFIG_SERIAL_JSM=m
+CONFIG_SERIAL_SCCNXP=m
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_PCH_UART is not set
+CONFIG_SERIAL_ARC=m
+CONFIG_SERIAL_ARC_NR_PORTS=1
+CONFIG_SERIAL_RP2=m
+CONFIG_SERIAL_RP2_NR_UARTS=32
+# CONFIG_TTY_PRINTK is not set
 CONFIG_PRINTER=m
-# CONFIG_LP_CONSOLE is not set
+CONFIG_LP_CONSOLE=y
 CONFIG_PPDEV=m
 CONFIG_HVC_DRIVER=y
 CONFIG_HVC_IRQ=y
 CONFIG_HVC_XEN=y
+CONFIG_HVC_XEN_FRONTEND=y
 CONFIG_VIRTIO_CONSOLE=y
 CONFIG_IPMI_HANDLER=m
-CONFIG_IPMI_PANIC_EVENT=y
-# CONFIG_IPMI_PANIC_STRING is not set
+# CONFIG_IPMI_PANIC_EVENT is not set
 CONFIG_IPMI_DEVICE_INTERFACE=m
 CONFIG_IPMI_SI=m
 CONFIG_IPMI_WATCHDOG=m
 CONFIG_IPMI_POWEROFF=m
-CONFIG_HW_RANDOM=m
+CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_TIMERIOMEM=m
 CONFIG_HW_RANDOM_INTEL=m
 CONFIG_HW_RANDOM_AMD=m
+CONFIG_HW_RANDOM_ATMEL=m
 CONFIG_HW_RANDOM_GEODE=m
 CONFIG_HW_RANDOM_VIA=m
 CONFIG_HW_RANDOM_VIRTIO=m
-CONFIG_NVRAM=m
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
+CONFIG_HW_RANDOM_EXYNOS=m
+CONFIG_NVRAM=y
+CONFIG_R3964=m
 # CONFIG_APPLICOM is not set
-# CONFIG_SONYPI is not set
-
-#
-# PCMCIA character devices
-#
-CONFIG_SYNCLINK_CS=m
-# CONFIG_CARDMAN_4000 is not set
-# CONFIG_CARDMAN_4040 is not set
-CONFIG_IPWIRELESS=m
+CONFIG_SONYPI=m
 CONFIG_MWAVE=m
-CONFIG_SCx200_GPIO=m
 CONFIG_PC8736x_GPIO=m
 CONFIG_NSC_GPIO=m
-# CONFIG_RAW_DRIVER is not set
+CONFIG_RAW_DRIVER=y
+CONFIG_MAX_RAW_DEVS=8192
 CONFIG_HPET=y
-CONFIG_HPET_MMAP=y
+# CONFIG_HPET_MMAP is not set
 CONFIG_HANGCHECK_TIMER=m
 # CONFIG_TCG_TPM is not set
 # CONFIG_TELCLOCK is not set
-CONFIG_DEVPORT=y
-# CONFIG_RAMOOPS is not set
 CONFIG_I2C=m
 CONFIG_I2C_BOARDINFO=y
 CONFIG_I2C_COMPAT=y
 CONFIG_I2C_CHARDEV=m
-CONFIG_I2C_MUX=m
-
-#
-# Multiplexer I2C Chip support
-#
-CONFIG_I2C_MUX_GPIO=m
-CONFIG_I2C_MUX_PCA9541=m
-CONFIG_I2C_MUX_PCA954x=m
+# CONFIG_I2C_MUX is not set
 CONFIG_I2C_HELPER_AUTO=y
 CONFIG_I2C_SMBUS=m
 CONFIG_I2C_ALGOBIT=m
@@ -2499,6 +2549,7 @@ CONFIG_I2C_AMD756_S4882=m
 CONFIG_I2C_AMD8111=m
 CONFIG_I2C_I801=m
 CONFIG_I2C_ISCH=m
+CONFIG_I2C_ISMT=m
 CONFIG_I2C_PIIX4=m
 CONFIG_I2C_NFORCE2=m
 CONFIG_I2C_NFORCE2_S4985=m
@@ -2516,16 +2567,18 @@ CONFIG_I2C_SCMI=m
 #
 # I2C system bus drivers (mostly embedded / system-on-chip)
 #
+CONFIG_I2C_CBUS_GPIO=m
 CONFIG_I2C_DESIGNWARE_CORE=m
-CONFIG_I2C_DESIGNWARE_PCI=m
-CONFIG_I2C_GPIO=m
-CONFIG_I2C_INTEL_MID=m
-CONFIG_I2C_OCORES=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=m
+# CONFIG_I2C_DESIGNWARE_PCI is not set
+# CONFIG_I2C_EG20T is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_INTEL_MID is not set
+# CONFIG_I2C_OCORES is not set
 CONFIG_I2C_PCA_PLATFORM=m
 # CONFIG_I2C_PXA_PCI is not set
 CONFIG_I2C_SIMTEC=m
-CONFIG_I2C_XILINX=m
-CONFIG_I2C_EG20T=m
+# CONFIG_I2C_XILINX is not set
 
 #
 # External I2C/SMBus adapter drivers
@@ -2533,21 +2586,32 @@ CONFIG_I2C_EG20T=m
 CONFIG_I2C_DIOLAN_U2C=m
 CONFIG_I2C_PARPORT=m
 CONFIG_I2C_PARPORT_LIGHT=m
-CONFIG_I2C_TAOS_EVM=m
+# CONFIG_I2C_TAOS_EVM is not set
 CONFIG_I2C_TINY_USB=m
+CONFIG_I2C_VIPERBOARD=m
 
 #
 # Other I2C/SMBus bus drivers
 #
-CONFIG_I2C_PCA_ISA=m
-CONFIG_I2C_STUB=m
-# CONFIG_SCx200_I2C is not set
 CONFIG_SCx200_ACB=m
+CONFIG_I2C_STUB=m
 # CONFIG_I2C_DEBUG_CORE is not set
 # CONFIG_I2C_DEBUG_ALGO is not set
 # CONFIG_I2C_DEBUG_BUS is not set
 # CONFIG_SPI is not set
 
+#
+# Qualcomm MSM SSBI bus support
+#
+# CONFIG_SSBI is not set
+CONFIG_HSI=m
+CONFIG_HSI_BOARDINFO=y
+
+#
+# HSI clients
+#
+CONFIG_HSI_CHAR=m
+
 #
 # PPS support
 #
@@ -2570,67 +2634,76 @@ CONFIG_PPS=m
 # PTP clock support
 #
 CONFIG_PTP_1588_CLOCK=m
-CONFIG_DP83640_PHY=m
+
+#
+# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
+#
+CONFIG_PTP_1588_CLOCK_PCH=m
 CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+CONFIG_GPIO_DEVRES=y
 CONFIG_GPIOLIB=y
+CONFIG_GPIO_ACPI=y
 # CONFIG_DEBUG_GPIO is not set
 CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_GENERIC=m
-CONFIG_GPIO_MAX730X=m
 
 #
 # Memory mapped GPIO drivers:
 #
-CONFIG_GPIO_GENERIC_PLATFORM=m
-CONFIG_GPIO_IT8761E=m
+# CONFIG_GPIO_GENERIC_PLATFORM is not set
+# CONFIG_GPIO_IT8761E is not set
+CONFIG_GPIO_TS5500=m
 CONFIG_GPIO_SCH=m
-CONFIG_GPIO_VX855=m
+CONFIG_GPIO_ICH=m
+# CONFIG_GPIO_VX855 is not set
+# CONFIG_GPIO_LYNXPOINT is not set
 
 #
 # I2C GPIO expanders:
 #
-CONFIG_GPIO_MAX7300=m
-CONFIG_GPIO_MAX732X=m
-CONFIG_GPIO_PCA953X=m
-CONFIG_GPIO_PCF857X=m
-CONFIG_GPIO_ADP5588=m
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
 
 #
 # PCI GPIO expanders:
 #
-CONFIG_GPIO_CS5535=m
-# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_CS5535 is not set
+# CONFIG_GPIO_AMD8111 is not set
 # CONFIG_GPIO_LANGWELL is not set
-CONFIG_GPIO_PCH=m
-CONFIG_GPIO_ML_IOH=m
-# CONFIG_GPIO_TIMBERDALE is not set
-CONFIG_GPIO_RDC321X=m
+# CONFIG_GPIO_PCH is not set
+# CONFIG_GPIO_ML_IOH is not set
+# CONFIG_GPIO_RDC321X is not set
 
 #
 # SPI GPIO expanders:
 #
-CONFIG_GPIO_MCP23S08=m
+# CONFIG_GPIO_MCP23S08 is not set
 
 #
 # AC97 GPIO expanders:
 #
-# CONFIG_GPIO_UCB1400 is not set
 
 #
 # MODULbus GPIO expanders:
 #
-CONFIG_GPIO_JANZ_TTL=m
+
+#
+# USB GPIO expanders:
+#
+CONFIG_GPIO_VIPERBOARD=m
 CONFIG_W1=m
 CONFIG_W1_CON=y
 
 #
 # 1-wire Bus Masters
 #
-CONFIG_W1_MASTER_MATROX=m
+# CONFIG_W1_MASTER_MATROX is not set
 CONFIG_W1_MASTER_DS2490=m
 CONFIG_W1_MASTER_DS2482=m
 CONFIG_W1_MASTER_DS1WM=m
-CONFIG_W1_MASTER_GPIO=m
+# CONFIG_W1_MASTER_GPIO is not set
 
 #
 # 1-wire Slaves
@@ -2638,29 +2711,39 @@ CONFIG_W1_MASTER_GPIO=m
 CONFIG_W1_SLAVE_THERM=m
 CONFIG_W1_SLAVE_SMEM=m
 CONFIG_W1_SLAVE_DS2408=m
+CONFIG_W1_SLAVE_DS2408_READBACK=y
+CONFIG_W1_SLAVE_DS2413=m
 CONFIG_W1_SLAVE_DS2423=m
 CONFIG_W1_SLAVE_DS2431=m
 CONFIG_W1_SLAVE_DS2433=m
 CONFIG_W1_SLAVE_DS2433_CRC=y
 CONFIG_W1_SLAVE_DS2760=m
 CONFIG_W1_SLAVE_DS2780=m
+CONFIG_W1_SLAVE_DS2781=m
+CONFIG_W1_SLAVE_DS28E04=m
 CONFIG_W1_SLAVE_BQ27000=m
 CONFIG_POWER_SUPPLY=y
 # CONFIG_POWER_SUPPLY_DEBUG is not set
 # CONFIG_PDA_POWER is not set
+CONFIG_GENERIC_ADC_BATTERY=m
 # CONFIG_TEST_POWER is not set
 # CONFIG_BATTERY_DS2760 is not set
 # CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2781 is not set
 # CONFIG_BATTERY_DS2782 is not set
-# CONFIG_BATTERY_BQ20Z75 is not set
+# CONFIG_BATTERY_SBS is not set
 # CONFIG_BATTERY_BQ27x00 is not set
 # CONFIG_BATTERY_MAX17040 is not set
 # CONFIG_BATTERY_MAX17042 is not set
-# CONFIG_CHARGER_PCF50633 is not set
-# CONFIG_CHARGER_ISP1704 is not set
 # CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_CHARGER_LP8727 is not set
 # CONFIG_CHARGER_GPIO is not set
-CONFIG_HWMON=m
+# CONFIG_CHARGER_BQ2415X is not set
+# CONFIG_CHARGER_SMB347 is not set
+# CONFIG_BATTERY_GOLDFISH is not set
+CONFIG_POWER_RESET=y
+CONFIG_POWER_AVS=y
+CONFIG_HWMON=y
 CONFIG_HWMON_VID=m
 # CONFIG_HWMON_DEBUG_CHIP is not set
 
@@ -2677,6 +2760,8 @@ CONFIG_SENSORS_ADM1026=m
 CONFIG_SENSORS_ADM1029=m
 CONFIG_SENSORS_ADM1031=m
 CONFIG_SENSORS_ADM9240=m
+CONFIG_SENSORS_ADT7X10=m
+CONFIG_SENSORS_ADT7410=m
 CONFIG_SENSORS_ADT7411=m
 CONFIG_SENSORS_ADT7462=m
 CONFIG_SENSORS_ADT7470=m
@@ -2697,12 +2782,14 @@ CONFIG_SENSORS_FSCHMD=m
 CONFIG_SENSORS_G760A=m
 CONFIG_SENSORS_GL518SM=m
 CONFIG_SENSORS_GL520SM=m
-CONFIG_SENSORS_GPIO_FAN=m
+# CONFIG_SENSORS_GPIO_FAN is not set
+CONFIG_SENSORS_HIH6130=m
 CONFIG_SENSORS_CORETEMP=m
 CONFIG_SENSORS_IBMAEM=m
 CONFIG_SENSORS_IBMPEX=m
+# CONFIG_SENSORS_IIO_HWMON is not set
 CONFIG_SENSORS_IT87=m
-CONFIG_SENSORS_JC42=m
+# CONFIG_SENSORS_JC42 is not set
 CONFIG_SENSORS_LINEAGE=m
 CONFIG_SENSORS_LM63=m
 CONFIG_SENSORS_LM73=m
@@ -2720,14 +2807,19 @@ CONFIG_SENSORS_LTC4151=m
 CONFIG_SENSORS_LTC4215=m
 CONFIG_SENSORS_LTC4245=m
 CONFIG_SENSORS_LTC4261=m
+CONFIG_SENSORS_LM95234=m
 CONFIG_SENSORS_LM95241=m
 CONFIG_SENSORS_LM95245=m
 CONFIG_SENSORS_MAX16065=m
 CONFIG_SENSORS_MAX1619=m
 CONFIG_SENSORS_MAX1668=m
+CONFIG_SENSORS_MAX197=m
 CONFIG_SENSORS_MAX6639=m
 CONFIG_SENSORS_MAX6642=m
 CONFIG_SENSORS_MAX6650=m
+CONFIG_SENSORS_MAX6697=m
+CONFIG_SENSORS_MCP3021=m
+CONFIG_SENSORS_NCT6775=m
 CONFIG_SENSORS_NTC_THERMISTOR=m
 CONFIG_SENSORS_PC87360=m
 CONFIG_SENSORS_PC87427=m
@@ -2736,20 +2828,20 @@ CONFIG_PMBUS=m
 CONFIG_SENSORS_PMBUS=m
 CONFIG_SENSORS_ADM1275=m
 CONFIG_SENSORS_LM25066=m
-CONFIG_SENSORS_LTC2978=m
+# CONFIG_SENSORS_LTC2978 is not set
 CONFIG_SENSORS_MAX16064=m
 CONFIG_SENSORS_MAX34440=m
 CONFIG_SENSORS_MAX8688=m
 CONFIG_SENSORS_UCD9000=m
 CONFIG_SENSORS_UCD9200=m
-CONFIG_SENSORS_ZL6100=m
+# CONFIG_SENSORS_ZL6100 is not set
 CONFIG_SENSORS_SHT15=m
 CONFIG_SENSORS_SHT21=m
 CONFIG_SENSORS_SIS5595=m
-CONFIG_SENSORS_SMM665=m
+# CONFIG_SENSORS_SMM665 is not set
 CONFIG_SENSORS_DME1737=m
 CONFIG_SENSORS_EMC1403=m
-CONFIG_SENSORS_EMC2103=m
+# CONFIG_SENSORS_EMC2103 is not set
 CONFIG_SENSORS_EMC6W201=m
 CONFIG_SENSORS_SMSC47M1=m
 CONFIG_SENSORS_SMSC47M192=m
@@ -2760,6 +2852,8 @@ CONFIG_SENSORS_SCH5636=m
 CONFIG_SENSORS_ADS1015=m
 CONFIG_SENSORS_ADS7828=m
 CONFIG_SENSORS_AMC6821=m
+CONFIG_SENSORS_INA209=m
+CONFIG_SENSORS_INA2XX=m
 CONFIG_SENSORS_THMC50=m
 CONFIG_SENSORS_TMP102=m
 CONFIG_SENSORS_TMP401=m
@@ -2786,60 +2880,65 @@ CONFIG_SENSORS_APPLESMC=m
 CONFIG_SENSORS_ACPI_POWER=m
 CONFIG_SENSORS_ATK0110=m
 CONFIG_THERMAL=y
+CONFIG_THERMAL_HWMON=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
+CONFIG_THERMAL_GOV_FAIR_SHARE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_INTEL_POWERCLAMP=m
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_CORE=y
-# CONFIG_WATCHDOG_NOWAYOUT is not set
+CONFIG_WATCHDOG_NOWAYOUT=y
 
 #
 # Watchdog Device Drivers
 #
 CONFIG_SOFT_WATCHDOG=m
-CONFIG_ACQUIRE_WDT=m
-CONFIG_ADVANTECH_WDT=m
+CONFIG_RETU_WATCHDOG=m
+# CONFIG_ACQUIRE_WDT is not set
+# CONFIG_ADVANTECH_WDT is not set
 CONFIG_ALIM1535_WDT=m
 CONFIG_ALIM7101_WDT=m
 CONFIG_F71808E_WDT=m
 CONFIG_SP5100_TCO=m
 CONFIG_GEODE_WDT=m
-CONFIG_SC520_WDT=m
+# CONFIG_SC520_WDT is not set
 CONFIG_SBC_FITPC2_WATCHDOG=m
-CONFIG_EUROTECH_WDT=m
+# CONFIG_EUROTECH_WDT is not set
 CONFIG_IB700_WDT=m
 CONFIG_IBMASR=m
-CONFIG_WAFER_WDT=m
+# CONFIG_WAFER_WDT is not set
 CONFIG_I6300ESB_WDT=m
+CONFIG_IE6XX_WDT=m
 CONFIG_ITCO_WDT=m
 # CONFIG_ITCO_VENDOR_SUPPORT is not set
 CONFIG_IT8712F_WDT=m
 CONFIG_IT87_WDT=m
 CONFIG_HP_WATCHDOG=m
 CONFIG_HPWDT_NMI_DECODING=y
-CONFIG_SC1200_WDT=m
-CONFIG_SCx200_WDT=m
-CONFIG_PC87413_WDT=m
+# CONFIG_SC1200_WDT is not set
+# CONFIG_PC87413_WDT is not set
 CONFIG_NV_TCO=m
-CONFIG_60XX_WDT=m
-CONFIG_SBC8360_WDT=m
-CONFIG_SBC7240_WDT=m
-CONFIG_CPU5_WDT=m
+# CONFIG_60XX_WDT is not set
+# CONFIG_SBC8360_WDT is not set
+# CONFIG_SBC7240_WDT is not set
+# CONFIG_CPU5_WDT is not set
 CONFIG_SMSC_SCH311X_WDT=m
-CONFIG_SMSC37B787_WDT=m
+# CONFIG_SMSC37B787_WDT is not set
+CONFIG_VIA_WDT=m
 CONFIG_W83627HF_WDT=m
 CONFIG_W83697HF_WDT=m
 CONFIG_W83697UG_WDT=m
 CONFIG_W83877F_WDT=m
 CONFIG_W83977F_WDT=m
 CONFIG_MACHZ_WDT=m
-CONFIG_SBC_EPX_C3_WATCHDOG=m
+# CONFIG_SBC_EPX_C3_WATCHDOG is not set
 CONFIG_XEN_WDT=m
 
-#
-# ISA-based Watchdog Cards
-#
-# CONFIG_PCWATCHDOG is not set
-# CONFIG_MIXCOMWD is not set
-# CONFIG_WDT is not set
-
 #
 # PCI-based Watchdog Cards
 #
@@ -2857,397 +2956,588 @@ CONFIG_SSB_POSSIBLE=y
 #
 CONFIG_SSB=m
 CONFIG_SSB_SPROM=y
+CONFIG_SSB_BLOCKIO=y
 CONFIG_SSB_PCIHOST_POSSIBLE=y
 CONFIG_SSB_PCIHOST=y
-# CONFIG_SSB_B43_PCI_BRIDGE is not set
-CONFIG_SSB_PCMCIAHOST_POSSIBLE=y
-CONFIG_SSB_PCMCIAHOST=y
+CONFIG_SSB_B43_PCI_BRIDGE=y
 CONFIG_SSB_SDIOHOST_POSSIBLE=y
 CONFIG_SSB_SDIOHOST=y
+# CONFIG_SSB_SILENT is not set
 # CONFIG_SSB_DEBUG is not set
 CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
 CONFIG_SSB_DRIVER_PCICORE=y
+CONFIG_SSB_DRIVER_GPIO=y
 CONFIG_BCMA_POSSIBLE=y
 
 #
 # Broadcom specific AMBA
 #
-CONFIG_BCMA=m
-CONFIG_BCMA_HOST_PCI_POSSIBLE=y
-# CONFIG_BCMA_HOST_PCI is not set
-# CONFIG_BCMA_DEBUG is not set
+# CONFIG_BCMA is not set
 
 #
 # Multifunction device drivers
 #
 CONFIG_MFD_CORE=m
-CONFIG_MFD_SM501=m
-# CONFIG_MFD_SM501_GPIO is not set
-CONFIG_HTC_PASIC3=m
-CONFIG_UCB1400_CORE=m
-CONFIG_TPS6105X=m
-CONFIG_TPS65010=m
-CONFIG_TPS6507X=m
-# CONFIG_MFD_TMIO is not set
-CONFIG_MFD_WM8400=m
-CONFIG_MFD_PCF50633=m
-CONFIG_PCF50633_ADC=m
-CONFIG_PCF50633_GPIO=m
-# CONFIG_ABX500_CORE is not set
 CONFIG_MFD_CS5535=m
-CONFIG_MFD_TIMBERDALE=m
+# CONFIG_MFD_CROS_EC is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
+# CONFIG_HTC_PASIC3 is not set
+CONFIG_LPC_ICH=m
 CONFIG_LPC_SCH=m
-CONFIG_MFD_RDC321X=m
-CONFIG_MFD_JANZ_CMODIO=m
-CONFIG_MFD_VX855=m
+# CONFIG_MFD_JANZ_CMODIO is not set
+CONFIG_MFD_VIPERBOARD=m
+CONFIG_MFD_RETU=m
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_MFD_RDC321X is not set
+CONFIG_MFD_RTSX_PCI=m
+# CONFIG_MFD_SI476X_CORE is not set
+CONFIG_MFD_SM501=m
+CONFIG_MFD_SM501_GPIO=y
+# CONFIG_ABX500_CORE is not set
+# CONFIG_MFD_SYSCON is not set
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65217 is not set
+# CONFIG_MFD_TPS65912 is not set
 CONFIG_MFD_WL1273_CORE=m
-CONFIG_REGULATOR=y
-# CONFIG_REGULATOR_DEBUG is not set
-# CONFIG_REGULATOR_DUMMY is not set
-CONFIG_REGULATOR_FIXED_VOLTAGE=m
-# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
-# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
-CONFIG_REGULATOR_GPIO=m
-# CONFIG_REGULATOR_BQ24022 is not set
-# CONFIG_REGULATOR_MAX1586 is not set
-# CONFIG_REGULATOR_MAX8649 is not set
-# CONFIG_REGULATOR_MAX8660 is not set
-# CONFIG_REGULATOR_MAX8952 is not set
-# CONFIG_REGULATOR_WM8400 is not set
-# CONFIG_REGULATOR_PCF50633 is not set
-# CONFIG_REGULATOR_LP3971 is not set
-# CONFIG_REGULATOR_LP3972 is not set
-# CONFIG_REGULATOR_TPS6105X is not set
-# CONFIG_REGULATOR_TPS65023 is not set
-# CONFIG_REGULATOR_TPS6507X is not set
-# CONFIG_REGULATOR_ISL6271A is not set
-# CONFIG_REGULATOR_AD5398 is not set
-CONFIG_MEDIA_SUPPORT=m
+CONFIG_MFD_LM3533=m
+# CONFIG_MFD_TIMBERDALE is not set
+# CONFIG_MFD_TMIO is not set
+CONFIG_MFD_VX855=m
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_REGULATOR is not set
+CONFIG_MEDIA_SUPPORT=y
 
 #
 # Multimedia core support
 #
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+# CONFIG_MEDIA_RADIO_SUPPORT is not set
+CONFIG_MEDIA_RC_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
-CONFIG_VIDEO_DEV=m
-CONFIG_VIDEO_V4L2_COMMON=m
+CONFIG_VIDEO_DEV=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
-CONFIG_DVB_CORE=m
-CONFIG_DVB_NET=y
-CONFIG_VIDEO_MEDIA=m
-
-#
-# Multimedia drivers
-#
-CONFIG_RC_CORE=m
-CONFIG_LIRC=m
-CONFIG_RC_MAP=m
-# CONFIG_IR_NEC_DECODER is not set
-# CONFIG_IR_RC5_DECODER is not set
-# CONFIG_IR_RC6_DECODER is not set
-# CONFIG_IR_JVC_DECODER is not set
-# CONFIG_IR_SONY_DECODER is not set
-# CONFIG_IR_RC5_SZ_DECODER is not set
-# CONFIG_IR_MCE_KBD_DECODER is not set
-# CONFIG_IR_LIRC_CODEC is not set
-# CONFIG_RC_ATI_REMOTE is not set
-# CONFIG_IR_ENE is not set
-# CONFIG_IR_IMON is not set
-# CONFIG_IR_MCEUSB is not set
-# CONFIG_IR_ITE_CIR is not set
-# CONFIG_IR_FINTEK is not set
-# CONFIG_IR_NUVOTON is not set
-# CONFIG_IR_REDRAT3 is not set
-# CONFIG_IR_STREAMZAP is not set
-# CONFIG_IR_WINBOND_CIR is not set
-# CONFIG_RC_LOOPBACK is not set
-CONFIG_MEDIA_ATTACH=y
-CONFIG_MEDIA_TUNER=m
-# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
-CONFIG_MEDIA_TUNER_SIMPLE=m
-CONFIG_MEDIA_TUNER_TDA8290=m
-CONFIG_MEDIA_TUNER_TDA827X=m
-CONFIG_MEDIA_TUNER_TDA18271=m
-CONFIG_MEDIA_TUNER_TDA9887=m
-CONFIG_MEDIA_TUNER_TEA5761=m
-CONFIG_MEDIA_TUNER_TEA5767=m
-CONFIG_MEDIA_TUNER_MT20XX=m
-CONFIG_MEDIA_TUNER_XC2028=m
-CONFIG_MEDIA_TUNER_XC5000=m
-CONFIG_MEDIA_TUNER_XC4000=m
-CONFIG_MEDIA_TUNER_MC44S803=m
 CONFIG_VIDEO_V4L2=m
-CONFIG_VIDEO_CAPTURE_DRIVERS=y
 # CONFIG_VIDEO_ADV_DEBUG is not set
 # CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
-# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
-CONFIG_VIDEO_IR_I2C=m
-
-#
-# Encoders, decoders, sensors and other helper chips
-#
-
-#
-# Audio decoders, processors and mixers
-#
-# CONFIG_VIDEO_TVAUDIO is not set
-# CONFIG_VIDEO_TDA7432 is not set
-# CONFIG_VIDEO_TDA9840 is not set
-# CONFIG_VIDEO_TEA6415C is not set
-# CONFIG_VIDEO_TEA6420 is not set
-# CONFIG_VIDEO_MSP3400 is not set
-# CONFIG_VIDEO_CS5345 is not set
-# CONFIG_VIDEO_CS53L32A is not set
-# CONFIG_VIDEO_TLV320AIC23B is not set
-# CONFIG_VIDEO_WM8775 is not set
-# CONFIG_VIDEO_WM8739 is not set
-# CONFIG_VIDEO_VP27SMPX is not set
-
-#
-# RDS decoders
-#
-# CONFIG_VIDEO_SAA6588 is not set
+CONFIG_VIDEO_TUNER=m
+CONFIG_V4L2_MEM2MEM_DEV=m
+CONFIG_VIDEOBUF_GEN=m
+CONFIG_VIDEOBUF_DMA_SG=m
+CONFIG_VIDEOBUF_VMALLOC=m
+CONFIG_VIDEOBUF_DMA_CONTIG=m
+CONFIG_VIDEOBUF_DVB=m
+CONFIG_VIDEOBUF2_CORE=m
+CONFIG_VIDEOBUF2_MEMOPS=m
+CONFIG_VIDEOBUF2_DMA_CONTIG=m
+CONFIG_VIDEOBUF2_VMALLOC=m
+CONFIG_VIDEO_V4L2_INT_DEVICE=m
+CONFIG_DVB_CORE=y
+CONFIG_DVB_NET=y
+CONFIG_TTPCI_EEPROM=m
+CONFIG_DVB_MAX_ADAPTERS=8
+CONFIG_DVB_DYNAMIC_MINORS=y
 
 #
-# Video decoders
+# Media drivers
 #
-# CONFIG_VIDEO_ADV7180 is not set
-# CONFIG_VIDEO_BT819 is not set
-# CONFIG_VIDEO_BT856 is not set
-# CONFIG_VIDEO_BT866 is not set
-# CONFIG_VIDEO_KS0127 is not set
-# CONFIG_VIDEO_SAA7110 is not set
-# CONFIG_VIDEO_SAA711X is not set
-# CONFIG_VIDEO_SAA7191 is not set
-# CONFIG_VIDEO_TVP514X is not set
-# CONFIG_VIDEO_TVP5150 is not set
-# CONFIG_VIDEO_TVP7002 is not set
-# CONFIG_VIDEO_VPX3220 is not set
+CONFIG_RC_CORE=y
+# CONFIG_RC_MAP is not set
+CONFIG_RC_DECODERS=y
+CONFIG_LIRC=m
+CONFIG_IR_LIRC_CODEC=m
+CONFIG_IR_NEC_DECODER=m
+CONFIG_IR_RC5_DECODER=m
+CONFIG_IR_RC6_DECODER=m
+CONFIG_IR_JVC_DECODER=m
+CONFIG_IR_SONY_DECODER=m
+CONFIG_IR_RC5_SZ_DECODER=m
+CONFIG_IR_SANYO_DECODER=m
+CONFIG_IR_MCE_KBD_DECODER=m
+CONFIG_RC_DEVICES=y
+# CONFIG_RC_ATI_REMOTE is not set
+CONFIG_IR_ENE=m
+CONFIG_IR_IMON=m
+CONFIG_IR_MCEUSB=m
+CONFIG_IR_ITE_CIR=m
+CONFIG_IR_FINTEK=m
+CONFIG_IR_NUVOTON=m
+CONFIG_IR_REDRAT3=m
+CONFIG_IR_STREAMZAP=m
+CONFIG_IR_WINBOND_CIR=m
+CONFIG_IR_IGUANA=m
+CONFIG_IR_TTUSBIR=m
+CONFIG_RC_LOOPBACK=m
+CONFIG_IR_GPIO_CIR=m
+CONFIG_MEDIA_USB_SUPPORT=y
+
+#
+# Webcam devices
+#
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+CONFIG_USB_GSPCA=m
+CONFIG_USB_M5602=m
+CONFIG_USB_STV06XX=m
+CONFIG_USB_GL860=m
+CONFIG_USB_GSPCA_BENQ=m
+CONFIG_USB_GSPCA_CONEX=m
+CONFIG_USB_GSPCA_CPIA1=m
+CONFIG_USB_GSPCA_ETOMS=m
+CONFIG_USB_GSPCA_FINEPIX=m
+CONFIG_USB_GSPCA_JEILINJ=m
+CONFIG_USB_GSPCA_JL2005BCD=m
+CONFIG_USB_GSPCA_KINECT=m
+CONFIG_USB_GSPCA_KONICA=m
+CONFIG_USB_GSPCA_MARS=m
+CONFIG_USB_GSPCA_MR97310A=m
+CONFIG_USB_GSPCA_NW80X=m
+CONFIG_USB_GSPCA_OV519=m
+CONFIG_USB_GSPCA_OV534=m
+CONFIG_USB_GSPCA_OV534_9=m
+CONFIG_USB_GSPCA_PAC207=m
+CONFIG_USB_GSPCA_PAC7302=m
+CONFIG_USB_GSPCA_PAC7311=m
+CONFIG_USB_GSPCA_SE401=m
+CONFIG_USB_GSPCA_SN9C2028=m
+CONFIG_USB_GSPCA_SN9C20X=m
+CONFIG_USB_GSPCA_SONIXB=m
+CONFIG_USB_GSPCA_SONIXJ=m
+CONFIG_USB_GSPCA_SPCA500=m
+CONFIG_USB_GSPCA_SPCA501=m
+CONFIG_USB_GSPCA_SPCA505=m
+CONFIG_USB_GSPCA_SPCA506=m
+CONFIG_USB_GSPCA_SPCA508=m
+CONFIG_USB_GSPCA_SPCA561=m
+CONFIG_USB_GSPCA_SPCA1528=m
+CONFIG_USB_GSPCA_SQ905=m
+CONFIG_USB_GSPCA_SQ905C=m
+CONFIG_USB_GSPCA_SQ930X=m
+CONFIG_USB_GSPCA_STK014=m
+CONFIG_USB_GSPCA_STV0680=m
+CONFIG_USB_GSPCA_SUNPLUS=m
+CONFIG_USB_GSPCA_T613=m
+# CONFIG_USB_GSPCA_TOPRO is not set
+CONFIG_USB_GSPCA_TV8532=m
+CONFIG_USB_GSPCA_VC032X=m
+CONFIG_USB_GSPCA_VICAM=m
+CONFIG_USB_GSPCA_XIRLINK_CIT=m
+CONFIG_USB_GSPCA_ZC3XX=m
+CONFIG_USB_PWC=m
+# CONFIG_USB_PWC_DEBUG is not set
+CONFIG_USB_PWC_INPUT_EVDEV=y
+CONFIG_VIDEO_CPIA2=m
+CONFIG_USB_ZR364XX=m
+CONFIG_USB_STKWEBCAM=m
+CONFIG_USB_S2255=m
+# CONFIG_USB_SN9C102 is not set
 
 #
-# Video and audio decoders
-#
-# CONFIG_VIDEO_SAA717X is not set
-# CONFIG_VIDEO_CX25840 is not set
-
+# Analog TV USB devices
 #
-# MPEG video encoders
+# CONFIG_VIDEO_PVRUSB2 is not set
+# CONFIG_VIDEO_HDPVR is not set
+# CONFIG_VIDEO_TLG2300 is not set
+# CONFIG_VIDEO_USBVISION is not set
+# CONFIG_VIDEO_STK1160 is not set
+
+#
+# Analog/digital TV USB devices
+#
+CONFIG_VIDEO_AU0828=m
+CONFIG_VIDEO_AU0828_V4L2=y
+CONFIG_VIDEO_CX231XX=m
+CONFIG_VIDEO_CX231XX_RC=y
+CONFIG_VIDEO_CX231XX_ALSA=m
+CONFIG_VIDEO_CX231XX_DVB=m
+CONFIG_VIDEO_TM6000=m
+CONFIG_VIDEO_TM6000_ALSA=m
+CONFIG_VIDEO_TM6000_DVB=m
+
+#
+# Digital TV USB devices
+#
+CONFIG_DVB_USB=m
+# CONFIG_DVB_USB_DEBUG is not set
+CONFIG_DVB_USB_A800=m
+CONFIG_DVB_USB_DIBUSB_MB=m
+CONFIG_DVB_USB_DIBUSB_MB_FAULTY=y
+CONFIG_DVB_USB_DIBUSB_MC=m
+CONFIG_DVB_USB_DIB0700=m
+CONFIG_DVB_USB_UMT_010=m
+CONFIG_DVB_USB_CXUSB=m
+CONFIG_DVB_USB_M920X=m
+CONFIG_DVB_USB_DIGITV=m
+CONFIG_DVB_USB_VP7045=m
+CONFIG_DVB_USB_VP702X=m
+CONFIG_DVB_USB_GP8PSK=m
+CONFIG_DVB_USB_NOVA_T_USB2=m
+CONFIG_DVB_USB_TTUSB2=m
+CONFIG_DVB_USB_DTT200U=m
+CONFIG_DVB_USB_OPERA1=m
+CONFIG_DVB_USB_AF9005=m
+CONFIG_DVB_USB_AF9005_REMOTE=m
+# CONFIG_DVB_USB_PCTV452E is not set
+CONFIG_DVB_USB_DW2102=m
+CONFIG_DVB_USB_CINERGY_T2=m
+CONFIG_DVB_USB_DTV5100=m
+CONFIG_DVB_USB_FRIIO=m
+CONFIG_DVB_USB_AZ6027=m
+CONFIG_DVB_USB_TECHNISAT_USB2=m
+CONFIG_DVB_USB_V2=m
+CONFIG_DVB_USB_AF9015=m
+CONFIG_DVB_USB_AF9035=m
+CONFIG_DVB_USB_ANYSEE=m
+CONFIG_DVB_USB_AU6610=m
+CONFIG_DVB_USB_AZ6007=m
+CONFIG_DVB_USB_CE6230=m
+CONFIG_DVB_USB_EC168=m
+CONFIG_DVB_USB_GL861=m
+# CONFIG_DVB_USB_IT913X is not set
+CONFIG_DVB_USB_LME2510=m
+# CONFIG_DVB_USB_MXL111SF is not set
+CONFIG_DVB_USB_RTL28XXU=m
+CONFIG_DVB_TTUSB_BUDGET=m
+CONFIG_DVB_TTUSB_DEC=m
+CONFIG_SMS_USB_DRV=m
+CONFIG_DVB_B2C2_FLEXCOP_USB=m
+# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set
+
+#
+# Webcam, TV (analog/digital) USB devices
+#
+CONFIG_VIDEO_EM28XX=m
+CONFIG_VIDEO_EM28XX_ALSA=m
+CONFIG_VIDEO_EM28XX_DVB=m
+CONFIG_VIDEO_EM28XX_RC=m
+CONFIG_MEDIA_PCI_SUPPORT=y
+
+#
+# Media capture support
+#
+CONFIG_VIDEO_MEYE=m
+
+#
+# Media capture/analog TV support
 #
-# CONFIG_VIDEO_CX2341X is not set
+# CONFIG_VIDEO_IVTV is not set
+# CONFIG_VIDEO_ZORAN is not set
+# CONFIG_VIDEO_HEXIUM_GEMINI is not set
+# CONFIG_VIDEO_HEXIUM_ORION is not set
+# CONFIG_VIDEO_MXB is not set
 
 #
-# Video encoders
-#
-# CONFIG_VIDEO_SAA7127 is not set
-# CONFIG_VIDEO_SAA7185 is not set
-# CONFIG_VIDEO_ADV7170 is not set
-# CONFIG_VIDEO_ADV7175 is not set
-# CONFIG_VIDEO_ADV7343 is not set
-# CONFIG_VIDEO_AK881X is not set
+# Media capture/analog/hybrid TV support
+#
+CONFIG_VIDEO_CX18=m
+CONFIG_VIDEO_CX18_ALSA=m
+CONFIG_VIDEO_CX23885=m
+CONFIG_MEDIA_ALTERA_CI=m
+CONFIG_VIDEO_CX25821=m
+CONFIG_VIDEO_CX25821_ALSA=m
+CONFIG_VIDEO_CX88=m
+CONFIG_VIDEO_CX88_ALSA=m
+CONFIG_VIDEO_CX88_BLACKBIRD=m
+CONFIG_VIDEO_CX88_DVB=m
+CONFIG_VIDEO_CX88_VP3054=m
+CONFIG_VIDEO_CX88_MPEG=m
+CONFIG_VIDEO_BT848=m
+CONFIG_DVB_BT8XX=m
+CONFIG_VIDEO_SAA7134=m
+CONFIG_VIDEO_SAA7134_ALSA=m
+CONFIG_VIDEO_SAA7134_RC=y
+CONFIG_VIDEO_SAA7134_DVB=m
+CONFIG_VIDEO_SAA7164=m
+
+#
+# Media digital TV PCI Adapters
+#
+CONFIG_DVB_AV7110=m
+CONFIG_DVB_AV7110_OSD=y
+CONFIG_DVB_BUDGET_CORE=m
+CONFIG_DVB_BUDGET=m
+CONFIG_DVB_BUDGET_CI=m
+CONFIG_DVB_BUDGET_AV=m
+CONFIG_DVB_BUDGET_PATCH=m
+CONFIG_DVB_B2C2_FLEXCOP_PCI=m
+# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set
+CONFIG_DVB_PLUTO2=m
+CONFIG_DVB_DM1105=m
+CONFIG_DVB_PT1=m
+CONFIG_MANTIS_CORE=m
+CONFIG_DVB_MANTIS=m
+CONFIG_DVB_HOPPER=m
+CONFIG_DVB_NGENE=m
+# CONFIG_DVB_DDBRIDGE is not set
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_CAFE_CCIC=m
+CONFIG_VIDEO_TIMBERDALE=m
+CONFIG_SOC_CAMERA=m
+CONFIG_SOC_CAMERA_PLATFORM=m
+CONFIG_VIDEO_SH_MOBILE_CSI2=m
+CONFIG_VIDEO_SH_MOBILE_CEU=m
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_MEM2MEM_DEINTERLACE=m
+CONFIG_VIDEO_SH_VEU=m
+# CONFIG_V4L_TEST_DRIVERS is not set
 
 #
-# Camera sensor devices
+# Supported MMC/SDIO adapters
 #
-# CONFIG_VIDEO_OV7670 is not set
-# CONFIG_VIDEO_MT9P031 is not set
-# CONFIG_VIDEO_MT9T001 is not set
-# CONFIG_VIDEO_MT9V011 is not set
-# CONFIG_VIDEO_MT9V032 is not set
-# CONFIG_VIDEO_TCM825X is not set
-# CONFIG_VIDEO_SR030PC30 is not set
-# CONFIG_VIDEO_NOON010PC30 is not set
-# CONFIG_VIDEO_M5MOLS is not set
-# CONFIG_VIDEO_S5K6AA is not set
+# CONFIG_SMS_SDIO_DRV is not set
+# CONFIG_MEDIA_PARPORT_SUPPORT is not set
 
 #
-# Flash devices
+# Supported FireWire (IEEE 1394) Adapters
 #
-# CONFIG_VIDEO_ADP1653 is not set
+CONFIG_DVB_FIREDTV=m
+CONFIG_DVB_FIREDTV_INPUT=y
+CONFIG_MEDIA_COMMON_OPTIONS=y
 
 #
-# Video improvement chips
+# common driver options
 #
-# CONFIG_VIDEO_UPD64031A is not set
-# CONFIG_VIDEO_UPD64083 is not set
+CONFIG_VIDEO_CX2341X=m
+CONFIG_VIDEO_BTCX=m
+CONFIG_VIDEO_TVEEPROM=m
+CONFIG_CYPRESS_FIRMWARE=m
+CONFIG_DVB_B2C2_FLEXCOP=m
+CONFIG_VIDEO_SAA7146=m
+CONFIG_VIDEO_SAA7146_VV=m
+CONFIG_SMS_SIANO_MDTV=m
+CONFIG_SMS_SIANO_RC=y
+# CONFIG_SMS_SIANO_DEBUGFS is not set
 
 #
-# Miscelaneous helper chips
+# Media ancillary drivers (tuners, sensors, i2c, frontends)
 #
-# CONFIG_VIDEO_THS7303 is not set
-# CONFIG_VIDEO_M52790 is not set
-# CONFIG_VIDEO_VIVI is not set
-# CONFIG_VIDEO_BT848 is not set
-# CONFIG_VIDEO_PMS is not set
-# CONFIG_VIDEO_BWQCAM is not set
-# CONFIG_VIDEO_CQCAM is not set
-# CONFIG_VIDEO_W9966 is not set
-# CONFIG_VIDEO_CPIA2 is not set
-# CONFIG_VIDEO_ZORAN is not set
-# CONFIG_VIDEO_MEYE is not set
-# CONFIG_VIDEO_SAA7134 is not set
-# CONFIG_VIDEO_MXB is not set
-# CONFIG_VIDEO_HEXIUM_ORION is not set
-# CONFIG_VIDEO_HEXIUM_GEMINI is not set
-# CONFIG_VIDEO_TIMBERDALE is not set
-# CONFIG_VIDEO_CX88 is not set
-# CONFIG_VIDEO_CX23885 is not set
-# CONFIG_VIDEO_CX25821 is not set
-# CONFIG_VIDEO_AU0828 is not set
-# CONFIG_VIDEO_IVTV is not set
-# CONFIG_VIDEO_CX18 is not set
-# CONFIG_VIDEO_SAA7164 is not set
-# CONFIG_VIDEO_CAFE_CCIC is not set
-# CONFIG_VIDEO_VIA_CAMERA is not set
-# CONFIG_SOC_CAMERA is not set
-CONFIG_V4L_USB_DRIVERS=y
-# CONFIG_USB_VIDEO_CLASS is not set
-CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
-# CONFIG_USB_GSPCA is not set
-# CONFIG_VIDEO_PVRUSB2 is not set
-# CONFIG_VIDEO_HDPVR is not set
-# CONFIG_VIDEO_EM28XX is not set
-# CONFIG_VIDEO_TLG2300 is not set
-# CONFIG_VIDEO_CX231XX is not set
-# CONFIG_VIDEO_TM6000 is not set
-# CONFIG_VIDEO_USBVISION is not set
-# CONFIG_USB_ET61X251 is not set
-# CONFIG_USB_SN9C102 is not set
-# CONFIG_USB_PWC is not set
-# CONFIG_USB_ZR364XX is not set
-# CONFIG_USB_STKWEBCAM is not set
-# CONFIG_USB_S2255 is not set
-# CONFIG_V4L_MEM2MEM_DRIVERS is not set
-CONFIG_RADIO_ADAPTERS=y
-# CONFIG_RADIO_CADET is not set
-# CONFIG_RADIO_RTRACK is not set
-# CONFIG_RADIO_RTRACK2 is not set
-# CONFIG_RADIO_AZTECH is not set
-# CONFIG_RADIO_GEMTEK is not set
-# CONFIG_RADIO_MAXIRADIO is not set
-# CONFIG_RADIO_MIROPCM20 is not set
-# CONFIG_RADIO_SF16FMI is not set
-# CONFIG_RADIO_SF16FMR2 is not set
-# CONFIG_RADIO_TERRATEC is not set
-# CONFIG_RADIO_TRUST is not set
-# CONFIG_RADIO_TYPHOON is not set
-# CONFIG_RADIO_ZOLTRIX is not set
-# CONFIG_I2C_SI4713 is not set
-# CONFIG_RADIO_SI4713 is not set
-# CONFIG_USB_DSBR is not set
-# CONFIG_RADIO_SI470X is not set
-# CONFIG_USB_MR800 is not set
-# CONFIG_RADIO_TEA5764 is not set
-# CONFIG_RADIO_SAA7706H is not set
-# CONFIG_RADIO_TEF6862 is not set
-# CONFIG_RADIO_TIMBERDALE is not set
-# CONFIG_RADIO_WL1273 is not set
-
-#
-# Texas Instruments WL128x FM driver (ST based)
-#
-# CONFIG_RADIO_WL128X is not set
-CONFIG_DVB_MAX_ADAPTERS=8
-# CONFIG_DVB_DYNAMIC_MINORS is not set
-CONFIG_DVB_CAPTURE_DRIVERS=y
-
-#
-# Supported SAA7146 based PCI Adapters
-#
-# CONFIG_TTPCI_EEPROM is not set
-# CONFIG_DVB_AV7110 is not set
-# CONFIG_DVB_BUDGET_CORE is not set
+CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
+CONFIG_MEDIA_ATTACH=y
+CONFIG_VIDEO_IR_I2C=m
 
 #
-# Supported USB Adapters
+# Audio decoders, processors and mixers
 #
-# CONFIG_DVB_USB is not set
-# CONFIG_DVB_TTUSB_BUDGET is not set
-# CONFIG_DVB_TTUSB_DEC is not set
-# CONFIG_SMS_SIANO_MDTV is not set
+CONFIG_VIDEO_TVAUDIO=m
+CONFIG_VIDEO_TDA7432=m
+CONFIG_VIDEO_MSP3400=m
+CONFIG_VIDEO_CS5345=m
+CONFIG_VIDEO_WM8775=m
 
 #
-# Supported FlexCopII (B2C2) Adapters
+# RDS decoders
 #
-# CONFIG_DVB_B2C2_FLEXCOP is not set
+CONFIG_VIDEO_SAA6588=m
 
 #
-# Supported BT878 Adapters
+# Video decoders
 #
+CONFIG_VIDEO_ADV7180=m
+CONFIG_VIDEO_SAA711X=m
+CONFIG_VIDEO_TVP5150=m
 
 #
-# Supported Pluto2 Adapters
+# Video and audio decoders
 #
-# CONFIG_DVB_PLUTO2 is not set
+CONFIG_VIDEO_CX25840=m
 
 #
-# Supported SDMC DM1105 Adapters
+# Video encoders
 #
-# CONFIG_DVB_DM1105 is not set
 
 #
-# Supported FireWire (IEEE 1394) Adapters
+# Camera sensor devices
 #
-# CONFIG_DVB_FIREDTV is not set
+CONFIG_VIDEO_OV7670=m
+CONFIG_VIDEO_MT9V011=m
 
 #
-# Supported Earthsoft PT1 Adapters
+# Flash devices
 #
-# CONFIG_DVB_PT1 is not set
 
 #
-# Supported Mantis Adapters
+# Video improvement chips
 #
-# CONFIG_MANTIS_CORE is not set
 
 #
-# Supported nGene Adapters
+# Miscelaneous helper chips
 #
-# CONFIG_DVB_NGENE is not set
 
 #
-# Supported ddbridge ('Octopus') Adapters
+# Sensors used on soc_camera driver
 #
-# CONFIG_DVB_DDBRIDGE is not set
 
 #
-# Supported DVB Frontends
+# soc_camera sensor drivers
 #
-# CONFIG_DVB_FE_CUSTOMISE is not set
+CONFIG_SOC_CAMERA_IMX074=m
+CONFIG_SOC_CAMERA_MT9M001=m
+CONFIG_SOC_CAMERA_MT9M111=m
+CONFIG_SOC_CAMERA_MT9T031=m
+CONFIG_SOC_CAMERA_MT9T112=m
+CONFIG_SOC_CAMERA_MT9V022=m
+CONFIG_SOC_CAMERA_OV2640=m
+CONFIG_SOC_CAMERA_OV5642=m
+CONFIG_SOC_CAMERA_OV6650=m
+CONFIG_SOC_CAMERA_OV772X=m
+CONFIG_SOC_CAMERA_OV9640=m
+CONFIG_SOC_CAMERA_OV9740=m
+CONFIG_SOC_CAMERA_RJ54N1=m
+CONFIG_SOC_CAMERA_TW9910=m
+CONFIG_MEDIA_TUNER=m
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_MEDIA_TUNER_TDA8290=m
+CONFIG_MEDIA_TUNER_TDA827X=m
+CONFIG_MEDIA_TUNER_TDA18271=m
+CONFIG_MEDIA_TUNER_TDA9887=m
+CONFIG_MEDIA_TUNER_MT20XX=m
+CONFIG_MEDIA_TUNER_MT2060=m
+CONFIG_MEDIA_TUNER_MT2063=m
+CONFIG_MEDIA_TUNER_MT2266=m
+CONFIG_MEDIA_TUNER_MT2131=m
+CONFIG_MEDIA_TUNER_QT1010=m
+CONFIG_MEDIA_TUNER_XC2028=m
+CONFIG_MEDIA_TUNER_XC5000=m
+CONFIG_MEDIA_TUNER_XC4000=m
+CONFIG_MEDIA_TUNER_MXL5005S=m
+CONFIG_MEDIA_TUNER_MXL5007T=m
+CONFIG_MEDIA_TUNER_MC44S803=m
+CONFIG_MEDIA_TUNER_MAX2165=m
+CONFIG_MEDIA_TUNER_TDA18218=m
+CONFIG_MEDIA_TUNER_FC0011=m
+CONFIG_MEDIA_TUNER_FC0012=m
+CONFIG_MEDIA_TUNER_FC0013=m
+CONFIG_MEDIA_TUNER_TDA18212=m
+CONFIG_MEDIA_TUNER_E4000=m
+CONFIG_MEDIA_TUNER_FC2580=m
+CONFIG_MEDIA_TUNER_TUA9001=m
+CONFIG_MEDIA_TUNER_IT913X=m
+CONFIG_MEDIA_TUNER_R820T=m
 
 #
 # Multistandard (satellite) frontends
 #
+CONFIG_DVB_STB0899=m
+CONFIG_DVB_STB6100=m
+CONFIG_DVB_STV090x=m
+CONFIG_DVB_STV6110x=m
 
 #
 # Multistandard (cable + terrestrial) frontends
 #
+CONFIG_DVB_DRXK=m
+CONFIG_DVB_TDA18271C2DD=m
 
 #
 # DVB-S (satellite) frontends
 #
+CONFIG_DVB_CX24110=m
+CONFIG_DVB_CX24123=m
+CONFIG_DVB_MT312=m
+CONFIG_DVB_ZL10036=m
+CONFIG_DVB_ZL10039=m
+CONFIG_DVB_S5H1420=m
+CONFIG_DVB_STV0288=m
+CONFIG_DVB_STB6000=m
+CONFIG_DVB_STV0299=m
+CONFIG_DVB_STV6110=m
+CONFIG_DVB_STV0900=m
+CONFIG_DVB_TDA8083=m
+CONFIG_DVB_TDA10086=m
+CONFIG_DVB_TDA8261=m
+CONFIG_DVB_VES1X93=m
+CONFIG_DVB_TUNER_ITD1000=m
+CONFIG_DVB_TUNER_CX24113=m
+CONFIG_DVB_TDA826X=m
+CONFIG_DVB_TUA6100=m
+CONFIG_DVB_CX24116=m
+CONFIG_DVB_SI21XX=m
+CONFIG_DVB_TS2020=m
+CONFIG_DVB_DS3000=m
+CONFIG_DVB_MB86A16=m
+CONFIG_DVB_TDA10071=m
 
 #
 # DVB-T (terrestrial) frontends
 #
+CONFIG_DVB_SP8870=m
+CONFIG_DVB_SP887X=m
+CONFIG_DVB_CX22700=m
+CONFIG_DVB_CX22702=m
+CONFIG_DVB_DRXD=m
+CONFIG_DVB_L64781=m
+CONFIG_DVB_TDA1004X=m
+CONFIG_DVB_NXT6000=m
+CONFIG_DVB_MT352=m
+CONFIG_DVB_ZL10353=m
+CONFIG_DVB_DIB3000MB=m
+CONFIG_DVB_DIB3000MC=m
+CONFIG_DVB_DIB7000M=m
+CONFIG_DVB_DIB7000P=m
+CONFIG_DVB_TDA10048=m
+CONFIG_DVB_AF9013=m
+CONFIG_DVB_EC100=m
+CONFIG_DVB_STV0367=m
+CONFIG_DVB_CXD2820R=m
+CONFIG_DVB_RTL2830=m
+CONFIG_DVB_RTL2832=m
 
 #
 # DVB-C (cable) frontends
 #
+CONFIG_DVB_VES1820=m
+CONFIG_DVB_TDA10021=m
+CONFIG_DVB_TDA10023=m
+CONFIG_DVB_STV0297=m
 
 #
 # ATSC (North American/Korean Terrestrial/Cable DTV) frontends
 #
+CONFIG_DVB_NXT200X=m
+CONFIG_DVB_OR51211=m
+CONFIG_DVB_OR51132=m
+CONFIG_DVB_BCM3510=m
+CONFIG_DVB_LGDT330X=m
+CONFIG_DVB_LGDT3305=m
+CONFIG_DVB_S5H1409=m
+CONFIG_DVB_AU8522=m
+CONFIG_DVB_AU8522_DTV=m
+CONFIG_DVB_AU8522_V4L=m
+CONFIG_DVB_S5H1411=m
 
 #
 # ISDB-T (terrestrial) frontends
 #
+CONFIG_DVB_S921=m
+CONFIG_DVB_DIB8000=m
+CONFIG_DVB_MB86A20S=m
 
 #
 # Digital terrestrial only tuners/PLL
 #
+CONFIG_DVB_PLL=m
+CONFIG_DVB_TUNER_DIB0070=m
+CONFIG_DVB_TUNER_DIB0090=m
 
 #
 # SEC control devices for DVB-S
 #
+CONFIG_DVB_LNBP21=m
+CONFIG_DVB_ISL6405=m
+CONFIG_DVB_ISL6421=m
+CONFIG_DVB_ISL6423=m
+CONFIG_DVB_A8293=m
+CONFIG_DVB_LGS8GXX=m
+CONFIG_DVB_ATBM8830=m
+CONFIG_DVB_TDA665x=m
+CONFIG_DVB_IX2505V=m
+CONFIG_DVB_M88RS2000=m
+CONFIG_DVB_AF9033=m
 
 #
 # Tools to develop new frontends
@@ -3257,27 +3547,40 @@ CONFIG_DVB_CAPTURE_DRIVERS=y
 #
 # Graphics support
 #
-CONFIG_AGP=m
-CONFIG_AGP_ALI=m
-CONFIG_AGP_ATI=m
-CONFIG_AGP_AMD=m
-CONFIG_AGP_AMD64=m
-CONFIG_AGP_INTEL=m
-CONFIG_AGP_NVIDIA=m
-CONFIG_AGP_SIS=m
-CONFIG_AGP_SWORKS=m
-CONFIG_AGP_VIA=m
-CONFIG_AGP_EFFICEON=m
+CONFIG_AGP=y
+# CONFIG_AGP_ALI is not set
+# CONFIG_AGP_ATI is not set
+# CONFIG_AGP_AMD is not set
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+# CONFIG_AGP_NVIDIA is not set
+CONFIG_AGP_SIS=y
+# CONFIG_AGP_SWORKS is not set
+CONFIG_AGP_VIA=y
+# CONFIG_AGP_EFFICEON is not set
 CONFIG_VGA_ARB=y
 CONFIG_VGA_ARB_MAX_GPUS=16
-# CONFIG_VGA_SWITCHEROO is not set
+CONFIG_VGA_SWITCHEROO=y
 CONFIG_DRM=m
+CONFIG_DRM_USB=m
 CONFIG_DRM_KMS_HELPER=m
+# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set
 CONFIG_DRM_TTM=m
+
+#
+# I2C encoder or helper chips
+#
+# CONFIG_DRM_I2C_CH7006 is not set
+# CONFIG_DRM_I2C_SIL164 is not set
+CONFIG_DRM_I2C_NXP_TDA998X=m
 CONFIG_DRM_TDFX=m
 CONFIG_DRM_R128=m
 CONFIG_DRM_RADEON=m
-CONFIG_DRM_RADEON_KMS=y
+# CONFIG_DRM_RADEON_UMS is not set
+CONFIG_DRM_NOUVEAU=m
+CONFIG_NOUVEAU_DEBUG=5
+CONFIG_NOUVEAU_DEBUG_DEFAULT=3
+CONFIG_DRM_NOUVEAU_BACKLIGHT=y
 CONFIG_DRM_I810=m
 CONFIG_DRM_I915=m
 CONFIG_DRM_I915_KMS=y
@@ -3286,151 +3589,115 @@ CONFIG_DRM_SIS=m
 CONFIG_DRM_VIA=m
 CONFIG_DRM_SAVAGE=m
 CONFIG_DRM_VMWGFX=m
-CONFIG_STUB_POULSBO=m
-CONFIG_VGASTATE=m
+# CONFIG_DRM_VMWGFX_FBCON is not set
+CONFIG_DRM_GMA500=m
+CONFIG_DRM_GMA600=y
+CONFIG_DRM_GMA3600=y
+CONFIG_DRM_UDL=m
+CONFIG_DRM_AST=m
+CONFIG_DRM_MGAG200=m
+CONFIG_DRM_CIRRUS_QEMU=m
+CONFIG_DRM_QXL=m
+# CONFIG_VGASTATE is not set
 CONFIG_VIDEO_OUTPUT_CONTROL=m
-CONFIG_FB=y
-CONFIG_FIRMWARE_EDID=y
-CONFIG_FB_DDC=m
-CONFIG_FB_BOOT_VESA_SUPPORT=y
-CONFIG_FB_CFB_FILLRECT=y
-CONFIG_FB_CFB_COPYAREA=y
-CONFIG_FB_CFB_IMAGEBLIT=y
+CONFIG_HDMI=y
+CONFIG_FB=m
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=m
+CONFIG_FB_CFB_COPYAREA=m
+CONFIG_FB_CFB_IMAGEBLIT=m
 # CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
 CONFIG_FB_SYS_FILLRECT=m
 CONFIG_FB_SYS_COPYAREA=m
 CONFIG_FB_SYS_IMAGEBLIT=m
 # CONFIG_FB_FOREIGN_ENDIAN is not set
 CONFIG_FB_SYS_FOPS=m
-# CONFIG_FB_WMT_GE_ROPS is not set
 CONFIG_FB_DEFERRED_IO=y
-CONFIG_FB_SVGALIB=m
+# CONFIG_FB_SVGALIB is not set
 # CONFIG_FB_MACMODES is not set
 CONFIG_FB_BACKLIGHT=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
 
 #
 # Frame buffer hardware drivers
 #
-CONFIG_FB_CIRRUS=m
-CONFIG_FB_PM2=m
-CONFIG_FB_PM2_FIFO_DISCONNECT=y
-CONFIG_FB_CYBER2000=m
-CONFIG_FB_CYBER2000_DDC=y
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
 # CONFIG_FB_ARC is not set
-CONFIG_FB_ASILIANT=y
-# CONFIG_FB_IMSTT is not set
 # CONFIG_FB_VGA16 is not set
-CONFIG_FB_UVESA=m
-CONFIG_FB_VESA=y
+# CONFIG_FB_UVESA is not set
 # CONFIG_FB_N411 is not set
 # CONFIG_FB_HGA is not set
 # CONFIG_FB_S1D13XXX is not set
-CONFIG_FB_NVIDIA=m
-CONFIG_FB_NVIDIA_I2C=y
-# CONFIG_FB_NVIDIA_DEBUG is not set
-CONFIG_FB_NVIDIA_BACKLIGHT=y
-CONFIG_FB_RIVA=m
-CONFIG_FB_RIVA_I2C=y
-# CONFIG_FB_RIVA_DEBUG is not set
-CONFIG_FB_RIVA_BACKLIGHT=y
-CONFIG_FB_I810=m
-CONFIG_FB_I810_GTF=y
-CONFIG_FB_I810_I2C=y
-CONFIG_FB_LE80578=m
-CONFIG_FB_CARILLO_RANCH=m
-CONFIG_FB_MATROX=m
-CONFIG_FB_MATROX_MILLENIUM=y
-CONFIG_FB_MATROX_MYSTIQUE=y
-CONFIG_FB_MATROX_G=y
-CONFIG_FB_MATROX_I2C=m
-CONFIG_FB_MATROX_MAVEN=m
-CONFIG_FB_RADEON=m
-CONFIG_FB_RADEON_I2C=y
-CONFIG_FB_RADEON_BACKLIGHT=y
-# CONFIG_FB_RADEON_DEBUG is not set
-CONFIG_FB_ATY128=m
-CONFIG_FB_ATY128_BACKLIGHT=y
-CONFIG_FB_ATY=m
-CONFIG_FB_ATY_CT=y
-# CONFIG_FB_ATY_GENERIC_LCD is not set
-CONFIG_FB_ATY_GX=y
-CONFIG_FB_ATY_BACKLIGHT=y
-CONFIG_FB_S3=m
-CONFIG_FB_S3_DDC=y
-CONFIG_FB_SAVAGE=m
-CONFIG_FB_SAVAGE_I2C=y
-CONFIG_FB_SAVAGE_ACCEL=y
-CONFIG_FB_SIS=m
-CONFIG_FB_SIS_300=y
-CONFIG_FB_SIS_315=y
-CONFIG_FB_VIA=m
-# CONFIG_FB_VIA_DIRECT_PROCFS is not set
-# CONFIG_FB_VIA_X_COMPATIBILITY is not set
-CONFIG_FB_NEOMAGIC=m
-CONFIG_FB_KYRO=m
-CONFIG_FB_3DFX=m
-CONFIG_FB_3DFX_ACCEL=y
-CONFIG_FB_3DFX_I2C=y
-CONFIG_FB_VOODOO1=m
-CONFIG_FB_VT8623=m
-CONFIG_FB_TRIDENT=m
-CONFIG_FB_ARK=m
-CONFIG_FB_PM3=m
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I740 is not set
+# CONFIG_FB_I810 is not set
+# CONFIG_FB_LE80578 is not set
+# CONFIG_FB_INTEL is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_VIA is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
 # CONFIG_FB_CARMINE is not set
-CONFIG_FB_GEODE=y
-CONFIG_FB_GEODE_LX=m
-CONFIG_FB_GEODE_GX=m
-CONFIG_FB_GEODE_GX1=m
-CONFIG_FB_TMIO=m
-CONFIG_FB_TMIO_ACCELL=y
-CONFIG_FB_SM501=m
-CONFIG_FB_SMSCUFX=m
+# CONFIG_FB_GEODE is not set
+# CONFIG_FB_TMIO is not set
+# CONFIG_FB_SM501 is not set
+# CONFIG_FB_SMSCUFX is not set
 # CONFIG_FB_UDL is not set
+# CONFIG_FB_GOLDFISH is not set
 # CONFIG_FB_VIRTUAL is not set
 CONFIG_XEN_FBDEV_FRONTEND=m
-CONFIG_FB_METRONOME=m
+# CONFIG_FB_METRONOME is not set
 # CONFIG_FB_MB862XX is not set
 # CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_AUO_K190X is not set
+# CONFIG_EXYNOS_VIDEO is not set
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_LCD_CLASS_DEVICE=m
 CONFIG_LCD_PLATFORM=m
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_GENERIC=m
-CONFIG_BACKLIGHT_PROGEAR=m
-CONFIG_BACKLIGHT_CARILLO_RANCH=m
+# CONFIG_BACKLIGHT_GENERIC is not set
+# CONFIG_BACKLIGHT_LM3533 is not set
 CONFIG_BACKLIGHT_APPLE=m
-CONFIG_BACKLIGHT_SAHARA=m
-CONFIG_BACKLIGHT_ADP8860=m
-CONFIG_BACKLIGHT_ADP8870=m
-CONFIG_BACKLIGHT_PCF50633=m
-
-#
-# Display device support
-#
-CONFIG_DISPLAY_SUPPORT=m
-
-#
-# Display hardware drivers
-#
+# CONFIG_BACKLIGHT_SAHARA is not set
+# CONFIG_BACKLIGHT_ADP8860 is not set
+# CONFIG_BACKLIGHT_ADP8870 is not set
+# CONFIG_BACKLIGHT_LM3630 is not set
+# CONFIG_BACKLIGHT_LM3639 is not set
+# CONFIG_BACKLIGHT_LP855X is not set
 
 #
 # Console display driver support
 #
 CONFIG_VGA_CONSOLE=y
 # CONFIG_VGACON_SOFT_SCROLLBACK is not set
-# CONFIG_MDA_CONSOLE is not set
 CONFIG_DUMMY_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=m
 CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 # CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
 # CONFIG_FONTS is not set
 CONFIG_FONT_8x8=y
 CONFIG_FONT_8x16=y
 CONFIG_LOGO=y
-CONFIG_LOGO_LINUX_MONO=y
-CONFIG_LOGO_LINUX_VGA16=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
 CONFIG_LOGO_LINUX_CLUT224=y
 CONFIG_SOUND=m
 CONFIG_SOUND_OSS_CORE=y
@@ -3453,22 +3720,24 @@ CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
 CONFIG_SND_DYNAMIC_MINORS=y
 # CONFIG_SND_SUPPORT_OLD_API is not set
 CONFIG_SND_VERBOSE_PROCFS=y
-# CONFIG_SND_VERBOSE_PRINTK is not set
-# CONFIG_SND_DEBUG is not set
+CONFIG_SND_VERBOSE_PRINTK=y
+CONFIG_SND_DEBUG=y
+# CONFIG_SND_DEBUG_VERBOSE is not set
+CONFIG_SND_PCM_XRUN_DEBUG=y
 CONFIG_SND_VMASTER=y
+CONFIG_SND_KCTL_JACK=y
 CONFIG_SND_DMA_SGBUF=y
 CONFIG_SND_RAWMIDI_SEQ=m
 CONFIG_SND_OPL3_LIB_SEQ=m
-CONFIG_SND_OPL4_LIB_SEQ=m
-CONFIG_SND_SBAWE_SEQ=m
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
 CONFIG_SND_EMU10K1_SEQ=m
 CONFIG_SND_MPU401_UART=m
 CONFIG_SND_OPL3_LIB=m
-CONFIG_SND_OPL4_LIB=m
 CONFIG_SND_VX_LIB=m
 CONFIG_SND_AC97_CODEC=m
 CONFIG_SND_DRIVERS=y
-# CONFIG_SND_PCSP is not set
+CONFIG_SND_PCSP=m
 CONFIG_SND_DUMMY=m
 CONFIG_SND_ALOOP=m
 CONFIG_SND_VIRMIDI=m
@@ -3478,44 +3747,9 @@ CONFIG_SND_SERIAL_U16550=m
 CONFIG_SND_MPU401=m
 CONFIG_SND_PORTMAN2X4=m
 CONFIG_SND_AC97_POWER_SAVE=y
-CONFIG_SND_AC97_POWER_SAVE_DEFAULT=60
-CONFIG_SND_WSS_LIB=m
+CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0
 CONFIG_SND_SB_COMMON=m
-CONFIG_SND_SB8_DSP=m
 CONFIG_SND_SB16_DSP=m
-CONFIG_SND_ISA=y
-CONFIG_SND_ADLIB=m
-CONFIG_SND_AD1816A=m
-CONFIG_SND_AD1848=m
-CONFIG_SND_ALS100=m
-CONFIG_SND_AZT1605=m
-CONFIG_SND_AZT2316=m
-CONFIG_SND_AZT2320=m
-CONFIG_SND_CMI8330=m
-CONFIG_SND_CS4231=m
-CONFIG_SND_CS4236=m
-CONFIG_SND_ES1688=m
-CONFIG_SND_ES18XX=m
-CONFIG_SND_SC6000=m
-CONFIG_SND_GUSCLASSIC=m
-CONFIG_SND_GUSEXTREME=m
-CONFIG_SND_GUSMAX=m
-CONFIG_SND_INTERWAVE=m
-CONFIG_SND_INTERWAVE_STB=m
-CONFIG_SND_JAZZ16=m
-CONFIG_SND_OPL3SA2=m
-CONFIG_SND_OPTI92X_AD1848=m
-CONFIG_SND_OPTI92X_CS4231=m
-CONFIG_SND_OPTI93X=m
-CONFIG_SND_MIRO=m
-CONFIG_SND_SB8=m
-CONFIG_SND_SB16=m
-CONFIG_SND_SBAWE=m
-CONFIG_SND_SB16_CSP=y
-CONFIG_SND_SSCAPE=m
-CONFIG_SND_WAVEFRONT=m
-CONFIG_SND_MSND_PINNACLE=m
-CONFIG_SND_MSND_CLASSIC=m
 CONFIG_SND_TEA575X=m
 CONFIG_SND_PCI=y
 CONFIG_SND_AD1889=m
@@ -3528,7 +3762,7 @@ CONFIG_SND_ATIIXP_MODEM=m
 CONFIG_SND_AU8810=m
 CONFIG_SND_AU8820=m
 CONFIG_SND_AU8830=m
-CONFIG_SND_AW2=m
+# CONFIG_SND_AW2 is not set
 CONFIG_SND_AZT3328=m
 CONFIG_SND_BT87X=m
 # CONFIG_SND_BT87X_OVERCLOCK is not set
@@ -3571,11 +3805,10 @@ CONFIG_SND_HDA_PREALLOC_SIZE=64
 CONFIG_SND_HDA_HWDEP=y
 CONFIG_SND_HDA_RECONFIG=y
 CONFIG_SND_HDA_INPUT_BEEP=y
-CONFIG_SND_HDA_INPUT_BEEP_MODE=2
+CONFIG_SND_HDA_INPUT_BEEP_MODE=1
 CONFIG_SND_HDA_INPUT_JACK=y
-CONFIG_SND_HDA_PATCH_LOADER=y
+# CONFIG_SND_HDA_PATCH_LOADER is not set
 CONFIG_SND_HDA_CODEC_REALTEK=y
-CONFIG_SND_HDA_ENABLE_REALTEK_QUIRKS=y
 CONFIG_SND_HDA_CODEC_ANALOG=y
 CONFIG_SND_HDA_CODEC_SIGMATEL=y
 CONFIG_SND_HDA_CODEC_VIA=y
@@ -3584,11 +3817,11 @@ CONFIG_SND_HDA_CODEC_CIRRUS=y
 CONFIG_SND_HDA_CODEC_CONEXANT=y
 CONFIG_SND_HDA_CODEC_CA0110=y
 CONFIG_SND_HDA_CODEC_CA0132=y
+# CONFIG_SND_HDA_CODEC_CA0132_DSP is not set
 CONFIG_SND_HDA_CODEC_CMEDIA=y
 CONFIG_SND_HDA_CODEC_SI3054=y
 CONFIG_SND_HDA_GENERIC=y
-CONFIG_SND_HDA_POWER_SAVE=y
-CONFIG_SND_HDA_POWER_SAVE_DEFAULT=60
+CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
 CONFIG_SND_HDSP=m
 CONFIG_SND_HDSPM=m
 CONFIG_SND_ICE1712=m
@@ -3607,7 +3840,7 @@ CONFIG_SND_RIPTIDE=m
 CONFIG_SND_RME32=m
 CONFIG_SND_RME96=m
 CONFIG_SND_RME9652=m
-CONFIG_SND_SIS7019=m
+# CONFIG_SND_SIS7019 is not set
 CONFIG_SND_SONICVIBES=m
 CONFIG_SND_TRIDENT=m
 CONFIG_SND_VIA82XX=m
@@ -3626,103 +3859,107 @@ CONFIG_SND_USB_6FIRE=m
 CONFIG_SND_FIREWIRE=y
 CONFIG_SND_FIREWIRE_LIB=m
 CONFIG_SND_FIREWIRE_SPEAKERS=m
-CONFIG_SND_ISIGHT=m
-CONFIG_SND_PCMCIA=y
-CONFIG_SND_VXPOCKET=m
-CONFIG_SND_PDAUDIOCF=m
+# CONFIG_SND_ISIGHT is not set
+# CONFIG_SND_SCS1X is not set
 # CONFIG_SND_SOC is not set
 # CONFIG_SOUND_PRIME is not set
 CONFIG_AC97_BUS=m
-CONFIG_HID_SUPPORT=y
-CONFIG_HID=y
-# CONFIG_HIDRAW is not set
 
 #
-# USB Input Devices
+# HID support
 #
-CONFIG_USB_HID=m
-# CONFIG_HID_PID is not set
-CONFIG_USB_HIDDEV=y
+CONFIG_HID=y
+# CONFIG_HID_BATTERY_STRENGTH is not set
+CONFIG_HIDRAW=y
+# CONFIG_UHID is not set
+CONFIG_HID_GENERIC=y
 
 #
 # Special HID drivers
 #
-CONFIG_HID_A4TECH=m
-CONFIG_HID_ACRUX=m
-# CONFIG_HID_ACRUX_FF is not set
-CONFIG_HID_APPLE=m
-CONFIG_HID_BELKIN=m
-CONFIG_HID_CHERRY=m
-CONFIG_HID_CHICONY=m
-CONFIG_HID_PRODIKEYS=m
-CONFIG_HID_CYPRESS=m
-CONFIG_HID_DRAGONRISE=m
-# CONFIG_DRAGONRISE_FF is not set
+CONFIG_HID_A4TECH=y
+# CONFIG_HID_ACRUX is not set
+CONFIG_HID_APPLE=y
+# CONFIG_HID_APPLEIR is not set
+CONFIG_HID_AUREAL=m
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+# CONFIG_HID_PRODIKEYS is not set
+CONFIG_HID_CYPRESS=y
+# CONFIG_HID_DRAGONRISE is not set
 # CONFIG_HID_EMS_FF is not set
-CONFIG_HID_ELECOM=m
-CONFIG_HID_EZKEY=m
-CONFIG_HID_HOLTEK=m
-# CONFIG_HOLTEK_FF is not set
+# CONFIG_HID_ELECOM is not set
+CONFIG_HID_EZKEY=y
+# CONFIG_HID_HOLTEK is not set
 CONFIG_HID_KEYTOUCH=m
-CONFIG_HID_KYE=m
-CONFIG_HID_UCLOGIC=m
-CONFIG_HID_WALTOP=m
+CONFIG_HID_KYE=y
+# CONFIG_HID_UCLOGIC is not set
+# CONFIG_HID_WALTOP is not set
 CONFIG_HID_GYRATION=m
+# CONFIG_HID_ICADE is not set
 CONFIG_HID_TWINHAN=m
-CONFIG_HID_KENSINGTON=m
+CONFIG_HID_KENSINGTON=y
 CONFIG_HID_LCPOWER=m
-CONFIG_HID_LOGITECH=m
+CONFIG_HID_LENOVO_TPKBD=m
+CONFIG_HID_LOGITECH=y
 CONFIG_HID_LOGITECH_DJ=m
 # CONFIG_LOGITECH_FF is not set
 # CONFIG_LOGIRUMBLEPAD2_FF is not set
 # CONFIG_LOGIG940_FF is not set
 # CONFIG_LOGIWHEELS_FF is not set
-CONFIG_HID_MAGICMOUSE=m
-CONFIG_HID_MICROSOFT=m
-CONFIG_HID_MONTEREY=m
-CONFIG_HID_MULTITOUCH=m
-CONFIG_HID_NTRIG=m
+# CONFIG_HID_MAGICMOUSE is not set
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+# CONFIG_HID_MULTITOUCH is not set
+# CONFIG_HID_NTRIG is not set
 CONFIG_HID_ORTEK=m
-CONFIG_HID_PANTHERLORD=m
-# CONFIG_PANTHERLORD_FF is not set
+# CONFIG_HID_PANTHERLORD is not set
 CONFIG_HID_PETALYNX=m
 CONFIG_HID_PICOLCD=m
 CONFIG_HID_PICOLCD_FB=y
 CONFIG_HID_PICOLCD_BACKLIGHT=y
 CONFIG_HID_PICOLCD_LCD=y
 CONFIG_HID_PICOLCD_LEDS=y
-CONFIG_HID_PRIMAX=m
-CONFIG_HID_QUANTA=m
+CONFIG_HID_PICOLCD_CIR=y
+# CONFIG_HID_PRIMAX is not set
+# CONFIG_HID_PS3REMOTE is not set
 CONFIG_HID_ROCCAT=m
-CONFIG_HID_ROCCAT_COMMON=m
-CONFIG_HID_ROCCAT_ARVO=m
-CONFIG_HID_ROCCAT_KONE=m
-CONFIG_HID_ROCCAT_KONEPLUS=m
-CONFIG_HID_ROCCAT_KOVAPLUS=m
-CONFIG_HID_ROCCAT_PYRA=m
+CONFIG_HID_SAITEK=m
 CONFIG_HID_SAMSUNG=m
-CONFIG_HID_SONY=m
+# CONFIG_HID_SONY is not set
 CONFIG_HID_SPEEDLINK=m
+# CONFIG_HID_STEELSERIES is not set
 CONFIG_HID_SUNPLUS=m
-CONFIG_HID_GREENASIA=m
-# CONFIG_GREENASIA_FF is not set
-CONFIG_HID_SMARTJOYPLUS=m
-# CONFIG_SMARTJOYPLUS_FF is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+CONFIG_HID_TIVO=m
 CONFIG_HID_TOPSEED=m
-CONFIG_HID_THRUSTMASTER=m
-# CONFIG_THRUSTMASTER_FF is not set
-CONFIG_HID_WACOM=m
-# CONFIG_HID_WACOM_POWER_SUPPLY is not set
-CONFIG_HID_WIIMOTE=m
-CONFIG_HID_ZEROPLUS=m
-# CONFIG_ZEROPLUS_FF is not set
+CONFIG_HID_THINGM=m
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_WACOM is not set
+# CONFIG_HID_WIIMOTE is not set
+# CONFIG_HID_ZEROPLUS is not set
 CONFIG_HID_ZYDACRON=m
-CONFIG_USB_SUPPORT=y
-CONFIG_USB_COMMON=y
-CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_HID_SENSOR_HUB=m
+
+#
+# USB HID support
+#
+CONFIG_USB_HID=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+
+#
+# I2C HID support
+#
+CONFIG_I2C_HID=m
 CONFIG_USB_ARCH_HAS_OHCI=y
 CONFIG_USB_ARCH_HAS_EHCI=y
 CONFIG_USB_ARCH_HAS_XHCI=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_ARCH_HAS_HCD=y
 CONFIG_USB=y
 # CONFIG_USB_DEBUG is not set
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
@@ -3730,14 +3967,12 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 #
 # Miscellaneous USB options
 #
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_DEVICE_CLASS=y
-CONFIG_USB_DYNAMIC_MINORS=y
-CONFIG_USB_SUSPEND=y
+CONFIG_USB_DEFAULT_PERSIST=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
 # CONFIG_USB_OTG is not set
-CONFIG_USB_DWC3=m
-# CONFIG_USB_DWC3_DEBUG is not set
-# CONFIG_USB_MON is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=m
 CONFIG_USB_WUSB=m
 CONFIG_USB_WUSB_CBAF=m
 # CONFIG_USB_WUSB_CBAF_DEBUG is not set
@@ -3747,26 +3982,30 @@ CONFIG_USB_WUSB_CBAF=m
 #
 # CONFIG_USB_C67X00_HCD is not set
 CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=m
 # CONFIG_USB_XHCI_HCD_DEBUGGING is not set
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
 CONFIG_USB_EHCI_TT_NEWSCHED=y
-CONFIG_USB_OXU210HP_HCD=m
-CONFIG_USB_ISP116X_HCD=m
+CONFIG_USB_EHCI_PCI=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
 # CONFIG_USB_ISP1760_HCD is not set
 CONFIG_USB_ISP1362_HCD=m
 CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
 # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
 # CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
 CONFIG_USB_OHCI_LITTLE_ENDIAN=y
 CONFIG_USB_UHCI_HCD=y
-CONFIG_USB_U132_HCD=m
+# CONFIG_USB_U132_HCD is not set
 CONFIG_USB_SL811_HCD=m
-# CONFIG_USB_SL811_HCD_ISO is not set
-# CONFIG_USB_SL811_CS is not set
-CONFIG_USB_R8A66597_HCD=m
+CONFIG_USB_SL811_HCD_ISO=y
+# CONFIG_USB_R8A66597_HCD is not set
 CONFIG_USB_WHCI_HCD=m
 CONFIG_USB_HWA_HCD=m
+CONFIG_USB_HCD_SSB=m
 
 #
 # USB Device Class drivers
@@ -3783,7 +4022,7 @@ CONFIG_USB_TMC=m
 #
 # also be needed; see USB_STORAGE Help for more info
 #
-CONFIG_USB_STORAGE=m
+CONFIG_USB_STORAGE=y
 # CONFIG_USB_STORAGE_DEBUG is not set
 CONFIG_USB_STORAGE_REALTEK=m
 CONFIG_REALTEK_AUTOPM=y
@@ -3799,20 +4038,24 @@ CONFIG_USB_STORAGE_ONETOUCH=m
 CONFIG_USB_STORAGE_KARMA=m
 CONFIG_USB_STORAGE_CYPRESS_ATACB=m
 CONFIG_USB_STORAGE_ENE_UB6250=m
-# CONFIG_USB_LIBUSUAL is not set
 
 #
 # USB Imaging devices
 #
-# CONFIG_USB_MDC800 is not set
-# CONFIG_USB_MICROTEK is not set
+CONFIG_USB_MDC800=m
+CONFIG_USB_MICROTEK=m
+CONFIG_USB_DWC3=m
+CONFIG_USB_DWC3_HOST=y
+# CONFIG_USB_DWC3_DEBUG is not set
+CONFIG_USB_CHIPIDEA=m
+# CONFIG_USB_CHIPIDEA_HOST is not set
+# CONFIG_USB_CHIPIDEA_DEBUG is not set
 
 #
 # USB port drivers
 #
 CONFIG_USB_USS720=m
 CONFIG_USB_SERIAL=m
-CONFIG_USB_EZUSB=y
 CONFIG_USB_SERIAL_GENERIC=y
 CONFIG_USB_SERIAL_AIRCABLE=m
 CONFIG_USB_SERIAL_ARK3116=m
@@ -3822,7 +4065,7 @@ CONFIG_USB_SERIAL_WHITEHEAT=m
 CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
 CONFIG_USB_SERIAL_CP210X=m
 CONFIG_USB_SERIAL_CYPRESS_M8=m
-CONFIG_USB_SERIAL_EMPEG=m
+# CONFIG_USB_SERIAL_EMPEG is not set
 CONFIG_USB_SERIAL_FTDI_SIO=m
 CONFIG_USB_SERIAL_FUNSOFT=m
 CONFIG_USB_SERIAL_VISOR=m
@@ -3830,16 +4073,18 @@ CONFIG_USB_SERIAL_IPAQ=m
 CONFIG_USB_SERIAL_IR=m
 CONFIG_USB_SERIAL_EDGEPORT=m
 CONFIG_USB_SERIAL_EDGEPORT_TI=m
-CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_F81232=m
+# CONFIG_USB_SERIAL_GARMIN is not set
 CONFIG_USB_SERIAL_IPW=m
 CONFIG_USB_SERIAL_IUU=m
-# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
 # CONFIG_USB_SERIAL_KEYSPAN is not set
-# CONFIG_USB_SERIAL_KLSI is not set
+CONFIG_USB_SERIAL_KLSI=m
 # CONFIG_USB_SERIAL_KOBIL_SCT is not set
 CONFIG_USB_SERIAL_MCT_U232=m
+# CONFIG_USB_SERIAL_METRO is not set
 CONFIG_USB_SERIAL_MOS7720=m
-# CONFIG_USB_SERIAL_MOS7715_PARPORT is not set
+CONFIG_USB_SERIAL_MOS7715_PARPORT=y
 CONFIG_USB_SERIAL_MOS7840=m
 CONFIG_USB_SERIAL_MOTOROLA=m
 # CONFIG_USB_SERIAL_NAVMAN is not set
@@ -3848,22 +4093,25 @@ CONFIG_USB_SERIAL_OTI6858=m
 CONFIG_USB_SERIAL_QCAUX=m
 CONFIG_USB_SERIAL_QUALCOMM=m
 CONFIG_USB_SERIAL_SPCP8X5=m
-CONFIG_USB_SERIAL_HP4X=m
-CONFIG_USB_SERIAL_SAFE=m
-# CONFIG_USB_SERIAL_SAFE_PADDED is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
 CONFIG_USB_SERIAL_SIEMENS_MPI=m
 CONFIG_USB_SERIAL_SIERRAWIRELESS=m
-CONFIG_USB_SERIAL_SYMBOL=m
-CONFIG_USB_SERIAL_TI=m
+# CONFIG_USB_SERIAL_SYMBOL is not set
+# CONFIG_USB_SERIAL_TI is not set
 # CONFIG_USB_SERIAL_CYBERJACK is not set
 CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_WWAN=m
 CONFIG_USB_SERIAL_OPTION=m
 # CONFIG_USB_SERIAL_OMNINET is not set
-CONFIG_USB_SERIAL_OPTICON=m
-CONFIG_USB_SERIAL_VIVOPAY_SERIAL=m
-CONFIG_USB_SERIAL_ZIO=m
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set
+# CONFIG_USB_SERIAL_XSENS_MT is not set
+# CONFIG_USB_SERIAL_ZIO is not set
+# CONFIG_USB_SERIAL_WISHBONE is not set
+CONFIG_USB_SERIAL_ZTE=m
 CONFIG_USB_SERIAL_SSU100=m
+CONFIG_USB_SERIAL_QT2=m
 # CONFIG_USB_SERIAL_DEBUG is not set
 
 #
@@ -3871,14 +4119,14 @@ CONFIG_USB_SERIAL_SSU100=m
 #
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
-# CONFIG_USB_ADUTUX is not set
+CONFIG_USB_ADUTUX=m
 CONFIG_USB_SEVSEG=m
 # CONFIG_USB_RIO500 is not set
 # CONFIG_USB_LEGOTOWER is not set
 CONFIG_USB_LCD=m
 CONFIG_USB_LED=m
-CONFIG_USB_CYPRESS_CY7C63=m
-CONFIG_USB_CYTHERM=m
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
 # CONFIG_USB_IDMOUSE is not set
 CONFIG_USB_FTDI_ELAN=m
 # CONFIG_USB_APPLEDISPLAY is not set
@@ -3888,21 +4136,17 @@ CONFIG_USB_SISUSBVGA_CON=y
 # CONFIG_USB_TRANCEVIBRATOR is not set
 CONFIG_USB_IOWARRIOR=m
 # CONFIG_USB_TEST is not set
-CONFIG_USB_ISIGHTFW=m
+# CONFIG_USB_ISIGHTFW is not set
 CONFIG_USB_YUREX=m
+CONFIG_USB_EZUSB_FX2=m
+CONFIG_USB_HSIC_USB3503=m
 CONFIG_USB_ATM=m
 CONFIG_USB_SPEEDTOUCH=m
 CONFIG_USB_CXACRU=m
 CONFIG_USB_UEAGLEATM=m
 CONFIG_USB_XUSBATM=m
+# CONFIG_USB_PHY is not set
 # CONFIG_USB_GADGET is not set
-
-#
-# OTG and related infrastructure
-#
-CONFIG_USB_OTG_UTILS=y
-CONFIG_USB_GPIO_VBUS=m
-CONFIG_NOP_USB_XCEIV=m
 CONFIG_UWB=m
 CONFIG_UWB_HWA=m
 CONFIG_UWB_WHCI=m
@@ -3918,23 +4162,26 @@ CONFIG_MMC=m
 CONFIG_MMC_BLOCK=m
 CONFIG_MMC_BLOCK_MINORS=8
 CONFIG_MMC_BLOCK_BOUNCE=y
-CONFIG_SDIO_UART=m
-CONFIG_MMC_TEST=m
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
 
 #
 # MMC/SD/SDIO Host Controller Drivers
 #
 CONFIG_MMC_SDHCI=m
 CONFIG_MMC_SDHCI_PCI=m
-# CONFIG_MMC_RICOH_MMC is not set
+CONFIG_MMC_RICOH_MMC=y
+CONFIG_MMC_SDHCI_ACPI=m
 CONFIG_MMC_SDHCI_PLTFM=m
+CONFIG_MMC_SDHCI_PXAV3=m
+CONFIG_MMC_SDHCI_PXAV2=m
 CONFIG_MMC_WBSD=m
 CONFIG_MMC_TIFM_SD=m
-CONFIG_MMC_SDRICOH_CS=m
 CONFIG_MMC_CB710=m
 CONFIG_MMC_VIA_SDMMC=m
 CONFIG_MMC_VUB300=m
 CONFIG_MMC_USHC=m
+CONFIG_MMC_REALTEK_PCI=m
 CONFIG_MEMSTICK=m
 # CONFIG_MEMSTICK_DEBUG is not set
 
@@ -3950,6 +4197,7 @@ CONFIG_MSPRO_BLOCK=m
 CONFIG_MEMSTICK_TIFM_MS=m
 CONFIG_MEMSTICK_JMICRON_38X=m
 CONFIG_MEMSTICK_R592=m
+CONFIG_MEMSTICK_REALTEK_PCI=m
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 
@@ -3957,43 +4205,72 @@ CONFIG_LEDS_CLASS=y
 # LED drivers
 #
 CONFIG_LEDS_LM3530=m
-CONFIG_LEDS_NET48XX=m
-CONFIG_LEDS_NET5501=m
-CONFIG_LEDS_WRAP=m
-CONFIG_LEDS_PCA9532=m
-CONFIG_LEDS_PCA9532_GPIO=y
-CONFIG_LEDS_GPIO=m
+CONFIG_LEDS_LM3533=m
+CONFIG_LEDS_LM3642=m
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_GPIO is not set
 CONFIG_LEDS_LP3944=m
+CONFIG_LEDS_LP55XX_COMMON=m
 CONFIG_LEDS_LP5521=m
 CONFIG_LEDS_LP5523=m
+CONFIG_LEDS_LP5562=m
 CONFIG_LEDS_CLEVO_MAIL=m
-CONFIG_LEDS_PCA955X=m
-CONFIG_LEDS_REGULATOR=m
-CONFIG_LEDS_BD2802=m
+# CONFIG_LEDS_PCA955X is not set
+CONFIG_LEDS_PCA9633=m
+# CONFIG_LEDS_BD2802 is not set
 CONFIG_LEDS_INTEL_SS4200=m
 CONFIG_LEDS_LT3593=m
 CONFIG_LEDS_DELL_NETBOOKS=m
-CONFIG_LEDS_TRIGGERS=y
+# CONFIG_LEDS_RENESAS_TPU is not set
+CONFIG_LEDS_TCA6507=m
+CONFIG_LEDS_LM355x=m
+CONFIG_LEDS_OT200=m
+CONFIG_LEDS_BLINKM=m
 
 #
 # LED Triggers
 #
+CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_ONESHOT=m
 CONFIG_LEDS_TRIGGER_HEARTBEAT=m
 CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+# CONFIG_LEDS_TRIGGER_CPU is not set
 CONFIG_LEDS_TRIGGER_GPIO=m
 CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
 
 #
 # iptables trigger is under Netfilter config (LED target)
 #
-CONFIG_LEDS_TRIGGER_NETDEV=m
+CONFIG_LEDS_TRIGGER_TRANSIENT=m
+CONFIG_LEDS_TRIGGER_CAMERA=m
 # CONFIG_ACCESSIBILITY is not set
 # CONFIG_INFINIBAND is not set
-# CONFIG_EDAC is not set
+CONFIG_EDAC=y
+# CONFIG_EDAC_LEGACY_SYSFS is not set
+# CONFIG_EDAC_DEBUG is not set
+CONFIG_EDAC_DECODE_MCE=m
+CONFIG_EDAC_MCE_INJ=m
+CONFIG_EDAC_MM_EDAC=m
+# CONFIG_EDAC_AMD76X is not set
+# CONFIG_EDAC_E7XXX is not set
+CONFIG_EDAC_E752X=m
+# CONFIG_EDAC_I82875P is not set
+CONFIG_EDAC_I82975X=m
+CONFIG_EDAC_I3000=m
+CONFIG_EDAC_I3200=m
+CONFIG_EDAC_X38=m
+CONFIG_EDAC_I5400=m
+CONFIG_EDAC_I7CORE=m
+# CONFIG_EDAC_I82860 is not set
+# CONFIG_EDAC_R82600 is not set
+CONFIG_EDAC_I5000=m
+CONFIG_EDAC_I5100=m
+CONFIG_EDAC_I7300=m
 CONFIG_RTC_LIB=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_SYSTOHC=y
 CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
 # CONFIG_RTC_DEBUG is not set
 
@@ -4018,12 +4295,13 @@ CONFIG_RTC_DRV_RS5C372=m
 CONFIG_RTC_DRV_ISL1208=m
 CONFIG_RTC_DRV_ISL12022=m
 CONFIG_RTC_DRV_X1205=m
+CONFIG_RTC_DRV_PCF8523=m
 CONFIG_RTC_DRV_PCF8563=m
 CONFIG_RTC_DRV_PCF8583=m
 CONFIG_RTC_DRV_M41T80=m
 CONFIG_RTC_DRV_M41T80_WDT=y
 CONFIG_RTC_DRV_BQ32K=m
-CONFIG_RTC_DRV_S35390A=m
+# CONFIG_RTC_DRV_S35390A is not set
 CONFIG_RTC_DRV_FM3130=m
 CONFIG_RTC_DRV_RX8581=m
 CONFIG_RTC_DRV_RX8025=m
@@ -4043,60 +4321,76 @@ CONFIG_RTC_DRV_DS1511=m
 CONFIG_RTC_DRV_DS1553=m
 CONFIG_RTC_DRV_DS1742=m
 CONFIG_RTC_DRV_STK17TA8=m
-CONFIG_RTC_DRV_M48T86=m
+# CONFIG_RTC_DRV_M48T86 is not set
 CONFIG_RTC_DRV_M48T35=m
 CONFIG_RTC_DRV_M48T59=m
 CONFIG_RTC_DRV_MSM6242=m
 CONFIG_RTC_DRV_BQ4802=m
 CONFIG_RTC_DRV_RP5C01=m
 CONFIG_RTC_DRV_V3020=m
-CONFIG_RTC_DRV_PCF50633=m
+CONFIG_RTC_DRV_DS2404=m
 
 #
 # on-CPU RTC drivers
 #
+
+#
+# HID Sensor RTC drivers
+#
+CONFIG_RTC_DRV_HID_SENSOR_TIME=m
 CONFIG_DMADEVICES=y
 # CONFIG_DMADEVICES_DEBUG is not set
 
 #
 # DMA Devices
 #
-CONFIG_INTEL_MID_DMAC=m
+# CONFIG_INTEL_MID_DMAC is not set
 CONFIG_INTEL_IOATDMA=m
+CONFIG_DW_DMAC=m
+# CONFIG_DW_DMAC_BIG_ENDIAN_IO is not set
 CONFIG_TIMB_DMA=m
 CONFIG_PCH_DMA=m
 CONFIG_DMA_ENGINE=y
+CONFIG_DMA_ACPI=y
 
 #
 # DMA Clients
 #
 CONFIG_NET_DMA=y
 CONFIG_ASYNC_TX_DMA=y
-CONFIG_DMATEST=m
+# CONFIG_DMATEST is not set
 CONFIG_DCA=m
 # CONFIG_AUXDISPLAY is not set
 CONFIG_UIO=m
-CONFIG_UIO_CIF=m
-CONFIG_UIO_PDRV=m
-CONFIG_UIO_PDRV_GENIRQ=m
+# CONFIG_UIO_CIF is not set
+# CONFIG_UIO_PDRV is not set
+# CONFIG_UIO_PDRV_GENIRQ is not set
+# CONFIG_UIO_DMEM_GENIRQ is not set
 CONFIG_UIO_AEC=m
 CONFIG_UIO_SERCOS3=m
 CONFIG_UIO_PCI_GENERIC=m
-CONFIG_UIO_NETX=m
+# CONFIG_UIO_NETX is not set
+# CONFIG_VFIO is not set
+CONFIG_VIRT_DRIVERS=y
 CONFIG_VIRTIO=y
-CONFIG_VIRTIO_RING=y
 
 #
 # Virtio drivers
 #
-CONFIG_VIRTIO_PCI=m
+CONFIG_VIRTIO_PCI=y
 CONFIG_VIRTIO_BALLOON=m
-CONFIG_VIRTIO_MMIO=m
+# CONFIG_VIRTIO_MMIO is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+# CONFIG_HYPERV is not set
 
 #
 # Xen driver support
 #
 CONFIG_XEN_BALLOON=y
+CONFIG_XEN_SELFBALLOONING=y
 CONFIG_XEN_SCRUB_PAGES=y
 CONFIG_XEN_DEV_EVTCHN=m
 CONFIG_XEN_BACKEND=y
@@ -4107,14 +4401,15 @@ CONFIG_XEN_XENBUS_FRONTEND=y
 CONFIG_XEN_GNTDEV=m
 CONFIG_XEN_GRANT_DEV_ALLOC=m
 CONFIG_SWIOTLB_XEN=y
+CONFIG_XEN_TMEM=m
 CONFIG_XEN_PCIDEV_BACKEND=m
+CONFIG_XEN_PRIVCMD=m
+CONFIG_XEN_ACPI_PROCESSOR=m
+CONFIG_XEN_HAVE_PVMMU=y
 CONFIG_STAGING=y
 CONFIG_ET131X=m
-CONFIG_SLICOSS=m
-CONFIG_USBIP_CORE=m
-CONFIG_USBIP_VHCI_HCD=m
-CONFIG_USBIP_HOST=m
-# CONFIG_USBIP_DEBUG is not set
+# CONFIG_SLICOSS is not set
+# CONFIG_USBIP_CORE is not set
 # CONFIG_W35UND is not set
 # CONFIG_PRISM2_USB is not set
 # CONFIG_ECHO is not set
@@ -4123,42 +4418,95 @@ CONFIG_USBIP_HOST=m
 # CONFIG_PANEL is not set
 # CONFIG_R8187SE is not set
 # CONFIG_RTL8192U is not set
+CONFIG_RTLLIB=m
+CONFIG_RTLLIB_CRYPTO_CCMP=m
+CONFIG_RTLLIB_CRYPTO_TKIP=m
+CONFIG_RTLLIB_CRYPTO_WEP=m
 # CONFIG_RTL8192E is not set
 # CONFIG_R8712U is not set
-# CONFIG_RTS_PSTOR is not set
 # CONFIG_RTS5139 is not set
 # CONFIG_TRANZPORT is not set
-# CONFIG_POHMELFS is not set
-CONFIG_IDE_PHISON=m
+# CONFIG_IDE_PHISON is not set
 # CONFIG_LINE6_USB is not set
-CONFIG_DRM_NOUVEAU=m
-CONFIG_DRM_NOUVEAU_BACKLIGHT=y
-
-#
-# I2C encoder or helper chips
-#
-# CONFIG_DRM_I2C_CH7006 is not set
-# CONFIG_DRM_I2C_SIL164 is not set
 # CONFIG_USB_SERIAL_QUATECH2 is not set
-# CONFIG_USB_SERIAL_QUATECH_USB2 is not set
 # CONFIG_VT6655 is not set
 # CONFIG_VT6656 is not set
-# CONFIG_HYPERV_STORAGE is not set
-CONFIG_HYPERV_NET=m
-CONFIG_HYPERV_MOUSE=m
-# CONFIG_VME_BUS is not set
 # CONFIG_DX_SEP is not set
-# CONFIG_IIO is not set
-# CONFIG_XVMALLOC is not set
-# CONFIG_ZRAM is not set
-# CONFIG_WLAGS49_H2 is not set
-# CONFIG_WLAGS49_H25 is not set
+
+#
+# IIO staging drivers
+#
+
+#
+# Accelerometers
+#
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD7291 is not set
+# CONFIG_AD7606 is not set
+# CONFIG_AD799X is not set
+
+#
+# Analog/digital bi-directional converters
+#
+# CONFIG_ADT7316 is not set
+
+#
+# Capacitance to digital converters
+#
+# CONFIG_AD7150 is not set
+# CONFIG_AD7152 is not set
+# CONFIG_AD7746 is not set
+
+#
+# Direct Digital Synthesis
+#
+
+#
+# Digital gyroscope sensors
+#
+
+#
+# Network Analyzer, Impedance Converters
+#
+# CONFIG_AD5933 is not set
+
+#
+# Light sensors
+#
+# CONFIG_SENSORS_ISL29018 is not set
+# CONFIG_SENSORS_ISL29028 is not set
+# CONFIG_TSL2583 is not set
+# CONFIG_TSL2x7x is not set
+
+#
+# Magnetometer sensors
+#
+# CONFIG_SENSORS_HMC5843 is not set
+
+#
+# Active energy metering IC
+#
+# CONFIG_ADE7854 is not set
+
+#
+# Resolver to digital converters
+#
+
+#
+# Triggers - standalone
+#
+# CONFIG_IIO_PERIODIC_RTC_TRIGGER is not set
+# CONFIG_IIO_GPIO_TRIGGER is not set
+# CONFIG_IIO_SYSFS_TRIGGER is not set
+# CONFIG_IIO_SIMPLE_DUMMY is not set
+# CONFIG_ZSMALLOC is not set
 # CONFIG_FB_SM7XX is not set
 # CONFIG_CRYSTALHD is not set
-# CONFIG_CXT1E1 is not set
 # CONFIG_FB_XGI is not set
 # CONFIG_ACPI_QUICKSTART is not set
-# CONFIG_SBE_2T3E3 is not set
 # CONFIG_USB_ENESTORAGE is not set
 # CONFIG_BCM_WIMAX is not set
 # CONFIG_FT1000 is not set
@@ -4169,17 +4517,36 @@ CONFIG_HYPERV_MOUSE=m
 # CONFIG_SPEAKUP is not set
 # CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set
 # CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set
-# CONFIG_DRM_PSB is not set
-CONFIG_INTEL_MEI=m
 # CONFIG_STAGING_MEDIA is not set
+
+#
+# Android
+#
+# CONFIG_ANDROID is not set
+# CONFIG_USB_WPAN_HCD is not set
+# CONFIG_WIMAX_GDM72XX is not set
+# CONFIG_CSR_WIFI is not set
+CONFIG_NET_VENDOR_SILICOM=y
+CONFIG_SBYPASS=m
+CONFIG_BPCTL=m
+CONFIG_CED1401=m
+# CONFIG_DGRP is not set
+CONFIG_FIREWIRE_SERIAL=m
+CONFIG_USB_DWC2=m
+# CONFIG_USB_DWC2_DEBUG is not set
+# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
 CONFIG_X86_PLATFORM_DEVICES=y
 CONFIG_ACER_WMI=m
 CONFIG_ACERHDF=m
 CONFIG_ASUS_LAPTOP=m
+CONFIG_CHROMEOS_LAPTOP=m
+CONFIG_DELL_LAPTOP=m
 CONFIG_DELL_WMI=m
 CONFIG_DELL_WMI_AIO=m
 CONFIG_FUJITSU_LAPTOP=m
-CONFIG_FUJITSU_LAPTOP_DEBUG=y
+# CONFIG_FUJITSU_LAPTOP_DEBUG is not set
+# CONFIG_FUJITSU_TABLET is not set
+CONFIG_AMILO_RFKILL=m
 CONFIG_TC1100_WMI=m
 CONFIG_HP_ACCEL=m
 CONFIG_HP_WMI=m
@@ -4187,7 +4554,7 @@ CONFIG_MSI_LAPTOP=m
 CONFIG_PANASONIC_LAPTOP=m
 CONFIG_COMPAL_LAPTOP=m
 CONFIG_SONY_LAPTOP=m
-# CONFIG_SONYPI_COMPAT is not set
+CONFIG_SONYPI_COMPAT=y
 CONFIG_IDEAPAD_LAPTOP=m
 CONFIG_THINKPAD_ACPI=m
 CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y
@@ -4197,21 +4564,34 @@ CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y
 CONFIG_THINKPAD_ACPI_VIDEO=y
 CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
 CONFIG_SENSORS_HDAPS=m
-CONFIG_INTEL_MENLOW=m
+# CONFIG_INTEL_MENLOW is not set
+CONFIG_EEEPC_LAPTOP=m
+CONFIG_ASUS_WMI=m
+CONFIG_ASUS_NB_WMI=m
+CONFIG_EEEPC_WMI=m
 CONFIG_ACPI_WMI=m
 CONFIG_MSI_WMI=m
-CONFIG_ACPI_ASUS=m
 CONFIG_TOPSTAR_LAPTOP=m
 CONFIG_ACPI_TOSHIBA=m
 CONFIG_TOSHIBA_BT_RFKILL=m
 CONFIG_ACPI_CMPC=m
 CONFIG_INTEL_IPS=m
-CONFIG_IBM_RTL=m
-CONFIG_XO15_EBOOK=m
+# CONFIG_IBM_RTL is not set
+# CONFIG_XO15_EBOOK is not set
 CONFIG_SAMSUNG_LAPTOP=m
 CONFIG_MXM_WMI=m
 CONFIG_INTEL_OAKTRAIL=m
 CONFIG_SAMSUNG_Q10=m
+# CONFIG_APPLE_GMUX is not set
+CONFIG_PVPANIC=m
+CONFIG_CLKDEV_LOOKUP=y
+CONFIG_HAVE_CLK_PREPARE=y
+CONFIG_COMMON_CLK=y
+
+#
+# Common Clock Framework
+#
+# CONFIG_COMMON_CLK_DEBUG is not set
 
 #
 # Hardware Spinlock drivers
@@ -4220,55 +4600,143 @@ CONFIG_CLKSRC_I8253=y
 CONFIG_CLKEVT_I8253=y
 CONFIG_I8253_LOCK=y
 CONFIG_CLKBLD_I8253=y
+CONFIG_MAILBOX=y
 CONFIG_IOMMU_API=y
 CONFIG_IOMMU_SUPPORT=y
 CONFIG_DMAR_TABLE=y
 CONFIG_INTEL_IOMMU=y
-CONFIG_INTEL_IOMMU_DEFAULT_ON=y
+# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
 CONFIG_INTEL_IOMMU_FLOPPY_WA=y
-CONFIG_VIRT_DRIVERS=y
-CONFIG_HYPERV=m
-CONFIG_HYPERV_UTILS=m
-CONFIG_PM_DEVFREQ=y
 
 #
-# DEVFREQ Governors
+# Remoteproc drivers
+#
+# CONFIG_STE_MODEM_RPROC is not set
+
+#
+# Rpmsg drivers
 #
-CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
-CONFIG_DEVFREQ_GOV_PERFORMANCE=y
-CONFIG_DEVFREQ_GOV_POWERSAVE=y
-CONFIG_DEVFREQ_GOV_USERSPACE=y
+# CONFIG_PM_DEVFREQ is not set
+# CONFIG_EXTCON is not set
+CONFIG_MEMORY=y
+CONFIG_IIO=m
+CONFIG_IIO_BUFFER=y
+CONFIG_IIO_BUFFER_CB=y
+CONFIG_IIO_KFIFO_BUF=m
+CONFIG_IIO_TRIGGER=y
+CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
 
 #
-# DEVFREQ Drivers
+# Accelerometers
 #
+# CONFIG_HID_SENSOR_ACCEL_3D is not set
+# CONFIG_IIO_ST_ACCEL_3AXIS is not set
+
+#
+# Analog to digital converters
+#
+# CONFIG_MAX1363 is not set
+# CONFIG_TI_ADC081C is not set
+# CONFIG_VIPERBOARD_ADC is not set
+
+#
+# Amplifiers
+#
+
+#
+# Hid Sensor IIO Common
+#
+CONFIG_HID_SENSOR_IIO_COMMON=m
+CONFIG_HID_SENSOR_IIO_TRIGGER=m
+# CONFIG_HID_SENSOR_ENUM_BASE_QUIRKS is not set
+
+#
+# Digital to analog converters
+#
+# CONFIG_AD5064 is not set
+# CONFIG_AD5380 is not set
+# CONFIG_AD5446 is not set
+# CONFIG_MAX517 is not set
+# CONFIG_MCP4725 is not set
+
+#
+# Frequency Synthesizers DDS/PLL
+#
+
+#
+# Clock Generator/Distribution
+#
+
+#
+# Phase-Locked Loop (PLL) frequency synthesizers
+#
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_HID_SENSOR_GYRO_3D is not set
+# CONFIG_IIO_ST_GYRO_3AXIS is not set
+# CONFIG_ITG3200 is not set
+
+#
+# Inertial measurement units
+#
+# CONFIG_INV_MPU6050_IIO is not set
+
+#
+# Light sensors
+#
+# CONFIG_ADJD_S311 is not set
+# CONFIG_SENSORS_LM3533 is not set
+# CONFIG_SENSORS_TSL2563 is not set
+# CONFIG_VCNL4000 is not set
+# CONFIG_HID_SENSOR_ALS is not set
+
+#
+# Magnetometer sensors
+#
+# CONFIG_AK8975 is not set
+# CONFIG_HID_SENSOR_MAGNETOMETER_3D is not set
+# CONFIG_IIO_ST_MAGN_3AXIS is not set
+# CONFIG_VME_BUS is not set
+# CONFIG_PWM is not set
+# CONFIG_IPACK_BUS is not set
+# CONFIG_RESET_CONTROLLER is not set
 
 #
 # Firmware Drivers
 #
-# CONFIG_EDD is not set
+CONFIG_EDD=m
+# CONFIG_EDD_OFF is not set
 CONFIG_FIRMWARE_MEMMAP=y
-# CONFIG_DELL_RBU is not set
-# CONFIG_DCDBAS is not set
+CONFIG_DELL_RBU=m
+CONFIG_DCDBAS=m
 CONFIG_DMIID=y
-CONFIG_DMI_SYSFS=m
+CONFIG_DMI_SYSFS=y
 CONFIG_ISCSI_IBFT_FIND=y
 CONFIG_ISCSI_IBFT=m
-CONFIG_SIGMA=m
 # CONFIG_GOOGLE_FIRMWARE is not set
 
+#
+# EFI (Extensible Firmware Interface) Support
+#
+CONFIG_EFI_VARS=y
+CONFIG_EFI_VARS_PSTORE=y
+# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set
+
 #
 # File systems
 #
+CONFIG_DCACHE_WORD_ACCESS=y
 # CONFIG_EXT2_FS is not set
 # CONFIG_EXT3_FS is not set
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_USE_FOR_EXT23=y
-CONFIG_EXT4_FS_XATTR=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
 # CONFIG_EXT4_DEBUG is not set
 CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
 CONFIG_FS_MBCACHE=y
 CONFIG_REISERFS_FS=m
 # CONFIG_REISERFS_CHECK is not set
@@ -4276,43 +4744,47 @@ CONFIG_REISERFS_PROC_INFO=y
 CONFIG_REISERFS_FS_XATTR=y
 CONFIG_REISERFS_FS_POSIX_ACL=y
 CONFIG_REISERFS_FS_SECURITY=y
-CONFIG_JFS_FS=m
-CONFIG_JFS_POSIX_ACL=y
-CONFIG_JFS_SECURITY=y
-# CONFIG_JFS_DEBUG is not set
-CONFIG_JFS_STATISTICS=y
+# CONFIG_JFS_FS is not set
 CONFIG_XFS_FS=m
 CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
-# CONFIG_XFS_RT is not set
+CONFIG_XFS_RT=y
+# CONFIG_XFS_WARN is not set
 # CONFIG_XFS_DEBUG is not set
 # CONFIG_GFS2_FS is not set
-# CONFIG_BTRFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
+# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
+# CONFIG_BTRFS_DEBUG is not set
 # CONFIG_NILFS2_FS is not set
 CONFIG_FS_POSIX_ACL=y
-CONFIG_EXPORTFS=m
+CONFIG_EXPORTFS=y
 CONFIG_FILE_LOCKING=y
 CONFIG_FSNOTIFY=y
 CONFIG_DNOTIFY=y
 CONFIG_INOTIFY_USER=y
 CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
-CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
 # CONFIG_QUOTA_DEBUG is not set
-CONFIG_QUOTA_TREE=m
-CONFIG_QFMT_V1=m
-CONFIG_QFMT_V2=m
+CONFIG_QUOTA_TREE=y
+# CONFIG_QFMT_V1 is not set
+CONFIG_QFMT_V2=y
 CONFIG_QUOTACTL=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS4_FS=y
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
+CONFIG_GENERIC_ACL=y
 
 #
 # Caches
 #
 CONFIG_FSCACHE=m
-# CONFIG_FSCACHE_STATS is not set
+CONFIG_FSCACHE_STATS=y
 # CONFIG_FSCACHE_HISTOGRAM is not set
 # CONFIG_FSCACHE_DEBUG is not set
 # CONFIG_FSCACHE_OBJECT_LIST is not set
@@ -4323,7 +4795,7 @@ CONFIG_CACHEFILES=m
 #
 # CD-ROM/DVD Filesystems
 #
-CONFIG_ISO9660_FS=m
+CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
@@ -4333,10 +4805,10 @@ CONFIG_UDF_NLS=y
 # DOS/FAT/NT Filesystems
 #
 CONFIG_FAT_FS=m
-# CONFIG_MSDOS_FS is not set
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
-CONFIG_FAT_DEFAULT_CODEPAGE=850
-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
 # CONFIG_NTFS_FS is not set
 
 #
@@ -4345,39 +4817,22 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
 CONFIG_PROC_FS=y
 # CONFIG_PROC_KCORE is not set
 CONFIG_PROC_SYSCTL=y
-CONFIG_PROC_PAGE_MONITOR=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
-# CONFIG_TMPFS_POSIX_ACL is not set
+CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_TMPFS_XATTR=y
 # CONFIG_HUGETLBFS is not set
 # CONFIG_HUGETLB_PAGE is not set
-# CONFIG_CONFIGFS_FS is not set
+CONFIG_CONFIGFS_FS=m
 CONFIG_MISC_FILESYSTEMS=y
 # CONFIG_ADFS_FS is not set
 # CONFIG_AFFS_FS is not set
-CONFIG_ECRYPT_FS=m
+# CONFIG_ECRYPT_FS is not set
 # CONFIG_HFS_FS is not set
 # CONFIG_HFSPLUS_FS is not set
 # CONFIG_BEFS_FS is not set
 # CONFIG_BFS_FS is not set
 # CONFIG_EFS_FS is not set
-CONFIG_JFFS2_FS=m
-CONFIG_JFFS2_FS_DEBUG=0
-CONFIG_JFFS2_FS_WRITEBUFFER=y
-# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
-# CONFIG_JFFS2_SUMMARY is not set
-# CONFIG_JFFS2_FS_XATTR is not set
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_JFFS2_ZLIB=y
-# CONFIG_JFFS2_LZO is not set
-CONFIG_JFFS2_RTIME=y
-CONFIG_JFFS2_RUBIN=y
-# CONFIG_JFFS2_CMODE_NONE is not set
-CONFIG_JFFS2_CMODE_PRIORITY=y
-# CONFIG_JFFS2_CMODE_SIZE is not set
-# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
-# CONFIG_UBIFS_FS is not set
 # CONFIG_LOGFS is not set
 # CONFIG_CRAMFS is not set
 # CONFIG_SQUASHFS is not set
@@ -4386,113 +4841,117 @@ CONFIG_JFFS2_CMODE_PRIORITY=y
 # CONFIG_OMFS_FS is not set
 # CONFIG_HPFS_FS is not set
 # CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
 # CONFIG_ROMFS_FS is not set
-# CONFIG_PSTORE is not set
+CONFIG_PSTORE=y
+# CONFIG_PSTORE_CONSOLE is not set
+# CONFIG_PSTORE_FTRACE is not set
+# CONFIG_PSTORE_RAM is not set
 # CONFIG_SYSV_FS is not set
-CONFIG_UFS_FS=m
-# CONFIG_UFS_FS_WRITE is not set
-# CONFIG_UFS_DEBUG is not set
+# CONFIG_UFS_FS is not set
 # CONFIG_EXOFS_FS is not set
+# CONFIG_F2FS_FS is not set
+CONFIG_EFIVAR_FS=m
+CONFIG_ORE=m
 CONFIG_NETWORK_FILESYSTEMS=y
 CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
+CONFIG_NFS_V2=m
+CONFIG_NFS_V3=m
 CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-# CONFIG_NFS_V4_1 is not set
-# CONFIG_NFS_FSCACHE is not set
+CONFIG_NFS_V4=m
+# CONFIG_NFS_SWAP is not set
+CONFIG_NFS_V4_1=y
+CONFIG_PNFS_FILE_LAYOUT=m
+CONFIG_PNFS_BLOCK=m
+CONFIG_PNFS_OBJLAYOUT=m
+CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="ipfire.org"
+CONFIG_NFS_FSCACHE=y
 # CONFIG_NFS_USE_LEGACY_DNS is not set
 CONFIG_NFS_USE_KERNEL_DNS=y
-# CONFIG_NFS_USE_NEW_IDMAPPER is not set
 CONFIG_NFSD=m
 CONFIG_NFSD_V2_ACL=y
 CONFIG_NFSD_V3=y
 CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
+# CONFIG_NFSD_FAULT_INJECTION is not set
 CONFIG_LOCKD=m
 CONFIG_LOCKD_V4=y
 CONFIG_NFS_ACL_SUPPORT=m
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=m
 CONFIG_SUNRPC_GSS=m
+CONFIG_SUNRPC_BACKCHANNEL=y
 CONFIG_RPCSEC_GSS_KRB5=m
+# CONFIG_SUNRPC_DEBUG is not set
 # CONFIG_CEPH_FS is not set
 CONFIG_CIFS=m
 CONFIG_CIFS_STATS=y
-CONFIG_CIFS_STATS2=y
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_WEAK_PW_HASH is not set
 # CONFIG_CIFS_UPCALL is not set
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
-# CONFIG_CIFS_DEBUG2 is not set
-# CONFIG_CIFS_DFS_UPCALL is not set
-# CONFIG_CIFS_FSCACHE is not set
 CONFIG_CIFS_ACL=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_CIFS_SMB2=y
+CONFIG_CIFS_FSCACHE=y
 # CONFIG_NCP_FS is not set
 # CONFIG_CODA_FS is not set
 # CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_ACORN_PARTITION is not set
-# CONFIG_OSF_PARTITION is not set
-# CONFIG_AMIGA_PARTITION is not set
-# CONFIG_ATARI_PARTITION is not set
-# CONFIG_MAC_PARTITION is not set
-CONFIG_MSDOS_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-# CONFIG_MINIX_SUBPARTITION is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
-CONFIG_LDM_PARTITION=y
-# CONFIG_LDM_DEBUG is not set
-# CONFIG_SGI_PARTITION is not set
-# CONFIG_ULTRIX_PARTITION is not set
-# CONFIG_SUN_PARTITION is not set
-# CONFIG_KARMA_PARTITION is not set
-CONFIG_EFI_PARTITION=y
-# CONFIG_SYSV68_PARTITION is not set
 CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="cp850"
-# CONFIG_NLS_CODEPAGE_437 is not set
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-CONFIG_NLS_CODEPAGE_850=y
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-CONFIG_NLS_ISO8859_1=y
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-CONFIG_NLS_UTF8=y
+CONFIG_NLS_DEFAULT="utf-8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
+CONFIG_NLS_UTF8=m
+CONFIG_DLM=m
+# CONFIG_DLM_DEBUG is not set
 
 #
 # Kernel hacking
@@ -4502,27 +4961,32 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y
 CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 CONFIG_ENABLE_MUST_CHECK=y
-CONFIG_FRAME_WARN=1024
-# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_FRAME_WARN=2048
+CONFIG_MAGIC_SYSRQ=y
 CONFIG_STRIP_ASM_SYMS=y
+# CONFIG_READABLE_ASM is not set
 # CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
-# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_FS=y
+CONFIG_HEADERS_CHECK=y
 # CONFIG_DEBUG_SECTION_MISMATCH is not set
 CONFIG_DEBUG_KERNEL=y
-# CONFIG_DEBUG_SHIRQ is not set
-# CONFIG_LOCKUP_DETECTOR is not set
-# CONFIG_HARDLOCKUP_DETECTOR is not set
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
-# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
-CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
+# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+# CONFIG_PANIC_ON_OOPS is not set
+CONFIG_PANIC_ON_OOPS_VALUE=0
+# CONFIG_DETECT_HUNG_TASK is not set
 CONFIG_SCHED_DEBUG=y
-# CONFIG_SCHEDSTATS is not set
+CONFIG_SCHEDSTATS=y
 CONFIG_TIMER_STATS=y
 # CONFIG_DEBUG_OBJECTS is not set
 # CONFIG_SLUB_DEBUG_ON is not set
 # CONFIG_SLUB_STATS is not set
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
 # CONFIG_DEBUG_KMEMLEAK is not set
 # CONFIG_DEBUG_RT_MUTEXES is not set
 # CONFIG_RT_MUTEX_TESTER is not set
@@ -4530,10 +4994,10 @@ CONFIG_TIMER_STATS=y
 # CONFIG_DEBUG_MUTEXES is not set
 # CONFIG_DEBUG_LOCK_ALLOC is not set
 # CONFIG_PROVE_LOCKING is not set
-# CONFIG_SPARSE_RCU_POINTER is not set
 # CONFIG_LOCK_STAT is not set
 # CONFIG_DEBUG_ATOMIC_SLEEP is not set
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
 # CONFIG_DEBUG_STACK_USAGE is not set
 # CONFIG_DEBUG_KOBJECT is not set
 # CONFIG_DEBUG_HIGHMEM is not set
@@ -4543,38 +5007,79 @@ CONFIG_DEBUG_BUGVERBOSE=y
 # CONFIG_DEBUG_VIRTUAL is not set
 # CONFIG_DEBUG_WRITECOUNT is not set
 CONFIG_DEBUG_MEMORY_INIT=y
-# CONFIG_DEBUG_LIST is not set
+CONFIG_DEBUG_LIST=y
 # CONFIG_TEST_LIST_SORT is not set
 # CONFIG_DEBUG_SG is not set
 # CONFIG_DEBUG_NOTIFIERS is not set
 # CONFIG_DEBUG_CREDENTIALS is not set
 CONFIG_ARCH_WANT_FRAME_POINTERS=y
-# CONFIG_FRAME_POINTER is not set
+CONFIG_FRAME_POINTER=y
 # CONFIG_BOOT_PRINTK_DELAY is not set
+
+#
+# RCU Debugging
+#
+# CONFIG_SPARSE_RCU_POINTER is not set
 # CONFIG_RCU_TORTURE_TEST is not set
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_RCU_CPU_STALL_INFO is not set
+# CONFIG_RCU_TRACE is not set
 # CONFIG_BACKTRACE_SELF_TEST is not set
 # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
 # CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
 # CONFIG_DEBUG_PER_CPU_MAPS is not set
-# CONFIG_CPU_NOTIFIER_ERROR_INJECT is not set
+# CONFIG_LKDTM is not set
+# CONFIG_NOTIFIER_ERROR_INJECTION is not set
 # CONFIG_FAULT_INJECTION is not set
-# CONFIG_LATENCYTOP is not set
-# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS=y
+# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
 # CONFIG_DEBUG_PAGEALLOC is not set
 CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_NOP_TRACER=y
 CONFIG_HAVE_FUNCTION_TRACER=y
 CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
 CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y
 CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
 CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
 CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
 CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
 CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACER_MAX_TRACE=y
+CONFIG_TRACE_CLOCK=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
 CONFIG_TRACING_SUPPORT=y
-# CONFIG_FTRACE is not set
-# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
-# CONFIG_FIREWIRE_OHCI_REMOTE_DMA is not set
+CONFIG_FTRACE=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_FUNCTION_GRAPH_TRACER=y
+# CONFIG_IRQSOFF_TRACER is not set
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_TRACER_SNAPSHOT=y
+# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+CONFIG_STACK_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_UPROBE_EVENT is not set
+# CONFIG_PROBE_EVENTS is not set
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_FTRACE_MCOUNT_RECORD=y
+# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_MMIOTRACE is not set
+CONFIG_RING_BUFFER_BENCHMARK=m
+# CONFIG_RING_BUFFER_STARTUP_TEST is not set
+CONFIG_RBTREE_TEST=m
+# CONFIG_INTERVAL_TREE_TEST is not set
+# CONFIG_BUILD_DOCSRC is not set
+CONFIG_DYNAMIC_DEBUG=y
 # CONFIG_DMA_API_DEBUG is not set
 # CONFIG_ATOMIC64_SELFTEST is not set
 CONFIG_ASYNC_RAID6_TEST=m
@@ -4582,19 +5087,17 @@ CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_HAVE_ARCH_KGDB=y
 # CONFIG_KGDB is not set
 CONFIG_HAVE_ARCH_KMEMCHECK=y
-# CONFIG_KMEMCHECK is not set
+# CONFIG_TEST_STRING_HELPERS is not set
 # CONFIG_TEST_KSTRTOX is not set
-# CONFIG_STRICT_DEVMEM is not set
-CONFIG_X86_VERBOSE_BOOTUP=y
+CONFIG_STRICT_DEVMEM=y
+# CONFIG_X86_VERBOSE_BOOTUP is not set
 CONFIG_EARLY_PRINTK=y
 # CONFIG_EARLY_PRINTK_DBGP is not set
-# CONFIG_DEBUG_STACKOVERFLOW is not set
+CONFIG_DEBUG_STACKOVERFLOW=y
 # CONFIG_X86_PTDUMP is not set
-CONFIG_DEBUG_RODATA=y
-CONFIG_DEBUG_RODATA_TEST=y
-# CONFIG_DEBUG_SET_MODULE_RONX is not set
-# CONFIG_DEBUG_NX_TEST is not set
+CONFIG_DEBUG_NX_TEST=m
 CONFIG_DOUBLEFAULT=y
+# CONFIG_DEBUG_TLBFLUSH is not set
 # CONFIG_IOMMU_STRESS is not set
 CONFIG_HAVE_MMIOTRACE_SUPPORT=y
 CONFIG_IO_DELAY_TYPE_0X80=0
@@ -4606,20 +5109,182 @@ CONFIG_IO_DELAY_0X80=y
 # CONFIG_IO_DELAY_UDELAY is not set
 # CONFIG_IO_DELAY_NONE is not set
 CONFIG_DEFAULT_IO_DELAY_TYPE=0
+CONFIG_DEBUG_BOOT_PARAMS=y
 # CONFIG_CPA_DEBUG is not set
-# CONFIG_OPTIMIZE_INLINING is not set
-# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
+CONFIG_OPTIMIZE_INLINING=y
+# CONFIG_DEBUG_NMI_SELFTEST is not set
 
 #
 # Security options
 #
+
+#
+# Grsecurity
+#
+CONFIG_ARCH_TRACK_EXEC_LIMIT=y
+CONFIG_PAX_USERCOPY_SLABS=y
+CONFIG_GRKERNSEC=y
+# CONFIG_GRKERNSEC_CONFIG_AUTO is not set
+CONFIG_GRKERNSEC_CONFIG_CUSTOM=y
+
+#
+# Customize Configuration
+#
+
+#
+# PaX
+#
+CONFIG_PAX=y
+
+#
+# PaX Control
+#
+# CONFIG_PAX_SOFTMODE is not set
+CONFIG_PAX_EI_PAX=y
+CONFIG_PAX_PT_PAX_FLAGS=y
+# CONFIG_PAX_XATTR_PAX_FLAGS is not set
+# CONFIG_PAX_NO_ACL_FLAGS is not set
+CONFIG_PAX_HAVE_ACL_FLAGS=y
+# CONFIG_PAX_HOOK_ACL_FLAGS is not set
+
+#
+# Non-executable pages
+#
+CONFIG_PAX_NOEXEC=y
+CONFIG_PAX_PAGEEXEC=y
+CONFIG_PAX_SEGMEXEC=y
+CONFIG_PAX_EMUTRAMP=y
+CONFIG_PAX_MPROTECT=y
+# CONFIG_PAX_MPROTECT_COMPAT is not set
+CONFIG_PAX_ELFRELOCS=y
+CONFIG_PAX_KERNEXEC_PLUGIN_METHOD=""
+
+#
+# Address Space Layout Randomization
+#
+CONFIG_PAX_ASLR=y
+CONFIG_PAX_RANDKSTACK=y
+CONFIG_PAX_RANDUSTACK=y
+CONFIG_PAX_RANDMMAP=y
+
+#
+# Miscellaneous hardening features
+#
+CONFIG_PAX_MEMORY_STACKLEAK=y
+CONFIG_PAX_MEMORY_STRUCTLEAK=y
+CONFIG_PAX_REFCOUNT=y
+CONFIG_PAX_USERCOPY=y
+# CONFIG_PAX_USERCOPY_DEBUG is not set
+# CONFIG_PAX_SIZE_OVERFLOW is not set
+# CONFIG_PAX_LATENT_ENTROPY is not set
+
+#
+# Memory Protections
+#
+CONFIG_GRKERNSEC_KMEM=y
+CONFIG_GRKERNSEC_VM86=y
+# CONFIG_GRKERNSEC_IO is not set
+# CONFIG_GRKERNSEC_PERF_HARDEN is not set
+CONFIG_GRKERNSEC_RAND_THREADSTACK=y
+CONFIG_GRKERNSEC_PROC_MEMMAP=y
+CONFIG_GRKERNSEC_BRUTE=y
+CONFIG_GRKERNSEC_MODHARDEN=y
+CONFIG_GRKERNSEC_HIDESYM=y
+CONFIG_GRKERNSEC_KERN_LOCKOUT=y
+
+#
+# Role Based Access Control Options
+#
+CONFIG_GRKERNSEC_NO_RBAC=y
+# CONFIG_GRKERNSEC_ACL_HIDEKERN is not set
+CONFIG_GRKERNSEC_ACL_MAXTRIES=3
+CONFIG_GRKERNSEC_ACL_TIMEOUT=30
+
+#
+# Filesystem Protections
+#
+# CONFIG_GRKERNSEC_PROC is not set
+CONFIG_GRKERNSEC_LINK=y
+# CONFIG_GRKERNSEC_SYMLINKOWN is not set
+CONFIG_GRKERNSEC_FIFO=y
+# CONFIG_GRKERNSEC_SYSFS_RESTRICT is not set
+# CONFIG_GRKERNSEC_ROFS is not set
+CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL=y
+CONFIG_GRKERNSEC_CHROOT=y
+CONFIG_GRKERNSEC_CHROOT_MOUNT=y
+CONFIG_GRKERNSEC_CHROOT_DOUBLE=y
+CONFIG_GRKERNSEC_CHROOT_PIVOT=y
+CONFIG_GRKERNSEC_CHROOT_CHDIR=y
+# CONFIG_GRKERNSEC_CHROOT_CHMOD is not set
+CONFIG_GRKERNSEC_CHROOT_FCHDIR=y
+# CONFIG_GRKERNSEC_CHROOT_MKNOD is not set
+CONFIG_GRKERNSEC_CHROOT_SHMAT=y
+CONFIG_GRKERNSEC_CHROOT_UNIX=y
+CONFIG_GRKERNSEC_CHROOT_FINDTASK=y
+CONFIG_GRKERNSEC_CHROOT_NICE=y
+CONFIG_GRKERNSEC_CHROOT_SYSCTL=y
+# CONFIG_GRKERNSEC_CHROOT_CAPS is not set
+CONFIG_GRKERNSEC_CHROOT_INITRD=y
+
+#
+# Kernel Auditing
+#
+# CONFIG_GRKERNSEC_AUDIT_GROUP is not set
+# CONFIG_GRKERNSEC_EXECLOG is not set
+CONFIG_GRKERNSEC_RESLOG=y
+# CONFIG_GRKERNSEC_CHROOT_EXECLOG is not set
+# CONFIG_GRKERNSEC_AUDIT_PTRACE is not set
+# CONFIG_GRKERNSEC_AUDIT_CHDIR is not set
+# CONFIG_GRKERNSEC_AUDIT_MOUNT is not set
+CONFIG_GRKERNSEC_SIGNAL=y
+CONFIG_GRKERNSEC_FORKFAIL=y
+# CONFIG_GRKERNSEC_TIME is not set
+CONFIG_GRKERNSEC_PROC_IPADDR=y
+# CONFIG_GRKERNSEC_RWXMAP_LOG is not set
+
+#
+# Executable Protections
+#
+CONFIG_GRKERNSEC_DMESG=y
+CONFIG_GRKERNSEC_HARDEN_PTRACE=y
+CONFIG_GRKERNSEC_PTRACE_READEXEC=y
+CONFIG_GRKERNSEC_SETXID=y
+# CONFIG_GRKERNSEC_TPE is not set
+
+#
+# Network Protections
+#
+CONFIG_GRKERNSEC_RANDNET=y
+CONFIG_GRKERNSEC_BLACKHOLE=y
+CONFIG_GRKERNSEC_NO_SIMULT_CONNECT=y
+# CONFIG_GRKERNSEC_SOCKET is not set
+
+#
+# Sysctl Support
+#
+# CONFIG_GRKERNSEC_SYSCTL is not set
+
+#
+# Logging Options
+#
+CONFIG_GRKERNSEC_FLOODTIME=10
+CONFIG_GRKERNSEC_FLOODBURST=6
 CONFIG_KEYS=y
 # CONFIG_ENCRYPTED_KEYS is not set
-# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
-# CONFIG_SECURITY_DMESG_RESTRICT is not set
-# CONFIG_SECURITY is not set
-# CONFIG_SECURITYFS is not set
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY_DMESG_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_NETWORK_XFRM=y
+# CONFIG_SECURITY_PATH is not set
 # CONFIG_INTEL_TXT is not set
+# CONFIG_SECURITY_SELINUX is not set
+# CONFIG_SECURITY_SMACK is not set
+# CONFIG_SECURITY_TOMOYO is not set
+# CONFIG_SECURITY_APPARMOR is not set
+# CONFIG_IMA is not set
+# CONFIG_EVM is not set
 CONFIG_DEFAULT_SECURITY_DAC=y
 CONFIG_DEFAULT_SECURITY=""
 CONFIG_XOR_BLOCKS=m
@@ -4628,58 +5293,60 @@ CONFIG_ASYNC_MEMCPY=m
 CONFIG_ASYNC_XOR=m
 CONFIG_ASYNC_PQ=m
 CONFIG_ASYNC_RAID6_RECOV=m
-CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA=y
-CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA=y
 CONFIG_CRYPTO=y
 
 #
 # Crypto core or helper
 #
+CONFIG_CRYPTO_FIPS=y
 CONFIG_CRYPTO_ALGAPI=y
 CONFIG_CRYPTO_ALGAPI2=y
-CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD=y
 CONFIG_CRYPTO_AEAD2=y
-CONFIG_CRYPTO_BLKCIPHER=m
+CONFIG_CRYPTO_BLKCIPHER=y
 CONFIG_CRYPTO_BLKCIPHER2=y
 CONFIG_CRYPTO_HASH=y
 CONFIG_CRYPTO_HASH2=y
-CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG=y
 CONFIG_CRYPTO_RNG2=y
 CONFIG_CRYPTO_PCOMP=m
 CONFIG_CRYPTO_PCOMP2=y
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_MANAGER2=y
-CONFIG_CRYPTO_USER=m
-CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
-CONFIG_CRYPTO_GF128MUL=m
-# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_USER is not set
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_GF128MUL=y
+CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_WORKQUEUE=y
-CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_CRYPTD=y
 CONFIG_CRYPTO_AUTHENC=m
-# CONFIG_CRYPTO_TEST is not set
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_ABLK_HELPER_X86=y
+CONFIG_CRYPTO_GLUE_HELPER_X86=m
 
 #
 # Authenticated Encryption with Associated Data
 #
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_SEQIV=m
+CONFIG_CRYPTO_SEQIV=y
 
 #
 # Block modes
 #
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_CTR=m
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=y
 CONFIG_CRYPTO_CTS=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=y
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_XTS=y
 
 #
 # Hash modes
 #
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -4687,18 +5354,20 @@ CONFIG_CRYPTO_VMAC=m
 #
 # Digest
 #
-CONFIG_CRYPTO_CRC32C=m
-CONFIG_CRYPTO_CRC32C_INTEL=m
+CONFIG_CRYPTO_CRC32C=y
+CONFIG_CRYPTO_CRC32C_INTEL=y
+CONFIG_CRYPTO_CRC32=y
+CONFIG_CRYPTO_CRC32_PCLMUL=m
 CONFIG_CRYPTO_GHASH=m
 CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA1=m
-CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA256=y
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
@@ -4706,23 +5375,25 @@ CONFIG_CRYPTO_WP512=m
 #
 # Ciphers
 #
-CONFIG_CRYPTO_AES=m
-CONFIG_CRYPTO_AES_586=m
-CONFIG_CRYPTO_AES_NI_INTEL=m
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_AES_586=y
+CONFIG_CRYPTO_AES_NI_INTEL=y
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_ARC4=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_BLOWFISH_COMMON=m
 CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST_COMMON=m
 CONFIG_CRYPTO_CAST5=m
 CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SALSA20=m
-CONFIG_CRYPTO_SALSA20_586=m
+# CONFIG_CRYPTO_SALSA20_586 is not set
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SERPENT_SSE2_586=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_TWOFISH_COMMON=m
@@ -4731,7 +5402,7 @@ CONFIG_CRYPTO_TWOFISH_586=m
 #
 # Compression
 #
-CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_DEFLATE=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 
@@ -4739,45 +5410,63 @@ CONFIG_CRYPTO_LZO=m
 # Random Number Generation
 #
 CONFIG_CRYPTO_ANSI_CPRNG=m
-CONFIG_CRYPTO_USER_API=m
-CONFIG_CRYPTO_USER_API_HASH=m
-CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
 CONFIG_CRYPTO_HW=y
 CONFIG_CRYPTO_DEV_PADLOCK=m
 CONFIG_CRYPTO_DEV_PADLOCK_AES=m
 CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
 CONFIG_CRYPTO_DEV_GEODE=m
+CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
+CONFIG_PUBLIC_KEY_ALGO_RSA=m
+CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_HAVE_KVM=y
 CONFIG_HAVE_KVM_IRQCHIP=y
+CONFIG_HAVE_KVM_IRQ_ROUTING=y
 CONFIG_HAVE_KVM_EVENTFD=y
 CONFIG_KVM_APIC_ARCHITECTURE=y
 CONFIG_KVM_MMIO=y
 CONFIG_KVM_ASYNC_PF=y
+CONFIG_HAVE_KVM_MSI=y
+CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y
 CONFIG_VIRTUALIZATION=y
 CONFIG_KVM=m
 CONFIG_KVM_INTEL=m
 CONFIG_KVM_AMD=m
-CONFIG_VHOST_NET=m
-CONFIG_LGUEST=m
-# CONFIG_BINARY_PRINTF is not set
+CONFIG_KVM_MMU_AUDIT=y
+CONFIG_KVM_DEVICE_ASSIGNMENT=y
+# CONFIG_LGUEST is not set
+CONFIG_BINARY_PRINTF=y
 
 #
 # Library routines
 #
 CONFIG_RAID6_PQ=m
 CONFIG_BITREVERSE=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
 CONFIG_GENERIC_FIND_FIRST_BIT=y
-CONFIG_CRC_CCITT=m
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_GENERIC_IO=y
+CONFIG_CRC_CCITT=y
 CONFIG_CRC16=y
-CONFIG_CRC_T10DIF=m
+CONFIG_CRC_T10DIF=y
 CONFIG_CRC_ITU_T=m
 CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+CONFIG_CRC32_SLICEBY8=y
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+# CONFIG_CRC32_BIT is not set
 CONFIG_CRC7=m
 CONFIG_LIBCRC32C=m
 CONFIG_CRC8=m
 CONFIG_AUDIT_GENERIC=y
 CONFIG_ZLIB_INFLATE=y
-CONFIG_ZLIB_DEFLATE=y
+CONFIG_ZLIB_DEFLATE=m
 CONFIG_LZO_COMPRESS=y
 CONFIG_LZO_DECOMPRESS=y
 CONFIG_XZ_DEC=y
@@ -4794,6 +5483,7 @@ CONFIG_DECOMPRESS_BZIP2=y
 CONFIG_DECOMPRESS_LZMA=y
 CONFIG_DECOMPRESS_XZ=y
 CONFIG_DECOMPRESS_LZO=y
+CONFIG_GENERIC_ALLOCATOR=y
 CONFIG_TEXTSEARCH=y
 CONFIG_TEXTSEARCH_KMP=m
 CONFIG_TEXTSEARCH_BM=m
@@ -4803,6 +5493,13 @@ CONFIG_HAS_IOPORT=y
 CONFIG_HAS_DMA=y
 CONFIG_CHECK_SIGNATURE=y
 CONFIG_CPU_RMAP=y
+CONFIG_DQL=y
 CONFIG_NLATTR=y
+CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
 CONFIG_AVERAGE=y
+CONFIG_CLZ_TAB=y
 CONFIG_CORDIC=m
+# CONFIG_DDR is not set
+CONFIG_MPILIB=m
+CONFIG_OID_REGISTRY=m
+CONFIG_UCS2_STRING=y
diff --git a/config/rootfiles/common/HTML-Template b/config/rootfiles/common/HTML-Template
new file mode 100644 (file)
index 0000000..4b486d9
--- /dev/null
@@ -0,0 +1,7 @@
+usr/lib/perl5/site_perl/5.12.3/HTML/Template
+usr/lib/perl5/site_perl/5.12.3/HTML/Template.pm
+usr/lib/perl5/site_perl/5.12.3/HTML/Template/FAQ.pm
+#usr/lib/perl5/site_perl/5.12.3/MACHINE-linux-thread-multi/auto/HTML/Template
+#usr/lib/perl5/site_perl/5.12.3/MACHINE-linux-thread-multi/auto/HTML/Template/.packlist
+#usr/share/man/man3/HTML::Template.3
+#usr/share/man/man3/HTML::Template::FAQ.3
index 759d41d76f5456bc93e3e2c819f2c34d5ebe69ac..1048dc8182a91d6e838c060d79a06b220b3d7073 100644 (file)
@@ -1,6 +1,12 @@
-#boot/MLO
-#boot/u-boot.bin
 usr/bin/mkimage
-boot/boot.scr
-boot/boot.script
-boot/convert_bootscript
+#usr/share/u-boot
+#usr/share/u-boot/pandaboard
+#usr/share/u-boot/pandaboard/MLO
+#usr/share/u-boot/pandaboard/u-boot.bin
+#usr/share/u-boot/pandaboard/u-boot.img
+#usr/share/u-boot/wandboard_dl
+#usr/share/u-boot/wandboard_dl/u-boot.imx
+#usr/share/u-boot/wandboard_quad
+#usr/share/u-boot/wandboard_quad/u-boot.imx
+#usr/share/u-boot/wandboard_solo
+#usr/share/u-boot/wandboard_solo/u-boot.imx
diff --git a/config/rootfiles/common/armv5tel/u-boot-panda b/config/rootfiles/common/armv5tel/u-boot-panda
new file mode 100644 (file)
index 0000000..544d9d7
--- /dev/null
@@ -0,0 +1,5 @@
+#boot/MLO
+#boot/u-boot.bin
+boot/boot.scr
+boot/boot.script
+boot/convert_bootscript
index 39225a43ba69a572c16b273ce89ae9a6724c344d..907783052746fc9b6826cf4a57f620d7f27a87d4 100644 (file)
@@ -16,12 +16,19 @@ lib/libiptc.so.0
 lib/libiptc.so.0.0.0
 #lib/libxtables.la
 lib/libxtables.so
-lib/libxtables.so.7
-lib/libxtables.so.7.0.0
+lib/libxtables.so.10
+lib/libxtables.so.10.0.0
 lib/xtables
+#lib/xtables/libip6t_DNAT.so
+#lib/xtables/libip6t_DNPT.so
 #lib/xtables/libip6t_HL.so
 #lib/xtables/libip6t_LOG.so
+#lib/xtables/libip6t_MASQUERADE.so
+#lib/xtables/libip6t_NETMAP.so
+#lib/xtables/libip6t_REDIRECT.so
 #lib/xtables/libip6t_REJECT.so
+#lib/xtables/libip6t_SNAT.so
+#lib/xtables/libip6t_SNPT.so
 #lib/xtables/libip6t_ah.so
 #lib/xtables/libip6t_dst.so
 #lib/xtables/libip6t_eui64.so
@@ -58,6 +65,7 @@ lib/xtables
 #lib/xtables/libxt_CONNSECMARK.so
 #lib/xtables/libxt_CT.so
 #lib/xtables/libxt_DSCP.so
+#lib/xtables/libxt_HMARK.so
 #lib/xtables/libxt_IDLETIMER.so
 #lib/xtables/libxt_IMQ.so
 #lib/xtables/libxt_LED.so
@@ -75,6 +83,7 @@ lib/xtables
 #lib/xtables/libxt_TPROXY.so
 #lib/xtables/libxt_TRACE.so
 #lib/xtables/libxt_addrtype.so
+#lib/xtables/libxt_bpf.so
 #lib/xtables/libxt_cluster.so
 #lib/xtables/libxt_comment.so
 #lib/xtables/libxt_connbytes.so
@@ -128,9 +137,6 @@ sbin/iptables-restore
 sbin/iptables-save
 sbin/iptables-xml
 sbin/xtables-multi
-#usr/include/iptables
-#usr/include/iptables.h
-#usr/include/iptables/internal.h
 #usr/include/libipq.h
 #usr/include/libiptc
 #usr/include/libiptc/ipt_kernel_headers.h
@@ -138,8 +144,6 @@ sbin/xtables-multi
 #usr/include/libiptc/libiptc.h
 #usr/include/libiptc/libxtc.h
 #usr/include/libiptc/xtcshared.h
-#usr/include/libipulog
-#usr/include/libipulog/libipulog.h
 #usr/include/libnetfilter_conntrack
 #usr/include/libnetfilter_conntrack/libnetfilter_conntrack.h
 #usr/include/libnetfilter_conntrack/libnetfilter_conntrack_dccp.h
@@ -153,36 +157,13 @@ sbin/xtables-multi
 #usr/include/libnetfilter_cttimeout
 #usr/include/libnetfilter_cttimeout/libnetfilter_cttimeout.h
 #usr/include/libnetfilter_queue
-#usr/include/libnetfilter_queue/libipq.h
 #usr/include/libnetfilter_queue/libnetfilter_queue.h
 #usr/include/libnetfilter_queue/linux_nfnetlink_queue.h
 #usr/include/libnfnetlink
 #usr/include/libnfnetlink/libnfnetlink.h
 #usr/include/libnfnetlink/linux_nfnetlink.h
 #usr/include/libnfnetlink/linux_nfnetlink_compat.h
-#usr/include/linux/netfilter/Kbuild
-#usr/include/linux/netfilter/ipset/Kbuild
-#usr/include/linux/netfilter/ipset/ip_set_ahash.h
-#usr/include/linux/netfilter/ipset/ip_set_getport.h
-#usr/include/linux/netfilter/ipset/ip_set_timeout.h
-#usr/include/linux/netfilter/ipset/pfxlen.h
-#usr/include/linux/netfilter/nf_conntrack_amanda.h
-#usr/include/linux/netfilter/nf_conntrack_dccp.h
-#usr/include/linux/netfilter/nf_conntrack_h323.h
-#usr/include/linux/netfilter/nf_conntrack_h323_asn1.h
-#usr/include/linux/netfilter/nf_conntrack_h323_types.h
-#usr/include/linux/netfilter/nf_conntrack_irc.h
-#usr/include/linux/netfilter/nf_conntrack_pptp.h
-#usr/include/linux/netfilter/nf_conntrack_proto_gre.h
-#usr/include/linux/netfilter/nf_conntrack_sane.h
-#usr/include/linux/netfilter/nf_conntrack_sip.h
-#usr/include/linux/netfilter/nf_conntrack_snmp.h
-#usr/include/linux/netfilter/nf_conntrack_tftp.h
-#usr/include/linux/netfilter/xt_IMQ.h
-#usr/include/linux/netfilter/xt_layer7.h
-#usr/include/net/netfilter
-#usr/include/net/netfilter/nf_conntrack_tuple.h
-#usr/include/net/netfilter/nf_nat.h
+#usr/include/xtables-version.h
 #usr/include/xtables.h
 #usr/lib/libnetfilter_conntrack.la
 usr/lib/libnetfilter_conntrack.so
@@ -192,16 +173,10 @@ usr/lib/libnetfilter_conntrack.so.3.4.0
 usr/lib/libnetfilter_cttimeout.so
 usr/lib/libnetfilter_cttimeout.so.1
 usr/lib/libnetfilter_cttimeout.so.1.0.0
-#usr/lib/libnetfilter_queue.a
 #usr/lib/libnetfilter_queue.la
 usr/lib/libnetfilter_queue.so
 usr/lib/libnetfilter_queue.so.1
-usr/lib/libnetfilter_queue.so.1.1.0
-#usr/lib/libnetfilter_queue_libipq.a
-#usr/lib/libnetfilter_queue_libipq.la
-usr/lib/libnetfilter_queue_libipq.so
-usr/lib/libnetfilter_queue_libipq.so.1
-usr/lib/libnetfilter_queue_libipq.so.1.0.0
+usr/lib/libnetfilter_queue.so.1.2.0
 #usr/lib/libnfnetlink.a
 #usr/lib/libnfnetlink.la
 usr/lib/libnfnetlink.so
@@ -231,6 +206,7 @@ usr/lib/libnfnetlink.so.0.2.0
 #usr/share/man/man8/ip6tables-restore.8
 #usr/share/man/man8/ip6tables-save.8
 #usr/share/man/man8/ip6tables.8
+#usr/share/man/man8/iptables-extensions.8
 #usr/share/man/man8/iptables-restore.8
 #usr/share/man/man8/iptables-save.8
 #usr/share/man/man8/iptables.8
index 2beb80010f517086882c74b813772162df2ce7a7..960e1c82738c7b2f92c32330a0b8e7b56dcc053d 100644 (file)
@@ -53,7 +53,8 @@
 #usr/include/netlink/socket.h
 #usr/include/netlink/types.h
 #usr/include/netlink/utils.h
+#usr/lib/libnl.a
 usr/lib/libnl.so
 usr/lib/libnl.so.1
-usr/lib/libnl.so.1.1
+usr/lib/libnl.so.1.1.4
 #usr/lib/pkgconfig/libnl-1.pc
index 671f2174ae5cfd64c2f9203cc32f961d7e2ab792..9515dc3a0088008c7f4aa1a1defea0a94a0d8608 100644 (file)
@@ -14,10 +14,29 @@ etc/squid/squid.conf
 srv/web/ipfire/cgi-bin/cachemgr.cgi
 srv/web/ipfire/html/proxy.pac
 srv/web/ipfire/html/wpad.dat
+usr/bin/purge
 usr/bin/squidclient
 #usr/lib/squid
 usr/lib/squid/auth
+usr/lib/squid/basic_db_auth
+usr/lib/squid/basic_fake_auth
+usr/lib/squid/basic_getpwnam_auth
+usr/lib/squid/basic_ldap_auth
+usr/lib/squid/basic_msnt_auth
+usr/lib/squid/basic_msnt_multi_domain_auth
+usr/lib/squid/basic_ncsa_auth
+usr/lib/squid/basic_nis_auth
+usr/lib/squid/basic_pam_auth
+usr/lib/squid/basic_pop3_auth
+usr/lib/squid/basic_radius_auth
+usr/lib/squid/basic_sasl_auth
+usr/lib/squid/basic_smb_auth
+usr/lib/squid/basic_smb_auth.sh
 #usr/lib/squid/cachemgr.cgi
+usr/lib/squid/cert_tool
+usr/lib/squid/digest_edirectory_auth
+usr/lib/squid/digest_file_auth
+usr/lib/squid/digest_ldap_auth
 usr/lib/squid/diskd
 #usr/lib/squid/errors
 #usr/lib/squid/errors/COPYRIGHT
@@ -30,6 +49,7 @@ usr/lib/squid/diskd
 #usr/lib/squid/errors/af/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/af/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/af/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/af/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/af/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/af/ERR_DIR_LISTING
 #usr/lib/squid/errors/af/ERR_DNS_FAIL
@@ -88,6 +108,7 @@ usr/lib/squid/diskd
 #usr/lib/squid/errors/ar/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/ar/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/ar/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/ar/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/ar/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/ar/ERR_DIR_LISTING
 #usr/lib/squid/errors/ar/ERR_DNS_FAIL
@@ -131,6 +152,7 @@ usr/lib/squid/diskd
 #usr/lib/squid/errors/az/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/az/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/az/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/az/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/az/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/az/ERR_DIR_LISTING
 #usr/lib/squid/errors/az/ERR_DNS_FAIL
@@ -174,6 +196,7 @@ usr/lib/squid/diskd
 #usr/lib/squid/errors/bg/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/bg/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/bg/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/bg/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/bg/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/bg/ERR_DIR_LISTING
 #usr/lib/squid/errors/bg/ERR_DNS_FAIL
@@ -216,6 +239,7 @@ usr/lib/squid/diskd
 #usr/lib/squid/errors/ca/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/ca/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/ca/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/ca/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/ca/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/ca/ERR_DIR_LISTING
 #usr/lib/squid/errors/ca/ERR_DNS_FAIL
@@ -259,6 +283,7 @@ usr/lib/squid/diskd
 #usr/lib/squid/errors/cs/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/cs/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/cs/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/cs/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/cs/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/cs/ERR_DIR_LISTING
 #usr/lib/squid/errors/cs/ERR_DNS_FAIL
@@ -302,6 +327,7 @@ usr/lib/squid/diskd
 #usr/lib/squid/errors/da/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/da/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/da/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/da/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/da/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/da/ERR_DIR_LISTING
 #usr/lib/squid/errors/da/ERR_DNS_FAIL
@@ -349,6 +375,7 @@ usr/lib/squid/errors/de/ERR_AGENT_WPAD
 usr/lib/squid/errors/de/ERR_CACHE_ACCESS_DENIED
 usr/lib/squid/errors/de/ERR_CACHE_MGR_ACCESS_DENIED
 usr/lib/squid/errors/de/ERR_CANNOT_FORWARD
+usr/lib/squid/errors/de/ERR_CONFLICT_HOST
 usr/lib/squid/errors/de/ERR_CONNECT_FAIL
 usr/lib/squid/errors/de/ERR_DIR_LISTING
 usr/lib/squid/errors/de/ERR_DNS_FAIL
@@ -392,6 +419,7 @@ usr/lib/squid/errors/de/error-details.txt
 #usr/lib/squid/errors/el/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/el/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/el/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/el/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/el/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/el/ERR_DIR_LISTING
 #usr/lib/squid/errors/el/ERR_DNS_FAIL
@@ -449,6 +477,7 @@ usr/lib/squid/errors/en/ERR_AGENT_WPAD
 usr/lib/squid/errors/en/ERR_CACHE_ACCESS_DENIED
 usr/lib/squid/errors/en/ERR_CACHE_MGR_ACCESS_DENIED
 usr/lib/squid/errors/en/ERR_CANNOT_FORWARD
+usr/lib/squid/errors/en/ERR_CONFLICT_HOST
 usr/lib/squid/errors/en/ERR_CONNECT_FAIL
 usr/lib/squid/errors/en/ERR_DIR_LISTING
 usr/lib/squid/errors/en/ERR_DNS_FAIL
@@ -510,6 +539,7 @@ usr/lib/squid/errors/es/ERR_AGENT_WPAD
 usr/lib/squid/errors/es/ERR_CACHE_ACCESS_DENIED
 usr/lib/squid/errors/es/ERR_CACHE_MGR_ACCESS_DENIED
 usr/lib/squid/errors/es/ERR_CANNOT_FORWARD
+usr/lib/squid/errors/es/ERR_CONFLICT_HOST
 usr/lib/squid/errors/es/ERR_CONNECT_FAIL
 usr/lib/squid/errors/es/ERR_DIR_LISTING
 usr/lib/squid/errors/es/ERR_DNS_FAIL
@@ -553,6 +583,7 @@ usr/lib/squid/errors/es/error-details.txt
 #usr/lib/squid/errors/et/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/et/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/et/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/et/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/et/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/et/ERR_DIR_LISTING
 #usr/lib/squid/errors/et/ERR_DNS_FAIL
@@ -597,6 +628,7 @@ usr/lib/squid/errors/es/error-details.txt
 #usr/lib/squid/errors/fa/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/fa/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/fa/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/fa/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/fa/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/fa/ERR_DIR_LISTING
 #usr/lib/squid/errors/fa/ERR_DNS_FAIL
@@ -640,6 +672,7 @@ usr/lib/squid/errors/es/error-details.txt
 #usr/lib/squid/errors/fi/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/fi/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/fi/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/fi/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/fi/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/fi/ERR_DIR_LISTING
 #usr/lib/squid/errors/fi/ERR_DNS_FAIL
@@ -688,6 +721,7 @@ usr/lib/squid/errors/fr/ERR_AGENT_WPAD
 usr/lib/squid/errors/fr/ERR_CACHE_ACCESS_DENIED
 usr/lib/squid/errors/fr/ERR_CACHE_MGR_ACCESS_DENIED
 usr/lib/squid/errors/fr/ERR_CANNOT_FORWARD
+usr/lib/squid/errors/fr/ERR_CONFLICT_HOST
 usr/lib/squid/errors/fr/ERR_CONNECT_FAIL
 usr/lib/squid/errors/fr/ERR_DIR_LISTING
 usr/lib/squid/errors/fr/ERR_DNS_FAIL
@@ -731,6 +765,7 @@ usr/lib/squid/errors/fr/error-details.txt
 #usr/lib/squid/errors/he/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/he/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/he/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/he/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/he/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/he/ERR_DIR_LISTING
 #usr/lib/squid/errors/he/ERR_DNS_FAIL
@@ -774,6 +809,7 @@ usr/lib/squid/errors/fr/error-details.txt
 #usr/lib/squid/errors/hu/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/hu/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/hu/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/hu/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/hu/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/hu/ERR_DIR_LISTING
 #usr/lib/squid/errors/hu/ERR_DNS_FAIL
@@ -818,6 +854,7 @@ usr/lib/squid/errors/fr/error-details.txt
 #usr/lib/squid/errors/hy/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/hy/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/hy/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/hy/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/hy/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/hy/ERR_DIR_LISTING
 #usr/lib/squid/errors/hy/ERR_DNS_FAIL
@@ -861,6 +898,7 @@ usr/lib/squid/errors/fr/error-details.txt
 #usr/lib/squid/errors/id/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/id/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/id/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/id/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/id/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/id/ERR_DIR_LISTING
 #usr/lib/squid/errors/id/ERR_DNS_FAIL
@@ -905,6 +943,7 @@ usr/lib/squid/errors/fr/error-details.txt
 #usr/lib/squid/errors/it/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/it/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/it/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/it/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/it/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/it/ERR_DIR_LISTING
 #usr/lib/squid/errors/it/ERR_DNS_FAIL
@@ -948,6 +987,7 @@ usr/lib/squid/errors/fr/error-details.txt
 #usr/lib/squid/errors/ja/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/ja/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/ja/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/ja/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/ja/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/ja/ERR_DIR_LISTING
 #usr/lib/squid/errors/ja/ERR_DNS_FAIL
@@ -992,6 +1032,7 @@ usr/lib/squid/errors/fr/error-details.txt
 #usr/lib/squid/errors/ko/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/ko/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/ko/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/ko/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/ko/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/ko/ERR_DIR_LISTING
 #usr/lib/squid/errors/ko/ERR_DNS_FAIL
@@ -1035,6 +1076,7 @@ usr/lib/squid/errors/fr/error-details.txt
 #usr/lib/squid/errors/lt/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/lt/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/lt/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/lt/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/lt/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/lt/ERR_DIR_LISTING
 #usr/lib/squid/errors/lt/ERR_DNS_FAIL
@@ -1078,6 +1120,7 @@ usr/lib/squid/errors/fr/error-details.txt
 #usr/lib/squid/errors/lv/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/lv/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/lv/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/lv/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/lv/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/lv/ERR_DIR_LISTING
 #usr/lib/squid/errors/lv/ERR_DNS_FAIL
@@ -1121,6 +1164,7 @@ usr/lib/squid/errors/fr/error-details.txt
 #usr/lib/squid/errors/ms/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/ms/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/ms/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/ms/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/ms/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/ms/ERR_DIR_LISTING
 #usr/lib/squid/errors/ms/ERR_DNS_FAIL
@@ -1164,6 +1208,7 @@ usr/lib/squid/errors/nl/ERR_AGENT_WPAD
 usr/lib/squid/errors/nl/ERR_CACHE_ACCESS_DENIED
 usr/lib/squid/errors/nl/ERR_CACHE_MGR_ACCESS_DENIED
 usr/lib/squid/errors/nl/ERR_CANNOT_FORWARD
+usr/lib/squid/errors/nl/ERR_CONFLICT_HOST
 usr/lib/squid/errors/nl/ERR_CONNECT_FAIL
 usr/lib/squid/errors/nl/ERR_DIR_LISTING
 usr/lib/squid/errors/nl/ERR_DNS_FAIL
@@ -1206,6 +1251,7 @@ usr/lib/squid/errors/nl/error-details.txt
 #usr/lib/squid/errors/oc/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/oc/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/oc/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/oc/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/oc/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/oc/ERR_DIR_LISTING
 #usr/lib/squid/errors/oc/ERR_DNS_FAIL
@@ -1249,6 +1295,7 @@ usr/lib/squid/errors/pl/ERR_AGENT_WPAD
 usr/lib/squid/errors/pl/ERR_CACHE_ACCESS_DENIED
 usr/lib/squid/errors/pl/ERR_CACHE_MGR_ACCESS_DENIED
 usr/lib/squid/errors/pl/ERR_CANNOT_FORWARD
+usr/lib/squid/errors/pl/ERR_CONFLICT_HOST
 usr/lib/squid/errors/pl/ERR_CONNECT_FAIL
 usr/lib/squid/errors/pl/ERR_DIR_LISTING
 usr/lib/squid/errors/pl/ERR_DNS_FAIL
@@ -1292,6 +1339,7 @@ usr/lib/squid/errors/pl/error-details.txt
 #usr/lib/squid/errors/pt-br/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/pt-br/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/pt-br/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/pt-br/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/pt-br/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/pt-br/ERR_DIR_LISTING
 #usr/lib/squid/errors/pt-br/ERR_DNS_FAIL
@@ -1334,6 +1382,7 @@ usr/lib/squid/errors/pl/error-details.txt
 #usr/lib/squid/errors/pt/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/pt/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/pt/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/pt/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/pt/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/pt/ERR_DIR_LISTING
 #usr/lib/squid/errors/pt/ERR_DNS_FAIL
@@ -1378,6 +1427,7 @@ usr/lib/squid/errors/pl/error-details.txt
 #usr/lib/squid/errors/ro/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/ro/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/ro/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/ro/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/ro/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/ro/ERR_DIR_LISTING
 #usr/lib/squid/errors/ro/ERR_DNS_FAIL
@@ -1421,6 +1471,7 @@ usr/lib/squid/errors/ru/ERR_AGENT_WPAD
 usr/lib/squid/errors/ru/ERR_CACHE_ACCESS_DENIED
 usr/lib/squid/errors/ru/ERR_CACHE_MGR_ACCESS_DENIED
 usr/lib/squid/errors/ru/ERR_CANNOT_FORWARD
+usr/lib/squid/errors/ru/ERR_CONFLICT_HOST
 usr/lib/squid/errors/ru/ERR_CONNECT_FAIL
 usr/lib/squid/errors/ru/ERR_DIR_LISTING
 usr/lib/squid/errors/ru/ERR_DNS_FAIL
@@ -1464,6 +1515,7 @@ usr/lib/squid/errors/ru/error-details.txt
 #usr/lib/squid/errors/sk/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/sk/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/sk/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/sk/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/sk/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/sk/ERR_DIR_LISTING
 #usr/lib/squid/errors/sk/ERR_DNS_FAIL
@@ -1507,6 +1559,7 @@ usr/lib/squid/errors/ru/error-details.txt
 #usr/lib/squid/errors/sl/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/sl/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/sl/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/sl/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/sl/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/sl/ERR_DIR_LISTING
 #usr/lib/squid/errors/sl/ERR_DNS_FAIL
@@ -1550,6 +1603,7 @@ usr/lib/squid/errors/ru/error-details.txt
 #usr/lib/squid/errors/sr-cyrl/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/sr-cyrl/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/sr-cyrl/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/sr-cyrl/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/sr-cyrl/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/sr-cyrl/ERR_DIR_LISTING
 #usr/lib/squid/errors/sr-cyrl/ERR_DNS_FAIL
@@ -1593,6 +1647,7 @@ usr/lib/squid/errors/ru/error-details.txt
 #usr/lib/squid/errors/sr-latn/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/sr-latn/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/sr-latn/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/sr-latn/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/sr-latn/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/sr-latn/ERR_DIR_LISTING
 #usr/lib/squid/errors/sr-latn/ERR_DNS_FAIL
@@ -1638,6 +1693,7 @@ usr/lib/squid/errors/ru/error-details.txt
 #usr/lib/squid/errors/sv/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/sv/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/sv/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/sv/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/sv/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/sv/ERR_DIR_LISTING
 #usr/lib/squid/errors/sv/ERR_DNS_FAIL
@@ -1680,6 +1736,7 @@ usr/lib/squid/errors/ru/error-details.txt
 #usr/lib/squid/errors/templates/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/templates/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/templates/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/templates/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/templates/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/templates/ERR_DIR_LISTING
 #usr/lib/squid/errors/templates/ERR_DNS_FAIL
@@ -1723,6 +1780,7 @@ usr/lib/squid/errors/ru/error-details.txt
 #usr/lib/squid/errors/th/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/th/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/th/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/th/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/th/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/th/ERR_DIR_LISTING
 #usr/lib/squid/errors/th/ERR_DNS_FAIL
@@ -1766,6 +1824,7 @@ usr/lib/squid/errors/tr/ERR_AGENT_WPAD
 usr/lib/squid/errors/tr/ERR_CACHE_ACCESS_DENIED
 usr/lib/squid/errors/tr/ERR_CACHE_MGR_ACCESS_DENIED
 usr/lib/squid/errors/tr/ERR_CANNOT_FORWARD
+usr/lib/squid/errors/tr/ERR_CONFLICT_HOST
 usr/lib/squid/errors/tr/ERR_CONNECT_FAIL
 usr/lib/squid/errors/tr/ERR_DIR_LISTING
 usr/lib/squid/errors/tr/ERR_DNS_FAIL
@@ -1809,6 +1868,7 @@ usr/lib/squid/errors/tr/error-details.txt
 #usr/lib/squid/errors/uk/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/uk/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/uk/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/uk/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/uk/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/uk/ERR_DIR_LISTING
 #usr/lib/squid/errors/uk/ERR_DNS_FAIL
@@ -1851,6 +1911,7 @@ usr/lib/squid/errors/tr/error-details.txt
 #usr/lib/squid/errors/uz/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/uz/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/uz/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/uz/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/uz/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/uz/ERR_DIR_LISTING
 #usr/lib/squid/errors/uz/ERR_DNS_FAIL
@@ -1894,6 +1955,7 @@ usr/lib/squid/errors/tr/error-details.txt
 #usr/lib/squid/errors/vi/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/vi/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/vi/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/vi/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/vi/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/vi/ERR_DIR_LISTING
 #usr/lib/squid/errors/vi/ERR_DNS_FAIL
@@ -1936,6 +1998,7 @@ usr/lib/squid/errors/tr/error-details.txt
 #usr/lib/squid/errors/zh-cn/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/zh-cn/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/zh-cn/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/zh-cn/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/zh-cn/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/zh-cn/ERR_DIR_LISTING
 #usr/lib/squid/errors/zh-cn/ERR_DNS_FAIL
@@ -1981,6 +2044,7 @@ usr/lib/squid/errors/tr/error-details.txt
 #usr/lib/squid/errors/zh-tw/ERR_CACHE_ACCESS_DENIED
 #usr/lib/squid/errors/zh-tw/ERR_CACHE_MGR_ACCESS_DENIED
 #usr/lib/squid/errors/zh-tw/ERR_CANNOT_FORWARD
+#usr/lib/squid/errors/zh-tw/ERR_CONFLICT_HOST
 #usr/lib/squid/errors/zh-tw/ERR_CONNECT_FAIL
 #usr/lib/squid/errors/zh-tw/ERR_DIR_LISTING
 #usr/lib/squid/errors/zh-tw/ERR_DNS_FAIL
@@ -2015,65 +2079,101 @@ usr/lib/squid/errors/tr/error-details.txt
 #usr/lib/squid/errors/zh-tw/ERR_WRITE_ERROR
 #usr/lib/squid/errors/zh-tw/ERR_ZERO_SIZE_OBJECT
 #usr/lib/squid/errors/zh-tw/error-details.txt
-usr/lib/squid/fakeauth_auth
+usr/lib/squid/ext_edirectory_userip_acl
+usr/lib/squid/ext_file_userip_acl
+usr/lib/squid/ext_kerberos_ldap_group_acl
+usr/lib/squid/ext_ldap_group_acl
+usr/lib/squid/ext_session_acl
+usr/lib/squid/ext_sql_session_acl
+usr/lib/squid/ext_time_quota_acl
+usr/lib/squid/ext_unix_group_acl
+usr/lib/squid/ext_wbinfo_group_acl
+usr/lib/squid/helper-mux.pl
 usr/lib/squid/icons
-#usr/lib/squid/icons/anthony-binhex.gif
-#usr/lib/squid/icons/anthony-bomb.gif
-#usr/lib/squid/icons/anthony-box.gif
-#usr/lib/squid/icons/anthony-box2.gif
-#usr/lib/squid/icons/anthony-c.gif
-#usr/lib/squid/icons/anthony-compressed.gif
-#usr/lib/squid/icons/anthony-dir.gif
-#usr/lib/squid/icons/anthony-dirup.gif
-#usr/lib/squid/icons/anthony-dvi.gif
-#usr/lib/squid/icons/anthony-f.gif
-#usr/lib/squid/icons/anthony-image.gif
-#usr/lib/squid/icons/anthony-image2.gif
-#usr/lib/squid/icons/anthony-layout.gif
-#usr/lib/squid/icons/anthony-link.gif
-#usr/lib/squid/icons/anthony-movie.gif
-#usr/lib/squid/icons/anthony-pdf.gif
-#usr/lib/squid/icons/anthony-portal.gif
-#usr/lib/squid/icons/anthony-ps.gif
-#usr/lib/squid/icons/anthony-quill.gif
-#usr/lib/squid/icons/anthony-script.gif
-#usr/lib/squid/icons/anthony-sound.gif
-#usr/lib/squid/icons/anthony-tar.gif
-#usr/lib/squid/icons/anthony-tex.gif
-#usr/lib/squid/icons/anthony-text.gif
-#usr/lib/squid/icons/anthony-unknown.gif
-#usr/lib/squid/icons/anthony-xbm.gif
-#usr/lib/squid/icons/anthony-xpm.gif
-usr/lib/squid/ip_user_check
+usr/lib/squid/icons/SN.png
+usr/lib/squid/icons/silk
+usr/lib/squid/icons/silk/application.png
+usr/lib/squid/icons/silk/arrow_up.png
+usr/lib/squid/icons/silk/bomb.png
+usr/lib/squid/icons/silk/box.png
+usr/lib/squid/icons/silk/bricks.png
+usr/lib/squid/icons/silk/bullet_red.png
+usr/lib/squid/icons/silk/cd.png
+usr/lib/squid/icons/silk/chart_line.png
+usr/lib/squid/icons/silk/compress.png
+usr/lib/squid/icons/silk/computer_link.png
+usr/lib/squid/icons/silk/css.png
+usr/lib/squid/icons/silk/cup.png
+usr/lib/squid/icons/silk/database.png
+usr/lib/squid/icons/silk/database_table.png
+usr/lib/squid/icons/silk/drive_disk.png
+usr/lib/squid/icons/silk/film.png
+usr/lib/squid/icons/silk/film_key.png
+usr/lib/squid/icons/silk/folder.png
+usr/lib/squid/icons/silk/folder_table.png
+usr/lib/squid/icons/silk/image.png
+usr/lib/squid/icons/silk/information.png
+usr/lib/squid/icons/silk/layers.png
+usr/lib/squid/icons/silk/layout.png
+usr/lib/squid/icons/silk/link.png
+usr/lib/squid/icons/silk/music.png
+usr/lib/squid/icons/silk/package.png
+usr/lib/squid/icons/silk/package_go.png
+usr/lib/squid/icons/silk/page_code.png
+usr/lib/squid/icons/silk/page_excel.png
+usr/lib/squid/icons/silk/page_green.png
+usr/lib/squid/icons/silk/page_white.png
+usr/lib/squid/icons/silk/page_white_acrobat.png
+usr/lib/squid/icons/silk/page_white_c.png
+usr/lib/squid/icons/silk/page_white_cplusplus.png
+usr/lib/squid/icons/silk/page_white_flash.png
+usr/lib/squid/icons/silk/page_white_magnify.png
+usr/lib/squid/icons/silk/page_white_picture.png
+usr/lib/squid/icons/silk/page_white_powerpoint.png
+usr/lib/squid/icons/silk/page_white_stack.png
+usr/lib/squid/icons/silk/page_white_text.png
+usr/lib/squid/icons/silk/page_white_word.png
+usr/lib/squid/icons/silk/page_white_zip.png
+usr/lib/squid/icons/silk/page_world.png
+usr/lib/squid/icons/silk/photo.png
+usr/lib/squid/icons/silk/picture.png
+usr/lib/squid/icons/silk/plugin.png
+usr/lib/squid/icons/silk/plugin_add.png
+usr/lib/squid/icons/silk/script.png
+usr/lib/squid/icons/silk/script_gear.png
+usr/lib/squid/icons/silk/script_palette.png
+usr/lib/squid/log_db_daemon
+usr/lib/squid/log_file_daemon
 usr/lib/squid/mib.txt
-usr/lib/squid/msnt_auth
-usr/lib/squid/ncsa_auth
-usr/lib/squid/no_check.pl
+usr/lib/squid/negotiate_wrapper_auth
+usr/lib/squid/ntlm_fake_auth
 usr/lib/squid/ntlm_smb_lm_auth
-usr/lib/squid/pam_auth
-usr/lib/squid/pinger
-usr/lib/squid/smb_auth
-usr/lib/squid/smb_auth.pl
-usr/lib/squid/smb_auth.sh
-usr/lib/squid/squid_ldap_auth
-usr/lib/squid/squid_ldap_group
-usr/lib/squid/squid_radius_auth
-usr/lib/squid/squid_session
-usr/lib/squid/squid_unix_group
 usr/lib/squid/unlinkd
-usr/lib/squid/wbinfo_group.pl
+usr/lib/squid/url_fake_rewrite
+usr/lib/squid/url_fake_rewrite.sh
 usr/sbin/squid
 usr/sbin/updxlrator
 #usr/share/man/man1/squidclient.1
+#usr/share/man/man8/basic_db_auth.8
+#usr/share/man/man8/basic_getpwnam_auth.8
+#usr/share/man/man8/basic_ldap_auth.8
+#usr/share/man/man8/basic_ncsa_auth.8
+#usr/share/man/man8/basic_pam_auth.8
+#usr/share/man/man8/basic_radius_auth.8
+#usr/share/man/man8/basic_sasl_auth.8
 #usr/share/man/man8/cachemgr.cgi.8
-#usr/share/man/man8/ncsa_auth.8
-#usr/share/man/man8/pam_auth.8
+#usr/share/man/man8/digest_file_auth.8
+#usr/share/man/man8/ext_edirectory_userip_acl.8
+#usr/share/man/man8/ext_file_userip_acl.8
+#usr/share/man/man8/ext_ldap_group_acl.8
+#usr/share/man/man8/ext_session_acl.8
+#usr/share/man/man8/ext_sql_session_acl.8
+#usr/share/man/man8/ext_time_quota_acl.8
+#usr/share/man/man8/ext_unix_group_acl.8
+#usr/share/man/man8/ext_wbinfo_group_acl.8
+#usr/share/man/man8/log_db_daemon.8
 #usr/share/man/man8/squid.8
-#usr/share/man/man8/squid_ldap_auth.8
-#usr/share/man/man8/squid_ldap_group.8
-#usr/share/man/man8/squid_radius_auth.8
-#usr/share/man/man8/squid_session.8
-#usr/share/man/man8/squid_unix_group.8
+#var/cache/squid
 var/ipfire/proxy/errorpage-ipfire.css
 var/ipfire/proxy/errorpage-squid.css
 var/ipfire/updatexlrator/autocheck/cron.daily
@@ -2090,3 +2190,4 @@ var/log/cache
 var/log/squid/access.log
 var/log/updatexlrator
 #var/logs
+#var/run/squid
diff --git a/config/rootfiles/oldcore/73/exclude b/config/rootfiles/oldcore/73/exclude
new file mode 100644 (file)
index 0000000..321a931
--- /dev/null
@@ -0,0 +1,17 @@
+srv/web/ipfire/html/proxy.pac
+boot/config.txt
+etc/udev/rules.d/30-persistent-network.rules
+etc/collectd.custom
+etc/shadow
+etc/ipsec.conf
+etc/ipsec.secrets
+etc/ipsec.user.conf
+etc/ipsec.user.secrets
+var/log/cache
+var/updatecache
+etc/localtime
+var/ipfire/ovpn
+etc/ssh/ssh_config
+etc/ssh/sshd_config
+etc/ssl/openssl.cnf
+var/state/dhcp/dhcpd.leases
diff --git a/config/rootfiles/oldcore/73/filelists/HTML-Template b/config/rootfiles/oldcore/73/filelists/HTML-Template
new file mode 120000 (symlink)
index 0000000..f17c1da
--- /dev/null
@@ -0,0 +1 @@
+../../../common/HTML-Template
\ No newline at end of file
diff --git a/config/rootfiles/oldcore/73/filelists/armv5tel/ath-modul b/config/rootfiles/oldcore/73/filelists/armv5tel/ath-modul
new file mode 100644 (file)
index 0000000..63bcd75
--- /dev/null
@@ -0,0 +1,3 @@
+lib/modules/3.2.48-ipfire-omap/kernel/drivers/net/wireless/ath/ath.ko
+lib/modules/3.2.48-ipfire-kirkwood/kernel/drivers/net/wireless/ath/ath.ko
+lib/modules/3.2.48-ipfire-rpi/kernel/drivers/net/wireless/ath/ath.ko
diff --git a/config/rootfiles/oldcore/73/filelists/files b/config/rootfiles/oldcore/73/filelists/files
new file mode 100644 (file)
index 0000000..a1ef1bf
--- /dev/null
@@ -0,0 +1,10 @@
+etc/system-release
+etc/issue
+etc/rc.d/init.d/squid
+srv/web/ipfire/cgi-bin/logs.cgi/proxylog.dat
+srv/web/ipfire/cgi-bin/proxy.cgi
+srv/web/ipfire/html/redirect.cgi
+srv/web/ipfire/html/redirect-templates/
+var/ipfire/header.pl
+var/ipfire/langs
+var/ipfire/proxy/advanced/useragents
diff --git a/config/rootfiles/oldcore/73/filelists/i586/ath-modul b/config/rootfiles/oldcore/73/filelists/i586/ath-modul
new file mode 100644 (file)
index 0000000..6656dae
--- /dev/null
@@ -0,0 +1,3 @@
+lib/modules/2.6.32.61-ipfire-xen/kernel/drivers/net/wireless/ath/ath.ko
+lib/modules/3.2.48-ipfire/kernel/drivers/net/wireless/ath/ath.ko
+lib/modules/3.2.48-ipfire-pae/kernel/drivers/net/wireless/ath/ath.ko
diff --git a/config/rootfiles/oldcore/73/filelists/squid b/config/rootfiles/oldcore/73/filelists/squid
new file mode 120000 (symlink)
index 0000000..2dc8372
--- /dev/null
@@ -0,0 +1 @@
+../../../common/squid
\ No newline at end of file
diff --git a/config/rootfiles/oldcore/73/meta b/config/rootfiles/oldcore/73/meta
new file mode 100644 (file)
index 0000000..d547fa8
--- /dev/null
@@ -0,0 +1 @@
+DEPS=""
diff --git a/config/rootfiles/oldcore/73/update.sh b/config/rootfiles/oldcore/73/update.sh
new file mode 100644 (file)
index 0000000..6afca9f
--- /dev/null
@@ -0,0 +1,67 @@
+#!/bin/bash
+############################################################################
+#                                                                          #
+# This file is part of the IPFire Firewall.                                #
+#                                                                          #
+# IPFire is free software; you can redistribute it and/or modify           #
+# it under the terms of the GNU General Public License as published by     #
+# the Free Software Foundation; either version 3 of the License, or        #
+# (at your option) any later version.                                      #
+#                                                                          #
+# IPFire is distributed in the hope that it will be useful,                #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of           #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the            #
+# GNU General Public License for more details.                             #
+#                                                                          #
+# You should have received a copy of the GNU General Public License        #
+# along with IPFire; if not, write to the Free Software                    #
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA #
+#                                                                          #
+# Copyright (C) 2013 IPFire-Team <info@ipfire.org>.                        #
+#                                                                          #
+############################################################################
+#
+. /opt/pakfire/lib/functions.sh
+/usr/local/bin/backupctrl exclude >/dev/null 2>&1
+
+#
+# Remove old core updates from pakfire cache to save space...
+core=73
+for (( i=1; i<=$core; i++ ))
+do
+       rm -f /var/cache/pakfire/core-upgrade-*-$i.ipfire
+done
+
+
+#
+#Stop services
+/etc/init.d/squid stop
+
+
+#
+#Extract files
+extract_files
+
+# Regenerate squid configuration files.
+/srv/web/ipfire/cgi-bin/proxy.cgi
+
+#
+#Start services
+/etc/init.d/squid start
+
+#
+#Update Language cache
+perl -e "require '/var/ipfire/lang.pl'; &Lang::BuildCacheLang"
+
+sync
+
+# This update needs a reboot...
+#touch /var/run/need_reboot
+
+#
+#Finish
+/etc/init.d/fireinfo start
+sendprofile
+#Don't report the exit code of the last command
+exit 0
+
diff --git a/config/rootfiles/packages/iptraf-ng b/config/rootfiles/packages/iptraf-ng
new file mode 100644 (file)
index 0000000..4ad3c86
--- /dev/null
@@ -0,0 +1,5 @@
+usr/sbin/iptraf-ng
+usr/sbin/rvnamed-ng
+var/lib/iptraf-ng
+var/lock/iptraf-ng
+var/log/iptraf-ng
index 8eb6dad17fb7180e3d8f87333d891a97be75123f..58de7da86a2c5d4f3151cddff6f56219f383ddb0 100644 (file)
@@ -1,6 +1,9 @@
 #etc/logrotate.d
 etc/logrotate.d/tor
 etc/rc.d/init.d/tor
+etc/rc.d/rc0.d/K40tor
+etc/rc.d/rc3.d/S60tor
+etc/rc.d/rc6.d/K40tor
 #etc/tor
 etc/tor/tor-tsocks.conf
 etc/tor/torrc
diff --git a/config/rootfiles/packages/wavemon b/config/rootfiles/packages/wavemon
new file mode 100644 (file)
index 0000000..4f0fa3c
--- /dev/null
@@ -0,0 +1,10 @@
+usr/bin/wavemon
+#usr/share/man/man1/wavemon.1
+#usr/share/man/man5/wavemonrc.5
+#usr/share/wavemon
+#usr/share/wavemon/AUTHORS
+#usr/share/wavemon/COPYING
+#usr/share/wavemon/ChangeLog
+#usr/share/wavemon/NEWS
+#usr/share/wavemon/README
+#usr/share/wavemon/THANKS
diff --git a/config/rootfiles/packages/xinetd b/config/rootfiles/packages/xinetd
new file mode 100644 (file)
index 0000000..4c91ec9
--- /dev/null
@@ -0,0 +1,11 @@
+etc/rc.d/init.d/xinetd
+etc/xinetd.conf
+#etc/xinetd.d
+#usr/man/man5/xinetd.conf.5
+#usr/man/man8/itox.8
+#usr/man/man8/xconv.pl.8
+#usr/man/man8/xinetd.8
+#usr/man/man8/xinetd.log.8
+#usr/sbin/itox
+#usr/sbin/xconv.pl
+usr/sbin/xinetd
diff --git a/config/xinetd/xinetd.conf b/config/xinetd/xinetd.conf
new file mode 100644 (file)
index 0000000..b2112a1
--- /dev/null
@@ -0,0 +1,9 @@
+defaults
+{
+   instances      = 10
+   log_type       = SYSLOG daemon
+   log_on_success = HOST PID
+   log_on_failure = HOST
+   #only_from      = <your local networks>
+}
+includedir /etc/xinetd.d
index 3d81d45647f248dbe900d4a3ce6f6f5b854a1abf..2fafaf1806f6e5e67789a218b446418e6c09ebfa 100644 (file)
@@ -823,6 +823,7 @@ WARNING: untranslated string: tor common settings
 WARNING: untranslated string: tor configuration
 WARNING: untranslated string: tor connected relays
 WARNING: untranslated string: tor contact info
+WARNING: untranslated string: tor daemon
 WARNING: untranslated string: tor enabled
 WARNING: untranslated string: tor errmsg invalid accounting limit
 WARNING: untranslated string: tor errmsg invalid ip or mask
@@ -844,6 +845,7 @@ WARNING: untranslated string: tor relay mode private bridge
 WARNING: untranslated string: tor relay mode relay
 WARNING: untranslated string: tor relay nickname
 WARNING: untranslated string: tor relay port
+WARNING: untranslated string: tor service
 WARNING: untranslated string: tor socks port
 WARNING: untranslated string: tor stats
 WARNING: untranslated string: tor traffic limit hard
@@ -851,6 +853,7 @@ WARNING: untranslated string: tor traffic limit soft
 WARNING: untranslated string: tor traffic read written
 WARNING: untranslated string: tor use exit nodes
 WARNING: untranslated string: uptime load average
+WARNING: untranslated string: urlfilter redirect template
 WARNING: untranslated string: visit us at
 WARNING: untranslated string: vpn keyexchange
 WARNING: untranslated string: wlan client
index fd38d40ff45b0d1ee11b60fed778e1020e468124..b07e7ff50f9c2a982a8ff6919c818e851cb68444 100644 (file)
@@ -814,6 +814,7 @@ WARNING: untranslated string: tor common settings
 WARNING: untranslated string: tor configuration
 WARNING: untranslated string: tor connected relays
 WARNING: untranslated string: tor contact info
+WARNING: untranslated string: tor daemon
 WARNING: untranslated string: tor enabled
 WARNING: untranslated string: tor errmsg invalid accounting limit
 WARNING: untranslated string: tor errmsg invalid ip or mask
@@ -835,6 +836,7 @@ WARNING: untranslated string: tor relay mode private bridge
 WARNING: untranslated string: tor relay mode relay
 WARNING: untranslated string: tor relay nickname
 WARNING: untranslated string: tor relay port
+WARNING: untranslated string: tor service
 WARNING: untranslated string: tor socks port
 WARNING: untranslated string: tor stats
 WARNING: untranslated string: tor traffic limit hard
@@ -845,6 +847,7 @@ WARNING: untranslated string: upload new ruleset
 WARNING: untranslated string: uptime load average
 WARNING: untranslated string: urlfilter file ext block
 WARNING: untranslated string: urlfilter mode block
+WARNING: untranslated string: urlfilter redirect template
 WARNING: untranslated string: visit us at
 WARNING: untranslated string: vpn keyexchange
 WARNING: untranslated string: wlan client
index 8bd78ba6a21d931808ca552ea3ba125c37b9c34f..9e17b91f5b9fe227ee55fc1e348fe9e38e9dbaf7 100644 (file)
@@ -731,6 +731,7 @@ WARNING: untranslated string: tor common settings
 WARNING: untranslated string: tor configuration
 WARNING: untranslated string: tor connected relays
 WARNING: untranslated string: tor contact info
+WARNING: untranslated string: tor daemon
 WARNING: untranslated string: tor enabled
 WARNING: untranslated string: tor errmsg invalid accounting limit
 WARNING: untranslated string: tor errmsg invalid ip or mask
@@ -752,6 +753,7 @@ WARNING: untranslated string: tor relay mode private bridge
 WARNING: untranslated string: tor relay mode relay
 WARNING: untranslated string: tor relay nickname
 WARNING: untranslated string: tor relay port
+WARNING: untranslated string: tor service
 WARNING: untranslated string: tor socks port
 WARNING: untranslated string: tor stats
 WARNING: untranslated string: tor traffic limit hard
@@ -759,6 +761,7 @@ WARNING: untranslated string: tor traffic limit soft
 WARNING: untranslated string: tor traffic read written
 WARNING: untranslated string: tor use exit nodes
 WARNING: untranslated string: uptime load average
+WARNING: untranslated string: urlfilter redirect template
 WARNING: untranslated string: wlan client
 WARNING: untranslated string: wlan client advanced settings
 WARNING: untranslated string: wlan client and
index 3d81d45647f248dbe900d4a3ce6f6f5b854a1abf..2fafaf1806f6e5e67789a218b446418e6c09ebfa 100644 (file)
@@ -823,6 +823,7 @@ WARNING: untranslated string: tor common settings
 WARNING: untranslated string: tor configuration
 WARNING: untranslated string: tor connected relays
 WARNING: untranslated string: tor contact info
+WARNING: untranslated string: tor daemon
 WARNING: untranslated string: tor enabled
 WARNING: untranslated string: tor errmsg invalid accounting limit
 WARNING: untranslated string: tor errmsg invalid ip or mask
@@ -844,6 +845,7 @@ WARNING: untranslated string: tor relay mode private bridge
 WARNING: untranslated string: tor relay mode relay
 WARNING: untranslated string: tor relay nickname
 WARNING: untranslated string: tor relay port
+WARNING: untranslated string: tor service
 WARNING: untranslated string: tor socks port
 WARNING: untranslated string: tor stats
 WARNING: untranslated string: tor traffic limit hard
@@ -851,6 +853,7 @@ WARNING: untranslated string: tor traffic limit soft
 WARNING: untranslated string: tor traffic read written
 WARNING: untranslated string: tor use exit nodes
 WARNING: untranslated string: uptime load average
+WARNING: untranslated string: urlfilter redirect template
 WARNING: untranslated string: visit us at
 WARNING: untranslated string: vpn keyexchange
 WARNING: untranslated string: wlan client
index 04d4ad09a1fd7b5bcd1bf5bd5a0a6f39fbed0549..90d419df8ab40c6cb1723fdedc02cceffda20edf 100644 (file)
@@ -794,6 +794,7 @@ WARNING: untranslated string: tor common settings
 WARNING: untranslated string: tor configuration
 WARNING: untranslated string: tor connected relays
 WARNING: untranslated string: tor contact info
+WARNING: untranslated string: tor daemon
 WARNING: untranslated string: tor enabled
 WARNING: untranslated string: tor errmsg invalid accounting limit
 WARNING: untranslated string: tor errmsg invalid ip or mask
@@ -815,6 +816,7 @@ WARNING: untranslated string: tor relay mode private bridge
 WARNING: untranslated string: tor relay mode relay
 WARNING: untranslated string: tor relay nickname
 WARNING: untranslated string: tor relay port
+WARNING: untranslated string: tor service
 WARNING: untranslated string: tor socks port
 WARNING: untranslated string: tor stats
 WARNING: untranslated string: tor traffic limit hard
@@ -822,6 +824,7 @@ WARNING: untranslated string: tor traffic limit soft
 WARNING: untranslated string: tor traffic read written
 WARNING: untranslated string: tor use exit nodes
 WARNING: untranslated string: uptime load average
+WARNING: untranslated string: urlfilter redirect template
 WARNING: untranslated string: visit us at
 WARNING: untranslated string: vpn keyexchange
 WARNING: untranslated string: wlan client
index a7f2c39833e06719f3e0c12e7ff631443a6d0a10..b4f0dfec139c60f1394e2b4ffcefb6c9e11003d0 100644 (file)
@@ -727,6 +727,7 @@ WARNING: untranslated string: tor common settings
 WARNING: untranslated string: tor configuration
 WARNING: untranslated string: tor connected relays
 WARNING: untranslated string: tor contact info
+WARNING: untranslated string: tor daemon
 WARNING: untranslated string: tor enabled
 WARNING: untranslated string: tor errmsg invalid accounting limit
 WARNING: untranslated string: tor errmsg invalid ip or mask
@@ -748,12 +749,14 @@ WARNING: untranslated string: tor relay mode private bridge
 WARNING: untranslated string: tor relay mode relay
 WARNING: untranslated string: tor relay nickname
 WARNING: untranslated string: tor relay port
+WARNING: untranslated string: tor service
 WARNING: untranslated string: tor socks port
 WARNING: untranslated string: tor stats
 WARNING: untranslated string: tor traffic limit hard
 WARNING: untranslated string: tor traffic limit soft
 WARNING: untranslated string: tor traffic read written
 WARNING: untranslated string: tor use exit nodes
+WARNING: untranslated string: urlfilter redirect template
 WARNING: untranslated string: wlan client
 WARNING: untranslated string: wlan client advanced settings
 WARNING: untranslated string: wlan client and
index 511a32efce97b0789c6255465e436ef3ae31f1bb..20838cbba1e2e9a9881f0684fde51de93cfa2e0d 100644 (file)
 < tor configuration
 < tor connected relays
 < tor contact info
+< tor daemon
 < tor enabled
 < tor errmsg invalid accounting limit
 < tor errmsg invalid ip or mask
 < tor relay mode relay
 < tor relay nickname
 < tor relay port
+< tor service
 < tor socks port
 < tor stats
 < tor traffic limit hard
 < uptime load average
 < urlfilter file ext block
 < urlfilter mode block
+< urlfilter redirect template
 < visit us at
 < vpn keyexchange
 < wlanap access point
 < tor configuration
 < tor connected relays
 < tor contact info
+< tor daemon
 < tor enabled
 < tor errmsg invalid accounting limit
 < tor errmsg invalid ip or mask
 < tor relay mode relay
 < tor relay nickname
 < tor relay port
+< tor service
 < tor socks port
 < tor stats
 < tor traffic limit hard
 < updxlrtr standard view
 < uptime
 < uptime load average
+< urlfilter redirect template
 < visit us at
 < vpn keyexchange
 < wlanap country
 < tor configuration
 < tor connected relays
 < tor contact info
+< tor daemon
 < tor enabled
 < tor errmsg invalid accounting limit
 < tor errmsg invalid ip or mask
 < tor relay mode relay
 < tor relay nickname
 < tor relay port
+< tor service
 < tor socks port
 < tor stats
 < tor traffic limit hard
 < updxlrtr standard view
 < uptime
 < uptime load average
+< urlfilter redirect template
 < visit us at
 < vpn keyexchange
 < wlanap country
 < tor configuration
 < tor connected relays
 < tor contact info
+< tor daemon
 < tor enabled
 < tor errmsg invalid accounting limit
 < tor errmsg invalid ip or mask
 < tor relay mode relay
 < tor relay nickname
 < tor relay port
+< tor service
 < tor socks port
 < tor stats
 < tor traffic limit hard
 < updxlrtr standard view
 < uptime
 < uptime load average
+< urlfilter redirect template
 < visit us at
 < vpn keyexchange
 < week-graph
index e529be061df78e9f4419e3ebddb6d1cf14cf3209..da86f8917394ceebf230ac4ddefab149a0d330da 100644 (file)
@@ -90,7 +90,7 @@ if ($ENV{'QUERY_STRING'} && $cgiparams{'ACTION'} ne $Lang::tr{'update'})
        $cgiparams{'MONTH'} = $temp[1];
        $cgiparams{'DAY'} = $temp[2];  
        $cgiparams{'SOURCE_IP'} = $temp[3];
-       $cgiparams{'USERNAME'} = $temp[4];
+       $cgiparams{'USERNAME'} = &Header::escape($temp[4]);
 }
 
 if (!($cgiparams{'MONTH'} =~ /^(0|1|2|3|4|5|6|7|8|9|10|11)$/) ||
@@ -383,6 +383,7 @@ print <<END
 END
 ;
 foreach my $so (sort keys %users) {
+       $so = &Header::escape($so);
        print "<option value='$so' $selected{'USERNAME'}{$so}>$so</option>\n"; }
 print <<END
        </select>
index 899bf3efa2b5bc34b12aadf9c810057d1e0b3c9b..f0123588492f3524c17beec7b823da2d159ded64 100644 (file)
@@ -2150,14 +2150,15 @@ else
 # m.a.d net2net
 ###
 
- if ($confighash{$cgiparams{'KEY'}}[3] eq 'net') {
-
+if ($confighash{$cgiparams{'KEY'}}[3] eq 'net') {
        my $conffile = glob("${General::swroot}/ovpn/n2nconf/$confighash{$cgiparams{'KEY'}}[1]/$confighash{$cgiparams{'KEY'}}[1].conf");
-  my $certfile = glob("${General::swroot}/ovpn/certs/$confighash{$cgiparams{'KEY'}}[1].p12");
-  unlink ($certfile) or die "Removing $certfile fail: $!";
-  unlink ($conffile) or die "Removing $conffile fail: $!";
-  rmdir ("${General::swroot}/ovpn/n2nconf/$confighash{$cgiparams{'KEY'}}[1]") || die "Kann Verzeichnis nicht loeschen: $!";
-  
+       my $certfile = glob("${General::swroot}/ovpn/certs/$confighash{$cgiparams{'KEY'}}[1].p12");
+       unlink ($certfile);
+       unlink ($conffile);
+
+       if (-e "${General::swroot}/ovpn/n2nconf/$confighash{$cgiparams{'KEY'}}[1]") {
+               rmdir ("${General::swroot}/ovpn/n2nconf/$confighash{$cgiparams{'KEY'}}[1]") || die "Kann Verzeichnis nicht loeschen: $!";
+       }
 }
 
   unlink ("${General::swroot}/ovpn/certs/$confighash{$cgiparams{'KEY'}}[1]cert.pem");
@@ -3559,35 +3560,33 @@ if ($cgiparams{'TYPE'} eq 'net') {
        }
 
        # Check if a remote host/IP has been set for the client.
-       if ($cgiparams{'REMOTE'} eq '' && $cgiparams{'SIDE'} ne 'server') {
-           $errormessage = $Lang::tr{'invalid input for remote host/ip'};
+       if ($cgiparams{'TYPE'} eq 'net') {
+               if ($cgiparams{'SIDE'} ne 'server' && $cgiparams{'REMOTE'} eq '') {
+                       $errormessage = $Lang::tr{'invalid input for remote host/ip'};
 
-           # Check if this is a N2N connection and drop temporary config.
-           if ($cgiparams{'TYPE'} eq 'net') {
-               unlink ("${General::swroot}/ovpn/n2nconf/$cgiparams{'NAME'}/$cgiparams{'NAME'}.conf") or die "Removing Configfile fail: $!";
-               rmdir ("${General::swroot}/ovpn/n2nconf/$cgiparams{'NAME'}") || die "Removing Directory fail: $!";
-           }
-           goto VPNCONF_ERROR;
-       }
+                       # Check if this is a N2N connection and drop temporary config.
+                       unlink ("${General::swroot}/ovpn/n2nconf/$cgiparams{'NAME'}/$cgiparams{'NAME'}.conf") or die "Removing Configfile fail: $!";
+                       rmdir ("${General::swroot}/ovpn/n2nconf/$cgiparams{'NAME'}") || die "Removing Directory fail: $!";
 
-       # Check if a remote host/IP has been configured - the field can be empty on the server side.
-       if ($cgiparams{'REMOTE'} ne '') {
+                       goto VPNCONF_ERROR;
+               }
 
-           # Check if the given IP is valid - otherwise check if it is a valid domain.
-           if (! &General::validip($cgiparams{'REMOTE'})) {
+               # Check if a remote host/IP has been configured - the field can be empty on the server side.
+               if ($cgiparams{'REMOTE'} ne '') {
+                       # Check if the given IP is valid - otherwise check if it is a valid domain.
+                       if (! &General::validip($cgiparams{'REMOTE'})) {
+                               # Check for a valid domain.
+                               if (! &General::validfqdn ($cgiparams{'REMOTE'}))  {
+                                       $errormessage = $Lang::tr{'invalid input for remote host/ip'};
 
-               # Check for a valid domain.
-               if (! &General::validfqdn ($cgiparams{'REMOTE'}))  {
-                   $errormessage = $Lang::tr{'invalid input for remote host/ip'};
+                                       # Check if this is a N2N connection and drop temporary config.
+                                       unlink ("${General::swroot}/ovpn/n2nconf/$cgiparams{'NAME'}/$cgiparams{'NAME'}.conf") or die "Removing Configfile fail: $!";
+                                       rmdir ("${General::swroot}/ovpn/n2nconf/$cgiparams{'NAME'}") || die "Removing Directory fail: $!";
 
-                   # Check if this is a N2N connection and drop temporary config.
-                   if ($cgiparams{'TYPE'} eq 'net') {
-                       unlink ("${General::swroot}/ovpn/n2nconf/$cgiparams{'NAME'}/$cgiparams{'NAME'}.conf") or die "Removing Configfile fail: $!";
-                       rmdir ("${General::swroot}/ovpn/n2nconf/$cgiparams{'NAME'}") || die "Removing Directory fail: $!";
-                   }
-                   goto VPNCONF_ERROR;
+                                       goto VPNCONF_ERROR;
+                               }
+                       }
                }
-           }
        }
 
        if ($cgiparams{'TYPE'} ne 'host') {
index 870042ae519e2044119d8fbe06bd8a848626e197..bcdc2024f574e71a2f383b9f746393f237f1eacf 100644 (file)
@@ -3144,12 +3144,12 @@ END
        if ($proxysettings{'LOGGING'} eq 'on')
        {
                print FILE <<END
-access_log /var/log/squid/access.log
+access_log stdio:/var/log/squid/access.log
 cache_log /var/log/squid/cache.log
 cache_store_log none
 END
        ;
-               if ($proxysettings{'LOGUSERAGENT'} eq 'on') { print FILE "useragent_log \/var\/log\/squid\/user_agent.log\n"; }
+               if ($proxysettings{'LOGUSERAGENT'} eq 'on') { print FILE "access_log stdio:\/var\/log\/squid\/user_agent.log useragent\n"; }
                if ($proxysettings{'LOGQUERY'} eq 'on') { print FILE "\nstrip_query_terms off\n"; }
        } else {
                print FILE <<END
@@ -3182,7 +3182,7 @@ END
        {
                if ($proxysettings{'AUTH_METHOD'} eq 'ncsa')
                {
-                       print FILE "auth_param basic program $authdir/ncsa_auth $userdb\n";
+                       print FILE "auth_param basic program $authdir/basic_ncsa_auth $userdb\n";
                        print FILE "auth_param basic children $proxysettings{'AUTH_CHILDREN'}\n";
                        print FILE "auth_param basic realm $authrealm\n";
                        print FILE "auth_param basic credentialsttl $proxysettings{'AUTH_CACHE_TTL'} minutes\n";
@@ -3192,7 +3192,7 @@ END
                if ($proxysettings{'AUTH_METHOD'} eq 'ldap')
                {
                        print FILE "auth_param basic utf8 on\n";
-                       print FILE "auth_param basic program $authdir/squid_ldap_auth -b \"$proxysettings{'LDAP_BASEDN'}\"";
+                       print FILE "auth_param basic program $authdir/basic_ldap_auth -b \"$proxysettings{'LDAP_BASEDN'}\"";
                        if (!($proxysettings{'LDAP_BINDDN_USER'} eq '')) { print FILE " -D \"$proxysettings{'LDAP_BINDDN_USER'}\""; }
                        if (!($proxysettings{'LDAP_BINDDN_PASS'} eq '')) { print FILE " -w $proxysettings{'LDAP_BINDDN_PASS'}"; }
                        if ($proxysettings{'LDAP_TYPE'} eq 'ADS')
@@ -3243,7 +3243,7 @@ END
                                print FILE "auth_param ntlm children $proxysettings{'AUTH_CHILDREN'}\n";
                                if (!($proxysettings{'AUTH_IPCACHE_TTL'} eq '0')) { print FILE "\nauthenticate_ip_ttl $proxysettings{'AUTH_IPCACHE_TTL'} minutes\n"; }
                        } else {
-                               print FILE "auth_param basic program $authdir/msnt_auth\n";
+                               print FILE "auth_param basic program $authdir/basic_msnt_auth\n";
                                print FILE "auth_param basic children $proxysettings{'AUTH_CHILDREN'}\n";
                                print FILE "auth_param basic realm $authrealm\n";
                                print FILE "auth_param basic credentialsttl $proxysettings{'AUTH_CACHE_TTL'} minutes\n";
@@ -3269,7 +3269,7 @@ END
 
                if ($proxysettings{'AUTH_METHOD'} eq 'radius')
                {
-                       print FILE "auth_param basic program $authdir/squid_radius_auth -h $proxysettings{'RADIUS_SERVER'} -p $proxysettings{'RADIUS_PORT'} ";
+                       print FILE "auth_param basic program $authdir/basic_radius_auth -h $proxysettings{'RADIUS_SERVER'} -p $proxysettings{'RADIUS_PORT'} ";
                        if (!($proxysettings{'RADIUS_IDENTIFIER'} eq '')) { print FILE "-i $proxysettings{'RADIUS_IDENTIFIER'} "; }
                        print FILE "-w $proxysettings{'RADIUS_SECRET'}\n";
                        print FILE "auth_param basic children $proxysettings{'AUTH_CHILDREN'}\n";
@@ -3362,11 +3362,6 @@ END
                print FILE "acl blocked_mimetypes rep_mime_type \"$mimetypes\"\n\n";
        }
 
-       print FILE <<END
-#acl all src all
-acl localhost src 127.0.0.1/32
-END
-;
 open (PORTS,"$acl_ports_ssl");
 @temp = <PORTS>;
 close PORTS;
@@ -3463,7 +3458,6 @@ END
        if ($proxysettings{'ENABLE_CLAMAV'} eq 'on') {
                print FILE "\n#Settings for squidclamav:\n";
                print FILE "http_port 127.0.0.1:$proxysettings{'PROXY_PORT'} transparent\n";
-               print FILE "acl to_localhost dst 127.0.0.0/8\n";
                print FILE "acl purge method PURGE\n";
                print FILE "http_access deny to_localhost\n";
                print FILE "http_access allow localhost\n";
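
For context, the proxy.cgi hunks above track two Squid 3.x changes: the basic auth helpers were renamed (basic_ncsa_auth, basic_ldap_auth, basic_msnt_auth, basic_radius_auth), and the separate useragent_log directive is now written as a second access_log line using the stdio: module and the built-in useragent format. A sketch of the kind of output the rewritten code produces; the user database path is a placeholder, and the helper directory follows the --libexecdir=/usr/lib/squid build option used further down in this merge:

# Illustration only: Squid 3.x directive forms emitted by the updated proxy.cgi.
# /path/to/userdb stands in for the configured NCSA user database file.
print <<END
access_log stdio:/var/log/squid/access.log
access_log stdio:/var/log/squid/user_agent.log useragent
auth_param basic program /usr/lib/squid/basic_ncsa_auth /path/to/userdb
END
;
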
old mode 100644 (file)
new mode 100755 (executable)
index 2a31dd4..b920724
@@ -57,9 +57,42 @@ my @accounting_periods = ('daily', 'weekly', 'monthly');
 
 my $TOR_CONTROL_PORT = 9051;
 
+my $string=();
+my $memory=();
+my @memory=();
+my @pid=();
+my @tor=();
+sub daemonstats
+{
+       $memory = 0;
+       # for pid and memory
+       open(FILE, '/usr/local/bin/addonctrl tor status | ');
+       @tor = <FILE>;
+       close(FILE);
+       $string = join("", @tor);
+       $string =~ s/[a-z_]//gi;
+       $string =~ s/\[[0-1]\;[0-9]+//gi;
+       $string =~ s/[\(\)\.]//gi;
+       $string =~ s/  //gi;
+       $string =~ s/\e//gi;
+       @pid = split(/\s/,$string);
+       if (open(FILE, "/proc/$pid[0]/statm")){
+               my $temp = <FILE>;
+               @memory = split(/ /,$temp);
+               close(FILE);
+               }
+       $memory+=$memory[0];
+}
+daemonstats();
+
 our %netsettings = ();
 &General::readhash("${General::swroot}/ethernet/settings", \%netsettings);
 
+our %color = ();
+our %mainsettings = ();
+&General::readhash("${General::swroot}/main/settings", \%mainsettings);
+&General::readhash("/srv/web/ipfire/html/themes/".$mainsettings{'THEME'}."/include/colors.txt", \%color);
+
 our %settings = ();
 
 $settings{'TOR_ENABLED'} = 'off';
@@ -192,12 +225,37 @@ sub showMainBox() {
 
        print "<form method='post' action='$ENV{'SCRIPT_NAME'}'>\n";
 
-       &Header::openbox('100%', 'left', $Lang::tr{'tor configuration'});
+       &Header::openbox('100%', 'center', $Lang::tr{'tor'});
+
+
+if ( ($memory != 0) && (@pid[0] ne "///") ){
+               print "<table width='95%' cellspacing='0'>";
+               print "<tr><td bgcolor='$color{'color20'}' colspan='3' align='left'><strong>$Lang::tr{'tor service'}</strong></td></tr>";
+               print "<tr><td class='base'>$Lang::tr{'tor daemon'}</td>";
+               print "<td align='center' colspan='2' width='75%' bgcolor='${Header::colourgreen}'><font color='white'><strong>$Lang::tr{'running'}</strong></font></td></tr>";
+               print "<tr><td class='base'></td>";
+               print "<td bgcolor='$color{'color20'}' align='center'><strong>PID</strong></td>";
+               print "<td bgcolor='$color{'color20'}' align='center'><strong>$Lang::tr{'memory'}</strong></td></tr>";
+               print "<tr><td class='base'></td>";
+               print "<td bgcolor='$color{'color22'}' align='center'>@pid[0]</td>";
+               print "<td bgcolor='$color{'color22'}' align='center'>$memory KB</td></tr>";
+               print "</table>";
+       } else {
+               print "<table width='95%' cellspacing='0'>";
+               print "<tr><td bgcolor='$color{'color20'}' colspan='3' align='left'><strong>$Lang::tr{'tor service'}</strong></td></tr>";
+               print "<tr><td class='base'>$Lang::tr{'tor daemon'}</td>";
+               print "<td align='center' width='75%' bgcolor='${Header::colourred}'><font color='white'><strong>$Lang::tr{'stopped'}</strong></font></td></tr>";
+               print "</table>";
+       }
+
+       &Header::closebox();
+
+       &Header::openbox('100%', 'center', $Lang::tr{'tor configuration'});
 
        print <<END;
-               <table width='100%'>
+               <table width='95%'>
                        <tr>
-                               <td colspan='4' class='base'><b>$Lang::tr{'tor common settings'}</b></td>
+                               <td colspan='4' class='base' bgcolor='$color{'color20'}'><b>$Lang::tr{'tor common settings'}</b></td>
                        </tr>
                        <tr>
                                <td width='25%' class='base'>$Lang::tr{'tor enabled'}:</td>
@@ -222,12 +280,11 @@ END
 
        print <<END;
                <br>
-               <hr size='1'>
                <br>
 
-               <table width='100%'>
+               <table width='95%'>
                        <tr>
-                               <td colspan='4' class='base'><b>$Lang::tr{'tor acls'}</b></td>
+                               <td colspan='4' class='base' bgcolor='$color{'color20'}'><b>$Lang::tr{'tor acls'}</b></td>
                        </tr>
                        <tr>
                                <td colspan='2' class='base' width='55%'>
@@ -244,12 +301,11 @@ END
                </table>
 
                <br>
-               <hr size='1'>
                <br>
 
-               <table width='100%'>
+               <table width='95%'>
                        <tr>
-                               <td colspan='4' class='base'><b>$Lang::tr{'tor exit nodes'}</b></td>
+                               <td colspan='4' class='base' bgcolor='$color{'color20'}'><b>$Lang::tr{'tor exit nodes'}</b></td>
                        </tr>
                        <tr>
                                <td colspan='2' class='base' width='55%'></td>
@@ -265,7 +321,13 @@ END
                foreach my $country_name (sort @country_names) {
                        my $country_code = Locale::Country::country2code($country_name);
                        $country_code = uc($country_code);
-                       print "<option value='$country_code'>$country_name ($country_code)</option>\n";
+                       print "<option value='$country_code'";
+
+                       if ($settings{'TOR_EXIT_COUNTRY'} eq $country_code) {
+                               print " selected";
+                       }
+
+                       print ">$country_name ($country_code)</option>\n";
                }
 
        print <<END;
@@ -276,7 +338,6 @@ END
                                </td>
                        </tr>
                </table>
-               <br><br>
 END
 
        &Header::closebox();
@@ -305,10 +366,10 @@ END
        }
        $selected{'TOR_RELAY_ACCOUNTING_PERIOD'}{$settings{'TOR_RELAY_ACCOUNTING_PERIOD'}} = 'selected';
 
-       &Header::openbox('100%', 'left', $Lang::tr{'tor relay configuration'});
+       &Header::openbox('100%', 'center', $Lang::tr{'tor relay configuration'});
 
        print <<END;
-               <table width='100%'>
+               <table width='95%'>
                        <tr>
                                <td width='25%' class='base'>$Lang::tr{'tor relay mode'}:</td>
                                <td width='30%'>
@@ -342,11 +403,11 @@ END
                        </tr>
                </table>
 
-               <hr size='1'>
+               <br>
 
-               <table width='100%'>
+               <table width='95%'>
                        <tr>
-                               <td colspan='4' class='base'><b>$Lang::tr{'tor bandwidth settings'}</b></td>
+                               <td colspan='4' class='base' bgcolor='$color{'color20'}'><b>$Lang::tr{'tor bandwidth settings'}</b></td>
                        </tr>
                        <tr>
                                <td width='25%' class='base'>$Lang::tr{'tor bandwidth rate'}:</td>
@@ -407,7 +468,7 @@ END
        &Header::closebox();
 
        print <<END;
-               <table width='100%'>
+               <table width='95%'>
                        <tr>
                                <td>
                                        <img src='/blob.gif' align='top' alt='*' />&nbsp;<font class='base'>$Lang::tr{'this field may be blank'}</font>
@@ -418,7 +479,7 @@ END
 
                <hr>
 
-               <table width='100%'>
+               <table width='95%'>
                        <tr>
                                <td>&nbsp;</td>
                                <td align='center'><input type='submit' name='ACTION' value='$Lang::tr{'save'}' /></td>
@@ -429,13 +490,13 @@ END
 
        # If we have a control connection, show the stats.
        if ($torctrl) {
-               &Header::openbox('100%', 'left', $Lang::tr{'tor stats'});
+               &Header::openbox('100%', 'center', $Lang::tr{'tor stats'});
 
                my @traffic = &TorTrafficStats($torctrl);
 
                if (@traffic) {
                        print <<END;
-                               <table width='100%'>
+                               <table width='95%'>
 END
 
                if ($settings{'TOR_RELAY_ENABLED'} eq 'on') {
@@ -476,7 +537,7 @@ END
                my $accounting = &TorAccountingStats($torctrl);
                if ($accounting) {
                        print <<END;
-                               <table width='100%'>
+                               <table width='95%'>
                                        <tr>
                                                <td colspan='2' class='base'><b>$Lang::tr{'tor accounting'}</b></td>
                                        </tr>
@@ -527,7 +588,7 @@ END
                if (@nodes) {
                        my $nodes_length = scalar @nodes;
                        print <<END;
-                               <table width='100%'>
+                               <table width='95%'>
                                        <tr>
                                                <td width='40%' class='base'><b>$Lang::tr{'tor connected relays'}</b></td>
                                                <td width='60%' colspan='2'>($nodes_length)</td>
@@ -546,7 +607,11 @@ END
 END
 
                                if (exists($node->{'country_code'})) {
+                                       if ($node->{'country_code'} eq '??') {
+                                               print "<img src='/images/flags/blank.png' border='0' align='absmiddle'/>";
+                                       } else {
                                                print "<a href='country.cgi#$node->{'country_code'}'><img src='/images/flags/$node->{'country_code'}.png' border='0' align='absmiddle' alt='$node->{'country_code'}'></a>";
+                                       }
                                }
 
                                print <<END;
@@ -689,6 +754,8 @@ sub BuildConfiguration() {
        } else {
                system("/usr/local/bin/torctrl stop &>/dev/null");
        }
+       # Update pid and memory
+       daemonstats();
 }
 
 sub TorConnect() {
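
The daemonstats() routine added above extracts the PID from the addonctrl output and then adds up the first field of /proc/<pid>/statm. statm counts in pages rather than kilobytes (fields: size resident shared text lib data dt), so a conversion with the system page size is needed for an exact KB figure. A standalone sketch under that assumption (4096-byte pages; POSIX::sysconf(_SC_PAGESIZE) would give the exact value), not the code used above:

# Standalone sketch: resident-set size of a process in kilobytes.
# Assumes 4096-byte pages and the statm field order documented in proc(5).
sub memory_kb {
	my ($pid) = @_;
	my $pagesize = 4096;

	open(my $fh, '<', "/proc/$pid/statm") or return 0;
	my $line = <$fh>;
	close($fh);

	# Second field is the resident set, counted in pages.
	my @fields = split /\s+/, $line;
	return int($fields[1] * $pagesize / 1024);
}
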
index 1bfc0392cd6c788cf5115f48d99538c9dded0170..1b40a3cf036171f84ad533de036af3cd1a2e54ff 100644 (file)
@@ -59,6 +59,7 @@ my $tcfile = "${General::swroot}/urlfilter/timeconst";
 my $uqfile = "${General::swroot}/urlfilter/userquota";
 my $dbdir = "${General::swroot}/urlfilter/blacklists";
 my $editdir = "${General::swroot}/urlfilter/editor";
+my $templatedir = "/srv/web/ipfire/html/redirect-templates";
 my $repository = "/var/urlrepo";
 my $hintcolour = '#FFFFCC';
 
@@ -142,6 +143,7 @@ $filtersettings{'ENABLE_LOG'} = 'off';
 $filtersettings{'ENABLE_USERNAME_LOG'} = 'off';
 $filtersettings{'ENABLE_CATEGORY_LOG'} = 'off';
 $filtersettings{'ENABLE_AUTOUPDATE'} = 'off';
+$filtersettings{'REDIRECT_TEMPLATE'} = 'legacy';
 
 $filtersettings{'ACTION'} = '';
 $filtersettings{'VALID'} = '';
@@ -1074,6 +1076,8 @@ foreach $category (@filtergroups) {
        $checked{$category}{$filtersettings{$category}} = "checked='checked'";
 }
 
+$selected{'REDIRECT_TEMPLATE'}{$filtersettings{'REDIRECT_TEMPLATE'}} = "selected='selected'";
+
 $selected{'DEFINITION'}{$tcsettings{'DEFINITION'}} = "selected='selected'";
 $selected{'FROM_HOUR'}{$tcsettings{'FROM_HOUR'}} = "selected='selected'";
 $selected{'FROM_MINUTE'}{$tcsettings{'FROM_MINUTE'}} = "selected='selected'";
@@ -1415,6 +1419,24 @@ print <<END
 <tr>
         <td colspan='4'><b>$Lang::tr{'urlfilter block settings'}</b></td>
 </tr>
+<tr>
+       <td width='25%' class='base'>$Lang::tr{'urlfilter redirect template'}</td>
+       <td width='75%' colspan='2'>
+               <select name='REDIRECT_TEMPLATE'>
+END
+;
+
+       foreach (<$templatedir/*>) {
+               if ((-d "$_") && (-e "$_/template.html")) {
+                       my $template = substr($_,rindex($_,"/")+1);
+                       print "<option value='$template' $selected{'REDIRECT_TEMPLATE'}{$template}>$template</option>\n";
+               }
+       }
+
+print <<END
+               </select>
+       </td>
+</tr>
 <tr>
        <td width='25%' class='base'>$Lang::tr{'urlfilter show category'}:</td>
        <td width='12%'><input type='checkbox' name='SHOW_CATEGORY' $checked{'SHOW_CATEGORY'}{'on'} /></td>
diff --git a/html/html/redirect-templates/legacy/template.html b/html/html/redirect-templates/legacy/template.html
new file mode 100644 (file)
index 0000000..b5fb61e
--- /dev/null
@@ -0,0 +1,55 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2//EN">
+<html>
+       <head>
+               <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> 
+               <title>ACCESS MESSAGE</title>
+       </head>
+       <body>
+               <table width="100%" height='100%' border="0">
+                       <tr>
+                               <td colspan='3' width='100%' height='130' align="center" background="<TMPL_VAR NAME="ADDRESS">/images/background.gif">
+                       <tr>
+                               <td width='10%'>
+                               <td align='center' bgcolor='#CC000000' width='80%'>
+                                       <font face="verdana, arial, sans serif" color="#FFFFFF" size="5">
+                                               <b><TMPL_VAR NAME="MSG_TEXT_1"></b>
+                                       </font>
+                               <td width='10%'>
+
+                       <TMPL_IF NAME="CATEGORY">
+                               <tr>
+                                       <td colspan='3' align='center'>
+                                               <font face="verdana, arial, sans serif" color="#CC000000" size="1">
+                                                       <b>[<TMPL_VAR NAME="CATEGORY">]</b>
+                                               </font>
+                       </TMPL_IF>
+
+                       <tr>
+                               <td colspan='3' align="center">
+                                       <font face="verdana, arial, sans serif" color="#000000" size="4">
+                                               <b><TMPL_VAR NAME="MSG_TEXT_2"></b>
+                                       </font>
+                                       <font face="verdana,arial,sans serif" color="#000000" size="2">
+                                               <TMPL_IF NAME="URL">
+                                                       <p>URL: <a href="<TMPL_VAR NAME="URL">"><TMPL_VAR NAME="URL"></a>
+                                               </TMPL_IF>
+
+                                               <TMPL_IF NAME="IP_ADDRESS">
+                                                       <p>Client IP address: <i><TMPL_VAR NAME="IP_ADDRESS"></i>
+                                               </TMPL_IF>
+
+                                               <br><p><TMPL_VAR NAME="MSG_TEXT_3">
+                                       </font>
+
+                       <tr>
+                               <td colspan='3' height='60%' valign="bottom" align="right">
+                                       <font face="verdana,arial,sans serif" color="#656565" size="1">
+                                               Web Filtering by
+                                       </font>
+                                       <a href="http://www.ipfire.org" target="_blank">
+                                               <font face="verdana,arial,sans serif" color="#656565" size="1"><b>IPFire</b></font>
+                                       </a>
+
+               </table>
+       </body>
+</html>
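
The <TMPL_VAR> and <TMPL_IF> markers in the template above are filled in by HTML::Template from the rewritten redirect CGI further down. A minimal standalone usage sketch; the parameter values are examples only:

#!/usr/bin/perl
# Minimal usage sketch for the redirect template; values are examples only.
use strict;
use warnings;
use HTML::Template;

my $tmpl = HTML::Template->new(
	filename          => "/srv/web/ipfire/html/redirect-templates/legacy/template.html",
	die_on_bad_params => 0,
);

$tmpl->param(ADDRESS    => "http://192.168.1.1:81");   # example address only
$tmpl->param(MSG_TEXT_1 => "A C C E S S   D E N I E D");
$tmpl->param(CATEGORY   => "ads");                     # example category

print $tmpl->output;
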
index 93f1ea4a96aece6bdc9064120b614ab086f49ee6..4f9d2278e2ba5e3ce99dd28df1848b683c766b7d 100644 (file)
 #                                                                             #
 ###############################################################################
 
-use CGI qw(param);
+use CGI;
+use HTML::Entities;
+use HTML::Template;
 
-$swroot="/var/ipfire";
+my $swroot="/var/ipfire";
+my $templateroot = "/srv/web/ipfire/html/redirect-templates";
 
 my %netsettings;
 my %filtersettings;
@@ -29,117 +32,75 @@ my %filtersettings;
 &readhash("$swroot/ethernet/settings", \%netsettings);
 &readhash("$swroot/urlfilter/settings", \%filtersettings);
 
-$category=param("category");
-$url=param("url");
-$ip=param("ip");
+# Read the template file.
+my $template = $filtersettings{'REDIRECT_TEMPLATE'};
+if (($template eq '') || (! -e "$templateroot/$template")) {
+       $template = "legacy";
+}
+my $tmpl = HTML::Template->new(
+       filename => "$templateroot/$template/template.html",
+       die_on_bad_params => 0
+);
+
+# Address where to load more resources from.
+$tmpl->param(ADDRESS => "http://$netsettings{'GREEN_ADDRESS'}:81");
 
-if ($filtersettings{'MSG_TEXT_1'} eq '') {
+# Message text 1
+my $msgtext1 = $filtersettings{'MSG_TEXT_1'};
+if ($msgtext1 eq '') {
        $msgtext1 = "A C C E S S &nbsp;&nbsp; D E N I E D";
-} else { $msgtext1 = $filtersettings{'MSG_TEXT_1'}; }
-if ($filtersettings{'MSG_TEXT_2'} eq '') {
+}
+$tmpl->param(MSG_TEXT_1 => $msgtext1);
+
+# Message text 2
+my $msgtext2 = $filtersettings{'MSG_TEXT_2'};
+if ($msgtext2 eq '') {
        $msgtext2 = "Access to the requested page has been denied";
-} else { $msgtext2 = $filtersettings{'MSG_TEXT_2'}; }
-if ($filtersettings{'MSG_TEXT_3'} eq '') {
+}
+$tmpl->param(MSG_TEXT_2 => $msgtext2);
+
+# Message text 3
+my $msgtext3 = $filtersettings{'MSG_TEXT_3'};
+if ($msgtext3 eq '') {
        $msgtext3 = "Please contact the Network Administrator if you think there has been an error";
-} else { $msgtext3 = $filtersettings{'MSG_TEXT_3'}; }
+}
+$tmpl->param(MSG_TEXT_3 => $msgtext3);
+
+# Category
+my $category = CGI::param("category");
+$tmpl->param(CATEGORY => &escape($category));
+
+# URL
+my $url = CGI::param("url");
+$tmpl->param(URL => &escape($url));
 
-if ($category eq '') { $category = '&nbsp;'; } else { $category = '['.$category.']'; }
+# IP address
+my $ip_address = CGI::param("ip");
+$tmpl->param(IP_ADDRESS => &escape($ip_address));
 
+# Print header
 print "Pragma: no-cache\n";
 print "Cache-control: no-cache\n";
 print "Connection: close\n";
 print "Content-type: text/html\n\n";
+print $tmpl->output;
 
-print <<END
-
-<html>
-<head>
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2//EN">
-<meta http-equiv="Content-Type" content="text/html; charset=utf-8"> 
-<title>ACCESS MESSAGE</title>
-</head>
-
-<body>
-<table width="100%" height='100%' border="0">
-<tr>
-               <td colspan='3' width='100%' height='130' align="center" background="http://$netsettings{'GREEN_ADDRESS'}:81/images/background.gif">
-<tr>           <td width='10%'><td align='center' bgcolor='#CC000000' width='80%'><font face="verdana, arial, sans serif" color="#FFFFFF" size="5">
-                                       <b>$msgtext1</b>
-                                       </font>
-               <td width='10%'>
-END
-;
-
-if (!($category eq ""))
-{
-       print <<END
-       <tr>            <td colspan='3' align='center'>
-                               <font face="verdana, arial, sans serif" color="#CC000000" size="1">
-                                       <b>$category</b>
-                               </font>
-END
-;
+sub escape($) {
+       my $s = shift;
+       return HTML::Entities::encode_entities($s);
 }
-print <<END
-<tr>
-                       <td colspan='3' align="center">
-                               <font face="verdana, arial, sans serif" color="#000000" size="4">
-                               <b>$msgtext2</b>
-                               </font>
-                               <font face="verdana,arial,sans serif" color="#000000" size="2">
-END
-;
-
-if (!($url eq ""))
-{
-print <<END
-                                       <p>URL: <a href="$url">$url</a>
-END
-;
-}
-
-if (!($ip eq ""))
-{
-print <<END
-                                       <p>Client IP address: <i>$ip</i>
-END
-;
-}
-
-print <<END
-                                       <br><p>$msgtext3
-                                       </font>
 
-<tr>
-       <td colspan='3' height='60%' valign="bottom" align="right">
-               <font face="verdana,arial,sans serif" color="#656565" size="1">Web Filtering by
-               </font>
-               <a href="http://www.ipfire.org" target="_blank"><b>
-               <font face="verdana,arial,sans serif" color="#656565" size="1">IPFire</b></a>
-               </font>
-
-</table>
-</body>
-
-</html>
-END
-;
-
-sub readhash
-{
+sub readhash {
        my $filename = $_[0];
        my $hash = $_[1];
        my ($var, $val);
 
-       if (-e $filename)
-       {
+       if (-e $filename) {
                open(FILE, $filename) or die "Unable to read file $filename";
-               while (<FILE>)
-               {
+               while (<FILE>) {
                        chop;
                        ($var, $val) = split /=/, $_, 2;
-                       if ($var)
-                       {
+                       if ($var) {
                                $val =~ s/^\'//g;
                                $val =~ s/\'$//g;
        
@@ -149,6 +110,7 @@ sub readhash
                                $hash->{$var} = $val;
                        }
                }
+
                close FILE;
        }
 }
index 279bfaab21f3b2545fbf9f7c82e8c27c273e45fe..c054b0c84585d71639b1895d7a82f3ac28870c59 100644 (file)
 'tor configuration' => 'Tor-Konfiguration',
 'tor connected relays' => 'Verbundene Relays',
 'tor contact info' => 'Kontaktinformationen',
+'tor daemon' => 'Daemon',
 'tor enabled' => 'Tor einschalten',
 'tor errmsg invalid accounting limit' => 'Ungültiges Accounting-Limit',
 'tor errmsg invalid ip or mask' => 'Ungültiges IP-Subnetz',
 'tor relay mode relay' => 'Nur Relay',
 'tor relay nickname' => 'Relay-Nickname',
 'tor relay port' => 'Relay-Port',
+'tor service' => 'Tor-Service',
 'tor socks port' => 'SOCKS-Port',
 'tor stats' => 'Statistiken',
 'tor traffic limit hard' => 'Das Übertragungslimit wurde erreicht.',
 'urlfilter quota restart message' => 'Hinweis: Beim Neustart des URL-Filters werden die Zähler für alle Benutzer zurückgesetzt',
 'urlfilter quota time error' => 'Ungültiger Wert für Zeitkontingent',
 'urlfilter quota user error' => 'Mindestens ein Benutzername erforderlich',
+'urlfilter redirect template' => 'Sperrseitenvorlage',
 'urlfilter redirectpage' => 'Leite zu dieser URL um',
 'urlfilter remove file' => 'Datei aus der Ablage entfernen',
 'urlfilter renewal' => 'Erneuerung',
index 4ca450c522bcd434bafd2ace3f20511e178d1331..c38ba962876e15b4d3df21d405bc3c68a0e5b36a 100644 (file)
 'firewall log' => 'Firewall log',
 'firewall log viewer' => 'Firewall Log Viewer',
 'firewall logs' => 'Firewall Logs',
-'firewall logs ip' => 'Fw-Logdgraphs (IP)',
+'firewall logs ip' => 'Fw-Loggraphs (IP)',
 'firewall logs port' => 'Fw-Loggraphs (Port)',
 'firewallhits' => 'firewallhits',
 'firmware' => 'Firmware',
 'tor configuration' => 'Tor Configuration',
 'tor connected relays' => 'Connected relays',
 'tor contact info' => 'Contact Info',
+'tor daemon' => 'Daemon',
 'tor enabled' => 'Enable Tor',
 'tor errmsg invalid accounting limit' => 'Invalid accounting limit',
 'tor errmsg invalid ip or mask' => 'Invalid IP subnet',
 'tor relay mode relay' => 'Relay only',
 'tor relay nickname' => 'Relay nickname',
 'tor relay port' => 'Relay port',
+'tor service' => 'Tor Service',
 'tor socks port' => 'SOCKS port',
 'tor stats' => 'Statistics',
 'tor traffic limit hard' => 'Traffic limit has been reached.',
 'urlfilter quota restart message' => 'Note: The counters will be reset for all users when restarting the URL filter',
 'urlfilter quota time error' => 'Invalid value for time quota',
 'urlfilter quota user error' => 'At least one username is required',
+'urlfilter redirect template' => 'Redirect page template',
 'urlfilter redirectpage' => 'Redirect to this URL',
 'urlfilter remove file' => 'Remove file from repository',
 'urlfilter renewal' => 'Renewal',
diff --git a/lfs/HTML-Template b/lfs/HTML-Template
new file mode 100644 (file)
index 0000000..fb837ba
--- /dev/null
@@ -0,0 +1,78 @@
+###############################################################################
+#                                                                             #
+# IPFire.org - A linux based firewall                                         #
+# Copyright (C) 2007  Michael Tremer & Christian Schmidt                      #
+#                                                                             #
+# This program is free software: you can redistribute it and/or modify        #
+# it under the terms of the GNU General Public License as published by        #
+# the Free Software Foundation, either version 3 of the License, or           #
+# (at your option) any later version.                                         #
+#                                                                             #
+# This program is distributed in the hope that it will be useful,             #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of              #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               #
+# GNU General Public License for more details.                                #
+#                                                                             #
+# You should have received a copy of the GNU General Public License           #
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.       #
+#                                                                             #
+###############################################################################
+
+###############################################################################
+# Definitions
+###############################################################################
+
+include Config
+
+VER        = 2.94
+
+THISAPP    = HTML-Template-$(VER)
+DL_FILE    = $(THISAPP).tar.gz
+DL_FROM    = $(URL_IPFIRE)
+DIR_APP    = $(DIR_SRC)/$(THISAPP)
+TARGET     = $(DIR_INFO)/$(THISAPP)
+
+###############################################################################
+# Top-level Rules
+###############################################################################
+
+objects = $(DL_FILE)
+
+$(DL_FILE) = $(DL_FROM)/$(DL_FILE)
+
+$(DL_FILE)_MD5 = 7b7683c3672d55fb922734ea1e9ba7e8
+
+install : $(TARGET)
+
+check : $(patsubst %,$(DIR_CHK)/%,$(objects))
+
+download :$(patsubst %,$(DIR_DL)/%,$(objects))
+
+md5 : $(subst %,%_MD5,$(objects))
+
+###############################################################################
+# Downloading, checking, md5sum
+###############################################################################
+
+$(patsubst %,$(DIR_CHK)/%,$(objects)) :
+       @$(CHECK)
+
+$(patsubst %,$(DIR_DL)/%,$(objects)) :
+       @$(LOAD)
+
+$(subst %,%_MD5,$(objects)) :
+       @$(MD5)
+
+###############################################################################
+# Installation Details
+###############################################################################
+
+$(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects))
+       @$(PREBUILD)
+       @rm -rf $(DIR_APP) && cd $(DIR_SRC) && tar zxf $(DIR_DL)/$(DL_FILE)
+       cd $(DIR_APP) && perl Makefile.PL
+       cd $(DIR_APP) && make $(MAKETUNING) $(EXTRA_MAKE)
+       cd $(DIR_APP) && make install
+       @rm -rf $(DIR_APP)
+       @$(POSTBUILD)
+
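
A quick way to check that the module built by this LFS file is installed is to ask Perl for its version; a one-liner sketch (run on the build or target system):

# Prints the installed HTML::Template version, or fails if the module is missing.
perl -MHTML::Template -e 'print "$HTML::Template::VERSION\n";'
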
index 7ada05d633ffb41ead0de679e0433784fd751345..c7bc8a8eb608ee8c6afdd3082247c97c970d6625 100644 (file)
@@ -70,6 +70,8 @@ $(subst %,%_MD5,$(objects)) :
 $(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects))
        @$(PREBUILD)
        @rm -rf $(DIR_APP) && cd $(DIR_SRC) && tar zxf $(DIR_DL)/$(DL_FILE)
+       cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/bridge-utils-1.5-compile-fix-1.patch
+       cd $(DIR_APP) && find . -name Makefile.in | xargs sed -i -e "s/^KERNEL_HEADERS/#&/g"
        cd $(DIR_APP) && autoreconf -vfi
        cd $(DIR_APP) && ./configure --prefix=/usr
        cd $(DIR_APP) && make $(MAKETUNING)
index 53f65183904898e48d23ff38cd4e965249cfa719..57a9b163ffc5d843a91821d9b89bdefe9ac11ec8 100644 (file)
@@ -81,6 +81,7 @@ $(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects))
        @$(PREBUILD)
        @rm -rf $(DIR_APP) && cd $(DIR_SRC) && tar xaf $(DIR_DL)/$(DL_FILE)
        cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/compat-drivers-3.8-1-u-kref_get_unless_zero.patch
+       cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/compat-drivers-3.8.3-ath_ignore_eeprom_regdomain.patch
 
        # kfifo has no license info and taints kernel
        cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/compat-wireless-2.6.39_kfifo_module_info.patch
index ff02a1991404dd901b3398d0aab80d2e4206757c..9e01b06cfb9c576625c887c46a3bbeba791b2358 100644 (file)
@@ -77,10 +77,6 @@ $(subst %,%_MD5,$(objects)) :
 $(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects))
        @$(PREBUILD)
        @rm -rf $(DIR_APP) && cd $(DIR_SRC) && tar zxf $(DIR_DL)/$(DL_FILE)
-       #cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/hostapd-usb_hw_did_not_ack.patch
-       -cp /usr/src/linux/include/linux/genetlink.h /usr/include/linux/
-       -cp /usr/src/linux/include/linux/netfilter/nfnetlink.h /usr/include/linux/netfilter/
-       -cp /usr/src/linux/include/linux/netfilter/nfnetlink_compat.h /usr/include/linux/netfilter/
        cd $(DIR_APP)/hostapd && cp $(DIR_SRC)/config/hostapd/config ./.config
        cd $(DIR_APP)/hostapd && sed -e "s@/usr/local@/usr@g" -i Makefile
        cd $(DIR_APP)/hostapd && make $(MAKETUNING) $(EXTRA_MAKE)
index a247ba7b37d43954d01e783dba53f9f266f2188a..f3fb70868ea93630200e07a2b2ee6b6a8b6c57a4 100644 (file)
@@ -24,7 +24,7 @@
 
 include Config
 
-VER        = 1.4.14
+VER        = 1.4.20
 
 THISAPP    = iptables-$(VER)
 DL_FILE    = $(THISAPP).tar.bz2
@@ -38,21 +38,21 @@ TARGET     = $(DIR_INFO)/$(THISAPP)
 objects =      $(DL_FILE) \
                        netfilter-layer7-v2.22.tar.gz \
                        libnfnetlink-1.0.0.tar.bz2 \
-                       libnetfilter_queue-0.0.17.tar.bz2 \
+                       libnetfilter_queue-1.0.1.tar.bz2 \
                        libnetfilter_conntrack-1.0.2.tar.bz2 \
                        libnetfilter_cttimeout-1.0.0.tar.bz2
 
 $(DL_FILE) = $(DL_FROM)/$(DL_FILE)
 netfilter-layer7-v2.22.tar.gz          = $(URL_IPFIRE)/netfilter-layer7-v2.22.tar.gz
 libnfnetlink-1.0.0.tar.bz2             = $(URL_IPFIRE)/libnfnetlink-1.0.0.tar.bz2
-libnetfilter_queue-0.0.17.tar.bz2      = $(URL_IPFIRE)/libnetfilter_queue-0.0.17.tar.bz2
+libnetfilter_queue-1.0.1.tar.bz2       = $(URL_IPFIRE)/libnetfilter_queue-1.0.1.tar.bz2
 libnetfilter_conntrack-1.0.2.tar.bz2   = $(URL_IPFIRE)/libnetfilter_conntrack-1.0.2.tar.bz2
 libnetfilter_cttimeout-1.0.0.tar.bz2   = $(URL_IPFIRE)/libnetfilter_cttimeout-1.0.0.tar.bz2
 
-$(DL_FILE)_MD5 = 5ab24ad683f76689cfe7e0c73f44855d
+$(DL_FILE)_MD5 = 387b92d3efcf4f07fe31c3bf0f1d18f5
 netfilter-layer7-v2.22.tar.gz_MD5 = 98dff8a3d5a31885b73341633f69501f
 libnfnetlink-1.0.0.tar.bz2_MD5 = 016fdec8389242615024c529acc1adb8
-libnetfilter_queue-0.0.17.tar.bz2_MD5 = 2cde35e678ead3a8f9eb896bf807a159
+libnetfilter_queue-1.0.1.tar.bz2_MD5 = 08b968cb2d36c24deb7f26a69f5d8602
 libnetfilter_conntrack-1.0.2.tar.bz2_MD5 = 447114b5d61bb9a9617ead3217c3d3ff
 libnetfilter_cttimeout-1.0.0.tar.bz2_MD5 = 7697437fc9ebb6f6b83df56a633db7f9
 
@@ -86,8 +86,7 @@ $(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects))
        @rm -rf $(DIR_APP) $(DIR_SRC)/libnfnetlink-1.0.0 $(DIR_SRC)/netfilter-layer7* $(DIR_SRC)/libnetfilter_queue-0.0.17
 
        @cd $(DIR_SRC) && tar jxf $(DIR_DL)/$(DL_FILE)
-       -cd /usr/include && patch -Np1 < $(DIR_SRC)/src/patches/iptables-1.4.6-errorno_includes.patch
-       cp -rf /usr/src/linux/include/linux/netfilter /usr/include/linux
+#      cp -rf /usr/src/linux/include/linux/netfilter /usr/include/linux
 
        # Layer7
        cd $(DIR_SRC) && tar zxf $(DIR_DL)/netfilter-layer7-v2.22.tar.gz
@@ -99,7 +98,6 @@ $(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects))
 
        # imq
        cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/iptables-1.4.12-IMQ-test4.diff
-#      chmod +x $(DIR_APP)/extensions/.IMQ-test*
 
        cd $(DIR_APP) && ./configure  --prefix=/usr --with-ksource=/usr/src/linux \
                                    --libdir=/lib --includedir=/usr/include --enable-libipq \
@@ -110,25 +108,25 @@ $(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects))
        cd $(DIR_APP) && make install
 
        # Iptables doesn't install all headers
-       mkdir -p /usr/include/net/netfilter
-       cp -f $(DIR_APP)/include/net/netfilter/*.h /usr/include/net/netfilter/
-       mkdir -p /usr/include/iptables
-       cp -f $(DIR_APP)/include/iptables/*.h /usr/include/iptables/
-       cp -f $(DIR_APP)/include/iptables.h /usr/include/
-       mkdir -p /usr/include/libipulog
-       cp -f $(DIR_APP)/include/libipulog/*.h /usr/include/libipulog/
-       mkdir -p /usr/include/libiptc
-       cp -f $(DIR_APP)/include/libiptc/*.h /usr/include/libiptc/
+#      mkdir -p /usr/include/net/netfilter
+#      cp -f $(DIR_APP)/include/net/netfilter/*.h /usr/include/net/netfilter/
+#      mkdir -p /usr/include/iptables
+#      cp -f $(DIR_APP)/include/iptables/*.h /usr/include/iptables/
+#      cp -f $(DIR_APP)/include/iptables.h /usr/include/
+#      mkdir -p /usr/include/libipulog
+#      cp -f $(DIR_APP)/include/libipulog/*.h /usr/include/libipulog/
+#      mkdir -p /usr/include/libiptc
+#      cp -f $(DIR_APP)/include/libiptc/*.h /usr/include/libiptc/
 
        cd $(DIR_SRC) && tar xfj $(DIR_DL)/libnfnetlink-1.0.0.tar.bz2
        cd $(DIR_SRC)/libnfnetlink-1.0.0 && ./configure --prefix=/usr
        cd $(DIR_SRC)/libnfnetlink-1.0.0 && make $(MAKETUNING) $(EXTRA_MAKE)
        cd $(DIR_SRC)/libnfnetlink-1.0.0 && make install
 
-       cd $(DIR_SRC) && tar xfj $(DIR_DL)/libnetfilter_queue-0.0.17.tar.bz2
-       cd $(DIR_SRC)/libnetfilter_queue-0.0.17 && ./configure --prefix=/usr
-       cd $(DIR_SRC)/libnetfilter_queue-0.0.17 && make $(MAKETUNING) $(EXTRA_MAKE)
-       cd $(DIR_SRC)/libnetfilter_queue-0.0.17 && make install
+       cd $(DIR_SRC) && tar xfj $(DIR_DL)/libnetfilter_queue-1.0.1.tar.bz2
+       cd $(DIR_SRC)/libnetfilter_queue-1.0.1 && ./configure --prefix=/usr
+       cd $(DIR_SRC)/libnetfilter_queue-1.0.1 && make $(MAKETUNING) $(EXTRA_MAKE)
+       cd $(DIR_SRC)/libnetfilter_queue-1.0.1 && make install
 
        cd $(DIR_SRC) && tar xfj $(DIR_DL)/libnetfilter_conntrack-1.0.2.tar.bz2
        cd $(DIR_SRC)/libnetfilter_conntrack-1.0.2 && ./configure --prefix=/usr
diff --git a/lfs/iptraf-ng b/lfs/iptraf-ng
new file mode 100644 (file)
index 0000000..d049173
--- /dev/null
@@ -0,0 +1,87 @@
+###############################################################################
+# IPFire.org    - An Open Source Firewall Solution                            #
+# Copyright (C) - IPFire Development Team <info@ipfire.org>                   #
+###############################################################################
+
+###############################################################################
+# Definitions
+###############################################################################
+
+include Config
+
+VER        = 1.1.4
+
+THISAPP    = iptraf-ng-$(VER)
+DL_FILE    = $(THISAPP).tar.gz
+DL_FROM    = $(URL_IPFIRE)
+DIR_APP    = $(DIR_SRC)/$(THISAPP)
+TARGET     = $(DIR_INFO)/$(THISAPP)
+PROG       = iptraf-ng
+PAK_VER           = 1
+
+DEPS       = ""
+
+###############################################################################
+# Top-level Rules
+###############################################################################
+
+objects = $(DL_FILE)
+
+$(DL_FILE) = $(DL_FROM)/$(DL_FILE)
+
+$(DL_FILE)_MD5 = de27cfeeede96e2acfb0edc8439b034a
+
+install : $(TARGET)
+
+check : $(patsubst %,$(DIR_CHK)/%,$(objects))
+
+download :$(patsubst %,$(DIR_DL)/%,$(objects))
+
+md5 : $(subst %,%_MD5,$(objects))
+
+dist: 
+       @$(PAK)
+
+###############################################################################
+# Downloading, checking, md5sum
+###############################################################################
+
+$(patsubst %,$(DIR_CHK)/%,$(objects)) :
+       @$(CHECK)
+
+$(patsubst %,$(DIR_DL)/%,$(objects)) :
+       @$(LOAD)
+
+$(subst %,%_MD5,$(objects)) :
+       @$(MD5)
+
+###############################################################################
+# Installation Details
+###############################################################################
+
+
+$(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects))
+       @$(PREBUILD)
+       @rm -rf $(DIR_APP) && cd $(DIR_SRC) && tar xzf $(DIR_DL)/$(DL_FILE)
+       cd $(DIR_APP) && ./configure \
+               --prefix=/usr
+
+       cd $(DIR_APP) && make $(MAKETUNING) $(EXTRA_MAKE)
+
+       # Binary install
+       cd $(DIR_APP) && install -v -m750 -D iptraf-ng /usr/sbin/iptraf-ng
+       cd $(DIR_APP) && install -v -m750 -D rvnamed-ng /usr/sbin/rvnamed-ng
+
+       # Directory install
+       -mkdir -vp /var/log/iptraf-ng
+       chmod 750 /var/log/iptraf-ng
+       chown root.root /var/log/iptraf-ng
+       -mkdir -vp /var/lib/iptraf-ng
+       chmod 750 /var/lib/iptraf-ng
+       chown root.root /var/lib/iptraf-ng
+       -mkdir -vp /var/lock/iptraf-ng
+       chmod 755 /var/lock/iptraf-ng
+       chown root.root /var/lock/iptraf-ng
+
+       @rm -rf $(DIR_APP)
+       @$(POSTBUILD)
diff --git a/lfs/iw b/lfs/iw
index 6a6627fb72d61a03603ed6adab36ba6d804731e4..e55bc5c35da13e8d6158436d09410f70686ee117 100644 (file)
--- a/lfs/iw
+++ b/lfs/iw
@@ -24,7 +24,7 @@
 
 include Config
 
-VER        = 3.8
+VER        = 3.10
 
 THISAPP    = iw-$(VER)
 DL_FILE    = $(THISAPP).tar.xz
@@ -40,7 +40,7 @@ objects = $(DL_FILE)
 
 $(DL_FILE) = $(DL_FROM)/$(DL_FILE)
 
-$(DL_FILE)_MD5 = 2aae163b7c3b581b7d94f34a5d3e47d8
+$(DL_FILE)_MD5 = 07219ad06535bc270f7a8873aba6d5fa
 
 install : $(TARGET)
 
@@ -70,7 +70,7 @@ $(subst %,%_MD5,$(objects)) :
 $(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects))
        @$(PREBUILD)
        @rm -rf $(DIR_APP) && cd $(DIR_SRC) && tar axf $(DIR_DL)/$(DL_FILE)
-       cd $(DIR_APP) && CFLAGS+=-I/usr/src/linux/include make $(MAKETUNING)
+       cd $(DIR_APP) && make $(MAKETUNING)
        cd $(DIR_APP) && make install
        @rm -rf $(DIR_APP)
        @$(POSTBUILD)
index 189175122ad2e77e50ae5be498af18ce0a4b1e60..28b7ed8da2cdb4252329878a56dcda02b95bfa22 100644 (file)
@@ -77,7 +77,8 @@ $(subst %,%_MD5,$(objects)) :
 $(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects))
        @$(PREBUILD)
        @rm -rf $(DIR_APP) && cd $(DIR_SRC) && tar axf $(DIR_DL)/$(DL_FILE)
-       cd $(DIR_APP) && ./configure --prefix=/usr --sysconfdir=/etc
+       cd $(DIR_APP) && ./configure --prefix=/usr --sysconfdir=/etc \
+               --with-kernel-dir=/usr/include
        cd $(DIR_APP) && make $(MAKETUNING)
        cd $(DIR_APP) && make install
 
index ea557d9132f5c2b6dace8842d514f715ae4e0cd1..a357935140c3f6d753b6b21d4a37db0fa2a17433 100644 (file)
--- a/lfs/libnl
+++ b/lfs/libnl
@@ -24,7 +24,7 @@
 
 include Config
 
-VER        = 1.1
+VER        = 1.1.4
 
 THISAPP    = libnl-$(VER)
 DL_FILE    = $(THISAPP).tar.gz
@@ -40,7 +40,7 @@ objects = $(DL_FILE)
 
 $(DL_FILE) = $(DL_FROM)/$(DL_FILE)
 
-$(DL_FILE)_MD5 = ae970ccd9144e132b68664f98e7ceeb1
+$(DL_FILE)_MD5 = 580cb878be536804daca87fb75ae46cc
 
 install : $(TARGET)
 
@@ -73,13 +73,6 @@ $(subst %,%_MD5,$(objects)) :
 $(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects))
        @$(PREBUILD)
        @rm -rf $(DIR_APP) && cd $(DIR_SRC) && tar zxf $(DIR_DL)/$(DL_FILE)
-       cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/libnl-1.1-ULONG_MAX.patch
-       cd $(DIR_APP) && ln -s /usr/src/linux/include/linux/if_vlan.h \
-                                             include/linux/if_vlan.h
-       cd $(DIR_APP) && ln -s /usr/src/linux/include/linux/netfilter/nf_conntrack_common.h \
-                                             include/linux/netfilter/nf_conntrack_common.h
-       cd $(DIR_APP) && ln -s /usr/src/linux/include/linux/netfilter/nf_conntrack_tcp.h \
-                                             include/linux/netfilter/nf_conntrack_tcp.h
        cd $(DIR_APP) && ./configure --prefix=/usr
        cd $(DIR_APP) && make $(MAKETUNING)
        cd $(DIR_APP) && make install
index 094f09e6455ede9742fc993e24a33d9eff44b088..2ca7696cc08ffc33ba849aec9c0d6f3c2794f8e9 100644 (file)
--- a/lfs/linux
+++ b/lfs/linux
@@ -24,7 +24,7 @@
 
 include Config
 
-VER        = 3.2.48
+VER        = 3.10.9
 
 RPI_PATCHES = linux-3.2.27-ada8b44
 
@@ -35,7 +35,7 @@ DIR_APP    = $(DIR_SRC)/$(THISAPP)
 CFLAGS     =
 CXXFLAGS   =
 
-PAK_VER    = 30
+PAK_VER    = 31
 DEPS      = ""
 
 VERSUFIX=ipfire$(KCFG)
@@ -71,7 +71,7 @@ objects =$(DL_FILE) \
 $(DL_FILE)                             = $(URL_IPFIRE)/$(DL_FILE)
 rpi-patches-$(RPI_PATCHES).patch.xz    = $(URL_IPFIRE)/rpi-patches-$(RPI_PATCHES).patch.xz
 
-$(DL_FILE)_MD5                         = f560aa5fcf07e57ea0ca66fdfdb53ef1
+$(DL_FILE)_MD5                         = 28d1e1117c30fdd861f70ac0f9b677aa
 rpi-patches-$(RPI_PATCHES).patch.xz_MD5        = 966687ff27e450e04ff50e0da829dc00
 
 
@@ -109,16 +109,29 @@ $(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects))
        ln -svf linux-$(VER) $(DIR_SRC)/linux
 
        # Linux Intermediate Queueing Device
-       cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/linux-3.2-imq.patch
+       cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/imq_kernel3.10.patch
 
        # ipp2p 0.8.2-ipfire
-       cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/linux-3.0-ipp2p-0.8.2-ipfire.patch
+       cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/linux-3.10-ipp2p-0.8.2-ipfire.patch
 
        # Layer7-patch
-       cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/netfilter_layer7_2.22_kernel3.0.patch
+       cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/netfilter_layer7_2.22_kernel3.10-no_proc_interface.patch
+
+       # Grsecurity-patches
+ifneq "$(KCFG)" "-headers"
+       cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/grsecurity-2.9.1-3.10.9-201308202015.patch
+       cd $(DIR_APP) && rm localversion-grsec
+       cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/linux-3.7-disable-compat_vdso.patch
+endif
+
+       # Disable pcspeaker autoload
+       cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/linux-2.6.30-no-pcspkr-modalias.patch
+
+       # Remove ACPI Blacklist message
+       cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/linux-2.6-silence-acpi-blacklist.patch
 
        # Add LED trigger
-       cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/linux-3.2.33-ledtrig-netdev-1.patch
+#      cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/linux-3.2.33-ledtrig-netdev-1.patch
 
        # Fix uevent PHYSDEVDRIVER
        cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/linux-3.2.33_ipg-fix-driver-name.patch
@@ -220,11 +233,9 @@ ifeq "$(LASTKERNEL)" "1"
        echo "options ipv6 disable_ipv6=1" > /etc/modprobe.d/ipv6
 endif
 
-ifneq "$(MACHINE_TYPE)" "arm"
-       # Disable geode_aes modul
-       mv /lib/modules/$(VER)-$(VERSUFIX)/kernel/drivers/crypto/geode-aes.ko \
+	# Disable the geode_aes module if it exists
+       -mv /lib/modules/$(VER)-$(VERSUFIX)/kernel/drivers/crypto/geode-aes.ko \
            /lib/modules/$(VER)-$(VERSUFIX)/kernel/drivers/crypto/geode-aes.ko.off
-endif
 endif
 
        @rm -rf $(DIR_SRC)/patch-o-matic* $(DIR_SRC)/iptables* $(DIR_SRC)/squashfs* $(DIR_SRC)/netfilter-layer7-*
index 7129ed58a3a6c213578c50dd73ba0a999301d46e..a314624882dae6d5fb830a0dabe82dc7a6812540 100644 (file)
@@ -24,7 +24,7 @@
 
 include Config
 
-VER        = 1.7
+VER        = 1.8
 
 THISAPP    = miniupnpd-$(VER)
 DL_FILE    = $(THISAPP).tar.gz
@@ -42,7 +42,7 @@ objects = $(DL_FILE)
 
 $(DL_FILE) = $(DL_FROM)/$(DL_FILE)
 
-$(DL_FILE)_MD5 = 5af9e8332d34a7b490d0d2ed3e674196
+$(DL_FILE)_MD5 = 0d8a8e936d5a0012cb260a3b972acbf3
 
 install : $(TARGET)
 
@@ -76,7 +76,7 @@ $(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects))
        @$(PREBUILD)
        @rm -rf $(DIR_APP) && cd $(DIR_SRC) && tar zxf $(DIR_DL)/$(DL_FILE)
        cd $(DIR_APP) && make -f Makefile.linux \
-               CFLAGS="$(CFLAGS) -DIPTABLES_143 -I/usr/src/linux/include"
+               CFLAGS="$(CFLAGS) -DIPTABLES_143" LIBS="-lip4tc -lnfnetlink"
        cd $(DIR_APP) && install -m 755 miniupnpd /usr/sbin
        -mkdir -pv /etc/miniupnpd
        cp -vf $(DIR_SRC)/config/miniupnpd/miniupnpd.conf /etc/miniupnpd/miniupnpd.conf
index c17c95f68b5985cb0482953db3a17fec36351d53..08b4815f38bc0a8d2c7a3ebfa57b217f8ed34832 100644 (file)
@@ -70,12 +70,14 @@ $(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects))
        @$(PREBUILD)
        @rm -rf $(DIR_APP) && cd $(DIR_SRC) && tar jxf $(DIR_DL)/$(DL_FILE)
        cd $(DIR_APP) && patch -Np1 -i $(DIR_SRC)/src/patches/$(THISAPP)-gcc34-3.patch
-       cd $(DIR_APP) && patch -Np1 -i $(DIR_SRC)/src/patches/$(THISAPP)-kernel_headers-2.patch
+       cd $(DIR_APP) && patch -Np1 -i $(DIR_SRC)/src/patches/$(THISAPP)-kernel_headers-3.patch
        cd $(DIR_APP) && patch -Np1 -i $(DIR_SRC)/src/patches/$(THISAPP)-mii_ioctl-1.patch
        cd $(DIR_APP) && yes "" | make config
        cd $(DIR_APP) && sed -i -e 's|HAVE_IP_TOOLS 0|HAVE_IP_TOOLS 1|g' \
+                               -e 's|HAVE_HWSTRIP 1|HAVE_HWSTRIP 0|g' \
                                        -e 's|HAVE_MII 0|HAVE_MII 1|g' config.h
        cd $(DIR_APP) && sed -i -e 's|# HAVE_IP_TOOLS=0|HAVE_IP_TOOLS=1|g' \
+                               -e 's|HAVE_HWSTRIP 1|HAVE_HWSTRIP 0|g' \
                                        -e 's|# HAVE_MII=0|HAVE_MII=1|g' config.make
        cd $(DIR_APP) && make
        cd $(DIR_APP) && make update
index b5980087f485760c81c9a59b57e49fdd2ba4c259..0171627a4ac1fd50216395678845e1e2d64c4925 100644 (file)
--- a/lfs/samba
+++ b/lfs/samba
@@ -24,7 +24,7 @@
 
 include Config
 
-VER        = 3.6.17
+VER        = 3.6.18
 
 THISAPP    = samba-$(VER)
 DL_FILE    = $(THISAPP).tar.gz
@@ -32,7 +32,7 @@ DL_FROM    = $(URL_IPFIRE)
 DIR_APP    = $(DIR_SRC)/$(THISAPP)
 TARGET     = $(DIR_INFO)/$(THISAPP)
 PROG       = samba
-PAK_VER    = 51
+PAK_VER    = 52
 
 DEPS       = "cups"
 
@@ -44,7 +44,7 @@ objects = $(DL_FILE)
 
 $(DL_FILE) = $(DL_FROM)/$(DL_FILE)
 
-$(DL_FILE)_MD5 = c67c3330545c8f1f7ee26e017c28439b
+$(DL_FILE)_MD5 = c7eec3e83fe4c4750240a8a0a214bbd4
 
 install : $(TARGET)
 
index 81118c2c39663bb2cdf5280b316a3c47ead22124..4a71b4da2007a0f6679f588f74b7b691641b74f8 100644 (file)
--- a/lfs/squid
+++ b/lfs/squid
 
 include Config
 
-VER        = 3.1.23
+VER        = 3.3.8
 
 THISAPP    = squid-$(VER)
-DL_FILE    = $(THISAPP).tar.bz2
+DL_FILE    = $(THISAPP).tar.xz
 DL_FROM    = $(URL_IPFIRE)
 DIR_APP    = $(DIR_SRC)/$(THISAPP)
 TARGET     = $(DIR_INFO)/$(THISAPP)
@@ -40,7 +40,7 @@ objects = $(DL_FILE)
 
 $(DL_FILE) = $(DL_FROM)/$(DL_FILE)
 
-$(DL_FILE)_MD5 = e15fdb8c615cf1f9525be0a2b75c60a7
+$(DL_FILE)_MD5 = 6a8fa0075f2fbdd899ac4c9d95fe67cb
 
 install : $(TARGET)
 
@@ -69,47 +69,56 @@ $(subst %,%_MD5,$(objects)) :
 
 $(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects))
        @$(PREBUILD)
-       @rm -rf $(DIR_APP) && cd $(DIR_SRC) && tar xjf $(DIR_DL)/$(DL_FILE)
-
-       cd $(DIR_APP) && patch -Np0 -i $(DIR_SRC)/src/patches/squid-3.1-10486.patch
-       cd $(DIR_APP) && patch -Np0 -i $(DIR_SRC)/src/patches/squid-3.1-10487.patch
-
-       cd $(DIR_APP) && ./configure --prefix=/usr --disable-nls \
-          --datadir=/usr/lib/squid \
-          --mandir=/usr/share/man --libexecdir=/usr/lib/squid \
-          --localstatedir=/var --sysconfdir=/etc/squid \
-          --enable-poll --enable-icmp --disable-wccp \
-          --enable-ident-lookups \
-          --enable-storeio="aufs,diskd,ufs" --enable-ssl \
-          --enable-underscores --enable-ntlm-fail-open --enable-arp-acl \
-          --enable-http-violations --enable-auth=basic,ntlm \
-          --enable-removal-policies="heap,lru" \
-          --enable-delay-pools --enable-linux-netfilter \
-          --enable-basic-auth-helpers="NCSA,SMB,MSNT,LDAP,multi-domain-NTLM,PAM,squid_radius_auth" \
-          --enable-useragent-log \
-          --enable-referer-log \
-             --enable-snmp \
-          --with-pthreads --with-dl \
-          --with-maxfd="65536" \
-          --with-filedescriptors=65536 \
-          --with-large-files \
-          --with-aio \
-          --enable-async-io=8 \
-          --enable-unlinkd \
-          --enable-ntln-fail-open \
-          --enable-ntlm-auth-helpers="smb_lm,no_check,fakeauth" \
-          --enable-internal-dns \
-          --enable-epoll \
-          --disable-kqueue \
-          --enable-select \
-          --enable-cache-digests \
-          --enable-forw-via-db \
-          --enable-htcp \
-          --enable-ipf-transparent \
-          --enable-kill-parent-hack \
-          --disable-wccpv2 \
-          --enable-icap-client \
-          --disable-esi
+       @rm -rf $(DIR_APP) && cd $(DIR_SRC) && tar xaf $(DIR_DL)/$(DL_FILE)
+       cd $(DIR_APP) && ./configure \
+               --prefix=/usr \
+               --sysconfdir=/etc/squid \
+               --datadir=/usr/lib/squid \
+               --mandir=/usr/share/man \
+               --libexecdir=/usr/lib/squid \
+               --localstatedir=/var \
+               --disable-ipv6 \
+               --enable-poll \
+               --disable-icmp \
+               --disable-wccp \
+               --enable-ident-lookups \
+               --enable-storeio="aufs,diskd,ufs" \
+               --enable-ssl \
+               --enable-underscores \
+               --enable-http-violations \
+               --enable-removal-policies="heap,lru" \
+               --enable-delay-pools \
+               --enable-linux-netfilter \
+               --enable-snmp \
+               --enable-auth \
+               --enable-auth-basic \
+               --enable-auth-digest \
+               --enable-auth-negotiate \
+               --enable-auth-ntlm \
+               --enable-log-daemon-helpers \
+               --enable-url-rewrite-helpers \
+               --enable-build-info \
+               --enable-eui \
+               --with-pthreads \
+               --with-dl \
+               --with-maxfd="65536" \
+               --with-filedescriptors=65536 \
+               --with-large-files \
+               --with-aio \
+               --enable-async-io=8 \
+               --enable-unlinkd \
+               --enable-internal-dns \
+               --enable-epoll \
+               --disable-kqueue \
+               --enable-select \
+               --enable-cache-digests \
+               --enable-forw-via-db \
+               --enable-htcp \
+               --enable-ipf-transparent \
+               --enable-kill-parent-hack \
+               --disable-wccpv2 \
+               --enable-icap-client \
+               --disable-esi
 
        cd $(DIR_APP) && make $(MAKETUNING)
        cd $(DIR_APP) && make install
diff --git a/lfs/tor b/lfs/tor
index 8bce4be8f1541692cf9d44646e4641406de48b2b..6161a56484098f76ff78f92964a56f9febadbe0d 100644 (file)
--- a/lfs/tor
+++ b/lfs/tor
@@ -32,7 +32,7 @@ DL_FROM    = $(URL_IPFIRE)
 DIR_APP    = $(DIR_SRC)/$(THISAPP)
 TARGET     = $(DIR_INFO)/$(THISAPP)
 PROG       = tor
-PAK_VER    = 1
+PAK_VER    = 3
 
 DEPS       = "libevent2"
 
@@ -107,6 +107,10 @@ $(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects))
        install -v -m 644 $(DIR_SRC)/config/tor/defaults-torrc \
                /usr/share/tor/defaults-torrc
 
+       # Install start links and backup include file.
+       ln -sf ../init.d/tor /etc/rc.d/rc3.d/S60tor
+       ln -sf ../init.d/tor /etc/rc.d/rc0.d/K40tor
+       ln -sf ../init.d/tor /etc/rc.d/rc6.d/K40tor
        install -v -m 644 $(DIR_SRC)/config/backup/includes/tor \
                         /var/ipfire/backup/addons/includes/tor
        @rm -rf $(DIR_APP)
index 9d5dfa578813bd2736447ca5ac2d912260a526a0..3c1368dcf249c2e6e338d3964e42271a0026bf19 100644 (file)
@@ -24,7 +24,7 @@
 
 include Config
 
-VER        = 2.81
+VER        = 2.82
 
 THISAPP    = transmission-$(VER)
 DL_FILE    = $(THISAPP).tar.xz
@@ -32,7 +32,7 @@ DL_FROM    = $(URL_IPFIRE)
 DIR_APP    = $(DIR_SRC)/$(THISAPP)
 TARGET     = $(DIR_INFO)/$(THISAPP)
 PROG       = transmission
-PAK_VER    = 8
+PAK_VER    = 9
 
 DEPS       = "libevent2"
 
@@ -44,7 +44,7 @@ objects = $(DL_FILE)
 
 $(DL_FILE) = $(DL_FROM)/$(DL_FILE)
 
-$(DL_FILE)_MD5 = db1ad10ecff07150486dab2365ccb3a8
+$(DL_FILE)_MD5 = a5ef870c0410b12d10449c2d36fa4661
 
 install : $(TARGET)
 
index 551fea5269d536970e432df50266cc97f300df90..b13dd65d02d07443798337f2ed1a00ffaf7fa2b8 100644 (file)
 
 include Config
 
-VER        = 2011.12
-# Linare version: git clone git://git.linaro.org/boot/u-boot-linaro-stable.git
-# Branch: origin/Linaro-u-boot-2011.12
+VER        = 2013.07
 
-THISAPP    = uboot-panda-$(VER)
-DL_FILE    = $(THISAPP).tar.xz
+THISAPP    = u-boot-$(VER)
+DL_FILE    = $(THISAPP).tar.bz2
 DL_FROM    = $(URL_IPFIRE)
 DIR_APP    = $(DIR_SRC)/$(THISAPP)
 TARGET     = $(DIR_INFO)/$(THISAPP)
@@ -43,7 +41,7 @@ objects = $(DL_FILE)
 
 $(DL_FILE) = $(DL_FROM)/$(DL_FILE)
 
-$(DL_FILE)_MD5 = 19975e9bb4b10d8e67db84e51fcaa43b
+$(DL_FILE)_MD5 = 8445162690052e6afd4b8f87af2bb557
 
 install : $(TARGET)
 
@@ -75,13 +73,43 @@ dist:
 
 $(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects))
        @$(PREBUILD)
-       @rm -rf $(DIR_APP) && cd $(DIR_SRC) && tar Jxf $(DIR_DL)/$(DL_FILE)
-       cd $(DIR_APP) && make tools $(MAKETUNING)
-       cd $(DIR_APP) && install tools/mkimage /usr/bin/
-       cd $(DIR_APP) && make omap4_panda_config
-       cd $(DIR_APP) && make $(MAKETUNING)
-       cd $(DIR_APP) && install MLO /boot/
-       cd $(DIR_APP) && install u-boot.bin /boot/
-       cp -vf $(DIR_SRC)/config/u-boot/* /boot/
+       @rm -rf $(DIR_APP) && cd $(DIR_SRC) && tar axf $(DIR_DL)/$(DL_FILE)
+
+       # Pandaboard
+       -mkdir -pv /usr/share/u-boot/pandaboard
+       cd $(DIR_APP) && make CROSS_COMPILE="" omap4_panda_config
+       cd $(DIR_APP) && make CROSS_COMPILE="" HOSTCC="gcc $(CFLAGS)"
+       cd $(DIR_APP) && install -v -m 644 MLO u-boot.bin u-boot.img \
+               /usr/share/u-boot/pandaboard
+       cd $(DIR_APP) && make distclean
+
+       # Wandboard Quad
+       -mkdir -pv /usr/share/u-boot/wandboard_quad
+       cd $(DIR_APP) && make CROSS_COMPILE="" wandboard_quad_config
+       cd $(DIR_APP) && make CROSS_COMPILE="" HOSTCC="gcc $(CFLAGS)"
+       cd $(DIR_APP) && install -v -m 644 u-boot.imx \
+               /usr/share/u-boot/wandboard_quad
+       cd $(DIR_APP) && make distclean
+
+       # Wandboard Dual
+       -mkdir -pv /usr/share/u-boot/wandboard_dl
+       cd $(DIR_APP) && make CROSS_COMPILE="" wandboard_dl_config
+       cd $(DIR_APP) && make CROSS_COMPILE="" HOSTCC="gcc $(CFLAGS)"
+       cd $(DIR_APP) && install -v -m 644 u-boot.imx \
+               /usr/share/u-boot/wandboard_dl
+       cd $(DIR_APP) && make distclean
+
+       # Wandboard Solo
+       -mkdir -pv /usr/share/u-boot/wandboard_solo
+       cd $(DIR_APP) && make CROSS_COMPILE="" wandboard_solo_config
+       cd $(DIR_APP) && make CROSS_COMPILE="" HOSTCC="gcc $(CFLAGS)"
+       cd $(DIR_APP) && install -v -m 644 u-boot.imx \
+               /usr/share/u-boot/wandboard_solo
+       cd $(DIR_APP) && make distclean
+
+       # mkimage
+       cd $(DIR_APP) && make CROSS_COMPILE="" HOSTCC="gcc $(CFLAGS)" tools
+       cd $(DIR_APP) && install -v -m 755 tools/mkimage /usr/bin
+
        @rm -rf $(DIR_APP)
        @$(POSTBUILD)
diff --git a/lfs/u-boot-panda b/lfs/u-boot-panda
new file mode 100644 (file)
index 0000000..8441ee6
--- /dev/null
+++ b/lfs/u-boot-panda
@@ -0,0 +1,85 @@
+###############################################################################
+#                                                                             #
+# IPFire.org - A linux based firewall                                         #
+# Copyright (C) 2007-2011  IPFire Team  <info@ipfire.org>                     #
+#                                                                             #
+# This program is free software: you can redistribute it and/or modify        #
+# it under the terms of the GNU General Public License as published by        #
+# the Free Software Foundation, either version 3 of the License, or           #
+# (at your option) any later version.                                         #
+#                                                                             #
+# This program is distributed in the hope that it will be useful,             #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of              #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               #
+# GNU General Public License for more details.                                #
+#                                                                             #
+# You should have received a copy of the GNU General Public License           #
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.       #
+#                                                                             #
+###############################################################################
+
+###############################################################################
+# Definitions
+###############################################################################
+
+include Config
+
+VER        = 2011.12
+# Linaro version: git clone git://git.linaro.org/boot/u-boot-linaro-stable.git
+# Branch: origin/Linaro-u-boot-2011.12
+
+THISAPP    = uboot-panda-$(VER)
+DL_FILE    = $(THISAPP).tar.xz
+DL_FROM    = $(URL_IPFIRE)
+DIR_APP    = $(DIR_SRC)/$(THISAPP)
+TARGET     = $(DIR_INFO)/$(THISAPP)
+SUP_ARCH   = armv5tel
+
+###############################################################################
+# Top-level Rules
+###############################################################################
+
+objects = $(DL_FILE)
+
+$(DL_FILE) = $(DL_FROM)/$(DL_FILE)
+
+$(DL_FILE)_MD5 = 19975e9bb4b10d8e67db84e51fcaa43b
+
+install : $(TARGET)
+
+check : $(patsubst %,$(DIR_CHK)/%,$(objects))
+
+download :$(patsubst %,$(DIR_DL)/%,$(objects))
+
+md5 : $(subst %,%_MD5,$(objects))
+
+###############################################################################
+# Downloading, checking, md5sum
+###############################################################################
+
+$(patsubst %,$(DIR_CHK)/%,$(objects)) :
+       @$(CHECK)
+
+$(patsubst %,$(DIR_DL)/%,$(objects)) :
+       @$(LOAD)
+
+$(subst %,%_MD5,$(objects)) :
+       @$(MD5)
+
+dist: 
+       @$(PAK)
+
+###############################################################################
+# Installation Details
+###############################################################################
+
+$(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects))
+       @$(PREBUILD)
+       @rm -rf $(DIR_APP) && cd $(DIR_SRC) && tar Jxf $(DIR_DL)/$(DL_FILE)
+       cd $(DIR_APP) && make omap4_panda_config
+       cd $(DIR_APP) && make $(MAKETUNING)
+       cd $(DIR_APP) && install MLO /boot/
+       cd $(DIR_APP) && install u-boot.bin /boot/
+       cp -vf $(DIR_SRC)/config/u-boot/* /boot/
+       @rm -rf $(DIR_APP)
+       @$(POSTBUILD)
diff --git a/lfs/w_scan b/lfs/w_scan
index a18a08a02c26e57db8534ecda72134887c992310..c661a80b049c6b9bbac43342adade5c745c98e65 100644 (file)
--- a/lfs/w_scan
+++ b/lfs/w_scan
@@ -25,7 +25,7 @@
 include Config
 
 
-VER        = 20121111
+VER        = 20130331
 
 THISAPP    = w_scan-$(VER)
 DL_FILE    = $(THISAPP).tar.bz2
@@ -33,7 +33,7 @@ DL_FROM    = $(URL_IPFIRE)
 DIR_APP    = $(DIR_SRC)/$(THISAPP)
 TARGET     = $(DIR_INFO)/$(THISAPP)
 PROG       = w_scan
-PAK_VER    = 4
+PAK_VER    = 5
 
 DEPS       = ""
 
@@ -45,7 +45,7 @@ objects = $(DL_FILE)
 
 $(DL_FILE) = $(DL_FROM)/$(DL_FILE)
 
-$(DL_FILE)_MD5 = 30da05747fed9988e11ebc7745f5e71f
+$(DL_FILE)_MD5 = dfc14a4707321e068a594d6899398df8
 
 install : $(TARGET)
 
diff --git a/lfs/wavemon b/lfs/wavemon
new file mode 100644 (file)
index 0000000..a3df36e
--- /dev/null
+++ b/lfs/wavemon
@@ -0,0 +1,71 @@
+###############################################################################
+# IPFire.org    - An Open Source Firewall Solution                            #
+# Copyright (C) - IPFire Development Team <info@ipfire.org>                   #
+###############################################################################
+
+###############################################################################
+# Definitions
+###############################################################################
+
+include Config
+
+VER        = 0.7.5
+
+THISAPP    = wavemon-$(VER)
+DL_FILE    = $(THISAPP).tar.bz2
+DL_FROM    = $(URL_IPFIRE)
+DIR_APP    = $(DIR_SRC)/$(THISAPP)
+TARGET     = $(DIR_INFO)/$(THISAPP)
+PROG       = wavemon
+PAK_VER    = 1
+
+DEPS       = ""
+
+###############################################################################
+# Top-level Rules
+###############################################################################
+
+objects = $(DL_FILE)
+
+$(DL_FILE) = $(DL_FROM)/$(DL_FILE)
+
+$(DL_FILE)_MD5 = 77d4a0f099ca98cf98a915adc70694ba
+
+install : $(TARGET)
+
+check : $(patsubst %,$(DIR_CHK)/%,$(objects))
+
+download :$(patsubst %,$(DIR_DL)/%,$(objects))
+
+md5 : $(subst %,%_MD5,$(objects))
+
+dist: 
+       @$(PAK)
+
+###############################################################################
+# Downloading, checking, md5sum
+###############################################################################
+
+$(patsubst %,$(DIR_CHK)/%,$(objects)) :
+       @$(CHECK)
+
+$(patsubst %,$(DIR_DL)/%,$(objects)) :
+       @$(LOAD)
+
+$(subst %,%_MD5,$(objects)) :
+       @$(MD5)
+
+###############################################################################
+# Installation Details
+###############################################################################
+
+$(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects))
+       @$(PREBUILD)
+       @rm -rf $(DIR_APP) && cd $(DIR_SRC) && tar axf $(DIR_DL)/$(DL_FILE)
+       cd $(DIR_APP) && ./configure \
+               --prefix=/usr
+
+       cd $(DIR_APP) && make $(MAKETUNING)
+       cd $(DIR_APP) && make install
+       @rm -rf $(DIR_APP)
+       @$(POSTBUILD)
diff --git a/lfs/xinetd b/lfs/xinetd
new file mode 100644 (file)
index 0000000..62f34ea
--- /dev/null
+++ b/lfs/xinetd
@@ -0,0 +1,85 @@
+###############################################################################
+#                                                                             #
+# IPFire.org - A linux based firewall                                         #
+# Copyright (C) 2007  Michael Tremer & Christian Schmidt                      #
+#                                                                             #
+# This program is free software: you can redistribute it and/or modify        #
+# it under the terms of the GNU General Public License as published by        #
+# the Free Software Foundation, either version 3 of the License, or           #
+# (at your option) any later version.                                         #
+#                                                                             #
+# This program is distributed in the hope that it will be useful,             #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of              #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               #
+# GNU General Public License for more details.                                #
+#                                                                             #
+# You should have received a copy of the GNU General Public License           #
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.       #
+#                                                                             #
+###############################################################################
+
+###############################################################################
+# Definitions
+###############################################################################
+
+include Config
+
+VER        = 2.3.15
+
+THISAPP    = xinetd-$(VER)
+DL_FILE    = $(THISAPP).tar.gz
+DL_FROM    = $(URL_IPFIRE)
+DIR_APP    = $(DIR_SRC)/$(THISAPP)
+TARGET     = $(DIR_INFO)/$(THISAPP)
+PROG       = xinetd
+PAK_VER    = 1
+
+###############################################################################
+# Top-level Rules
+###############################################################################
+
+objects = $(DL_FILE)
+
+$(DL_FILE) = $(DL_FROM)/$(DL_FILE)
+
+$(DL_FILE)_MD5 = 77358478fd58efa6366accae99b8b04c
+
+install : $(TARGET)
+
+check : $(patsubst %,$(DIR_CHK)/%,$(objects))
+
+download :$(patsubst %,$(DIR_DL)/%,$(objects))
+
+md5 : $(subst %,%_MD5,$(objects))
+
+dist: 
+       @$(PAK)
+
+###############################################################################
+# Downloading, checking, md5sum
+###############################################################################
+
+$(patsubst %,$(DIR_CHK)/%,$(objects)) :
+       @$(CHECK)
+
+$(patsubst %,$(DIR_DL)/%,$(objects)) :
+       @$(LOAD)
+
+$(subst %,%_MD5,$(objects)) :
+       @$(MD5)
+
+
+###############################################################################
+# Installation Details
+###############################################################################
+
+$(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects))
+       @$(PREBUILD)
+       @rm -rf $(DIR_APP) && cd $(DIR_SRC) && tar zxf $(DIR_DL)/$(DL_FILE)
+       cd $(DIR_APP) && ./configure --prefix=/usr
+       cd $(DIR_APP) && make $(MAKETUNING)
+       cd $(DIR_APP) && make install
+       -mkdir -pv /etc/xinetd.d
+       cp -f $(DIR_SRC)/config/xinetd/xinetd.conf /etc/xinetd.conf
+       @rm -rf $(DIR_APP)
+       @$(POSTBUILD)
diff --git a/make.sh b/make.sh
index eb9421ca5e00fbbfcb2f87f3cdb109093adc3b74..7ad42b09579f433be38f23222c9052c567cbc244 100755 (executable)
--- a/make.sh
+++ b/make.sh
@@ -25,7 +25,7 @@
 NAME="IPFire"                                                  # Software name
 SNAME="ipfire"                                                 # Short name
 VERSION="2.13"                                                 # Version number
-CORE="72"                                                      # Core Level (Filename)
+CORE="73"                                                      # Core Level (Filename)
 PAKFIRE_CORE="72"                                              # Core Level (PAKFIRE)
 GIT_BRANCH=`git status | head -n1 | cut -d" " -f4`             # Git Branch
 SLOGAN="www.ipfire.org"                                                # Software slogan
@@ -377,7 +377,9 @@ buildipfire() {
   ipfiremake dvb-firmwares
   ipfiremake zd1211-firmware
   ipfiremake rpi-firmware
+  ipfiremake bc
   ipfiremake u-boot
+  ipfiremake u-boot-panda
 
   if [ "${MACHINE_TYPE}" != "arm" ]; then
 
@@ -396,29 +398,29 @@ buildipfire() {
 
     # x86-pae (Native and new XEN) kernel build
     ipfiremake linux                   KCFG="-pae"
-    ipfiremake kvm-kmod                        KCFG="-pae"
-    ipfiremake v4l-dvb                 KCFG="-pae"
-    ipfiremake mISDN                   KCFG="-pae"
+#    ipfiremake kvm-kmod                       KCFG="-pae"
+#    ipfiremake v4l-dvb                        KCFG="-pae"
+#    ipfiremake mISDN                  KCFG="-pae"
     ipfiremake cryptodev               KCFG="-pae"
-    ipfiremake compat-drivers          KCFG="-pae"
-    ipfiremake r8169                   KCFG="-pae"
-    ipfiremake r8168                   KCFG="-pae"
-    ipfiremake r8101                   KCFG="-pae"
-    ipfiremake e1000e                  KCFG="-pae"
-    ipfiremake igb                     KCFG="-pae"
+#    ipfiremake compat-drivers         KCFG="-pae"
+#    ipfiremake r8169                  KCFG="-pae"
+#    ipfiremake r8168                  KCFG="-pae"
+#    ipfiremake r8101                  KCFG="-pae"
+#    ipfiremake e1000e                 KCFG="-pae"
+#    ipfiremake igb                    KCFG="-pae"
 
     # x86 kernel build
     ipfiremake linux                   KCFG=""
-    ipfiremake kvm-kmod                        KCFG=""
-    ipfiremake v4l-dvb                 KCFG=""
-    ipfiremake mISDN                   KCFG=""
+#    ipfiremake kvm-kmod                       KCFG=""
+#    ipfiremake v4l-dvb                        KCFG=""
+#    ipfiremake mISDN                  KCFG=""
     ipfiremake cryptodev               KCFG=""
-    ipfiremake compat-drivers          KCFG=""
-    ipfiremake r8169                   KCFG=""
-    ipfiremake r8168                   KCFG=""
-    ipfiremake r8101                   KCFG=""
-    ipfiremake e1000e                  KCFG=""
-    ipfiremake igb                     KCFG=""
+#    ipfiremake compat-drivers         KCFG=""
+#    ipfiremake r8169                  KCFG=""
+#    ipfiremake r8168                  KCFG=""
+#    ipfiremake r8101                  KCFG=""
+#    ipfiremake e1000e                 KCFG=""
+#    ipfiremake igb                    KCFG=""
 
   else
     # arm-rpi (Raspberry Pi) kernel build
@@ -529,6 +531,7 @@ buildipfire() {
   ipfiremake URI
   ipfiremake HTML-Tagset
   ipfiremake HTML-Parser
+  ipfiremake HTML-Template
   ipfiremake Compress-Zlib
   ipfiremake Digest
   ipfiremake Digest-SHA1
@@ -677,7 +680,6 @@ buildipfire() {
   ipfiremake mpc
   ipfiremake git
   ipfiremake squidclamav
-  ipfiremake bc
   ipfiremake vnstat
   ipfiremake vnstati
   ipfiremake iw
@@ -752,6 +754,7 @@ buildipfire() {
   ipfiremake acpid
   ipfiremake fping
   ipfiremake telnet
+  ipfiremake xinetd
   ipfiremake libgpg-error
   ipfiremake libassuan
   ipfiremake gpgme
@@ -782,6 +785,8 @@ buildipfire() {
   ipfiremake swatch
   ipfiremake tor
   ipfiremake arm
+  ipfiremake wavemon
+  ipfiremake iptraf-ng
   echo Build on $HOSTNAME > $BASEDIR/build/var/ipfire/firebuild
   cat /proc/version >> $BASEDIR/build/var/ipfire/firebuild
   echo >> $BASEDIR/build/var/ipfire/firebuild
diff --git a/src/initscripts/init.d/squid b/src/initscripts/init.d/squid
index 08a3029a202e802092fe4af0b5b2894a8aae99f2..62d5bea82bb06f18bd918dafa66911b13c1caae3 100644 (file)
--- a/src/initscripts/init.d/squid
+++ b/src/initscripts/init.d/squid
@@ -62,13 +62,25 @@ case "$1" in
                eval $(/usr/local/bin/readhash /var/ipfire/ethernet/settings)
 
                if [ -e /var/ipfire/proxy/enable -o -e /var/ipfire/proxy/enable_blue ]; then
-
                        # Add Address to errorpage stylesheet
                        sed "s|XXXhostXXX|$GREEN_ADDRESS|g" /var/ipfire/proxy/errorpage-$ERR_DESIGN.css > \
                                /etc/squid/errorpage.css
 
+                       boot_mesg "Creating Squid swap directories..."
+                       /usr/sbin/squid -z >/dev/null 2>&1
+                       evaluate_retval
+
+                       # Make sure that the process above has finished (wait up to five seconds).
+                       counter=5
+                       while [ ${counter} -gt 0 ]; do
+                               if pidofproc -s /usr/sbin/squid; then
+                                       sleep 1
+                                       counter=$(( ${counter} - 1 ))
+                               else
+                                       break
+                               fi
+                       done
+
                        boot_mesg "Starting Squid Proxy Server..."
-                       loadproc /usr/sbin/squid -z >/dev/null 2>&1
                        loadproc /usr/sbin/squid
                fi
 
diff --git a/src/initscripts/init.d/tor b/src/initscripts/init.d/tor
index e27241f5617e93b53d68ce760df57ccf0d9ea034..d631e867f915e123e918383b3c9caa86af1e5a71 100644 (file)
--- a/src/initscripts/init.d/tor
+++ b/src/initscripts/init.d/tor
@@ -9,9 +9,13 @@
 . /etc/sysconfig/rc
 . ${rc_functions}
 
-function setup_firewall() {
-       eval $(/usr/local/bin/readhash /var/ipfire/tor/settings)
+eval $(/usr/local/bin/readhash /var/ipfire/tor/settings)
+
+function tor_is_enabled() {
+       [ "${TOR_ENABLED}" = "on" ] || [ "${TOR_RELAY_ENABLED}" = "on" ]
+}
 
+function setup_firewall() {
        # Flush all rules.
        flush_firewall
 
@@ -27,6 +31,8 @@ function flush_firewall() {
 
 case "${1}" in
        start)
+               tor_is_enabled || exit 0
+
                # Setup firewall.
                setup_firewall
 
diff --git a/src/paks/xinetd/install.sh b/src/paks/xinetd/install.sh
new file mode 100644 (file)
index 0000000..347fd36
--- /dev/null
+++ b/src/paks/xinetd/install.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+############################################################################
+#                                                                          #
+# This file is part of the IPFire Firewall.                                #
+#                                                                          #
+# IPFire is free software; you can redistribute it and/or modify           #
+# it under the terms of the GNU General Public License as published by     #
+# the Free Software Foundation; either version 2 of the License, or        #
+# (at your option) any later version.                                      #
+#                                                                          #
+# IPFire is distributed in the hope that it will be useful,                #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of           #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the            #
+# GNU General Public License for more details.                             #
+#                                                                          #
+# You should have received a copy of the GNU General Public License        #
+# along with IPFire; if not, write to the Free Software                    #
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA #
+#                                                                          #
+# Copyright (C) 2007 IPFire-Team <info@ipfire.org>.                        #
+#                                                                          #
+############################################################################
+#
+. /opt/pakfire/lib/functions.sh
+extract_files
+restore_backup ${NAME}
+
+mkdir -p /etc/xinetd.d
+
+start_service --background ${NAME}
+
+ln -sf ../init.d/${NAME} /etc/rc.d/rc0.d/K30xinetd
+ln -sf ../init.d/${NAME} /etc/rc.d/rc3.d/S30xinetd
+ln -sf ../init.d/${NAME} /etc/rc.d/rc6.d/K30xinetd
diff --git a/src/paks/xinetd/uninstall.sh b/src/paks/xinetd/uninstall.sh
new file mode 100644 (file)
index 0000000..a7b8a53
--- /dev/null
+++ b/src/paks/xinetd/uninstall.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+############################################################################
+#                                                                          #
+# This file is part of the IPFire Firewall.                                #
+#                                                                          #
+# IPFire is free software; you can redistribute it and/or modify           #
+# it under the terms of the GNU General Public License as published by     #
+# the Free Software Foundation; either version 2 of the License, or        #
+# (at your option) any later version.                                      #
+#                                                                          #
+# IPFire is distributed in the hope that it will be useful,                #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of           #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the            #
+# GNU General Public License for more details.                             #
+#                                                                          #
+# You should have received a copy of the GNU General Public License        #
+# along with IPFire; if not, write to the Free Software                    #
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA #
+#                                                                          #
+# Copyright (C) 2007 IPFire-Team <info@ipfire.org>.                        #
+#                                                                          #
+############################################################################
+#
+. /opt/pakfire/lib/functions.sh
+stop_service ${NAME}
+make_backup ${NAME}
+remove_files
diff --git a/src/paks/xinetd/update.sh b/src/paks/xinetd/update.sh
new file mode 100644 (file)
index 0000000..89c40d0
--- /dev/null
+++ b/src/paks/xinetd/update.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+############################################################################
+#                                                                          #
+# This file is part of the IPFire Firewall.                                #
+#                                                                          #
+# IPFire is free software; you can redistribute it and/or modify           #
+# it under the terms of the GNU General Public License as published by     #
+# the Free Software Foundation; either version 2 of the License, or        #
+# (at your option) any later version.                                      #
+#                                                                          #
+# IPFire is distributed in the hope that it will be useful,                #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of           #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the            #
+# GNU General Public License for more details.                             #
+#                                                                          #
+# You should have received a copy of the GNU General Public License        #
+# along with IPFire; if not, write to the Free Software                    #
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA #
+#                                                                          #
+# Copyright (C) 2007 IPFire-Team <info@ipfire.org>.                        #
+#                                                                          #
+############################################################################
+#
+. /opt/pakfire/lib/functions.sh
+./uninstall.sh
+./install.sh
diff --git a/src/patches/bridge-utils-1.5-compile-fix-1.patch b/src/patches/bridge-utils-1.5-compile-fix-1.patch
new file mode 100644 (file)
index 0000000..c269608
--- /dev/null
+++ b/src/patches/bridge-utils-1.5-compile-fix-1.patch
@@ -0,0 +1,31 @@
+From 5eebb7f9288b7881ffb929b1fd494fe3ac3be27d Mon Sep 17 00:00:00 2001
+From: Russell Senior <russell@personaltelco.net>
+Date: Wed, 06 Mar 2013 20:49:42 +0000
+Subject: bridge-utils: Fix compile against linux-3.8.x
+
+Linux 3.8 has a header, include/uapi/linux/if_bridge.h that uses a
+struct in6_addr but doesn't define it.  The trivial seeming fix of
+including the header that does define it causes more problems.  The
+problem was discussed on mailing lists in January 2013.  The final
+suggestion I found was here:
+
+      http://www.redhat.com/archives/libvir-list/2013-January/msg01253.html
+
+This is intended to implement that suggestion.
+
+Signed-off-by: Russell Senior <russell@personaltelco.net>
+---
+diff --git a/libbridge/libbridge.h b/libbridge/libbridge.h
+index 39964f2..dd14bae 100644
+--- a/libbridge/libbridge.h
++++ b/libbridge/libbridge.h
+@@ -20,6 +20,7 @@
+ #define _LIBBRIDGE_H
+ #include <sys/socket.h>
++#include <netinet/in.h>
+ #include <linux/if.h>
+ #include <linux/if_bridge.h>
+--
+cgit v0.9.2
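The patch above explains why bridge-utils 1.5 fails to build against linux-3.8 headers: <linux/if_bridge.h> references struct in6_addr without defining it, so <netinet/in.h> has to be included first. A minimal sketch of that include order is shown below; the file name, the build command and the printed macro are illustrative only and are not part of bridge-utils or of this commit.

/* include-order-check.c - hypothetical standalone test, not shipped by IPFire.
 * It compiles only when <netinet/in.h>, which defines struct in6_addr, is
 * included before the kernel's <linux/if_bridge.h>, mirroring the patched
 * libbridge/libbridge.h above.
 * Build with: gcc -o include-order-check include-order-check.c */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>        /* provides struct in6_addr used by if_bridge.h */
#include <linux/if.h>
#include <linux/if_bridge.h>   /* on linux-3.8 headers this fails without the line above */

int main(void)
{
	/* BRCTL_GET_VERSION comes from <linux/if_bridge.h>; printing it shows
	 * that the header was usable with this include order. */
	printf("if_bridge.h included, BRCTL_GET_VERSION = %d\n", BRCTL_GET_VERSION);
	return 0;
}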
diff --git a/src/patches/compat-drivers-3.8.3-ath_ignore_eeprom_regdomain.patch b/src/patches/compat-drivers-3.8.3-ath_ignore_eeprom_regdomain.patch
new file mode 100644 (file)
index 0000000..acfb12f
--- /dev/null
+++ b/src/patches/compat-drivers-3.8.3-ath_ignore_eeprom_regdomain.patch
@@ -0,0 +1,39 @@
+diff -Naur compat-drivers-3.8.3-2-snpu.org/drivers/net/wireless/ath/regd.c compat-drivers-3.8.3-2-snpu/drivers/net/wireless/ath/regd.c
+--- compat-drivers-3.8.3-2-snpu.org/drivers/net/wireless/ath/regd.c    2013-03-15 22:55:09.000000000 +0100
++++ compat-drivers-3.8.3-2-snpu/drivers/net/wireless/ath/regd.c        2013-08-22 11:51:36.793297656 +0200
+@@ -200,6 +200,8 @@
+       u32 bandwidth = 0;
+       int r;
++      return;
++
+       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+               if (!wiphy->bands[band])
+@@ -259,6 +261,8 @@
+       u32 bandwidth = 0;
+       int r;
++      return;
++
+       sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+       if (!sband)
+               return;
+@@ -308,6 +312,8 @@
+       struct ieee80211_channel *ch;
+       unsigned int i;
++      return;
++
+       if (!wiphy->bands[IEEE80211_BAND_5GHZ])
+               return;
+@@ -514,6 +520,8 @@
+ {
+       const struct ieee80211_regdomain *regd;
++      return 0;
++
+       wiphy->reg_notifier = reg_notifier;
+       wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
diff --git a/src/patches/grsecurity-2.9.1-3.10.9-201308202015.patch b/src/patches/grsecurity-2.9.1-3.10.9-201308202015.patch
new file mode 100644 (file)
index 0000000..24d81a0
--- /dev/null
+++ b/src/patches/grsecurity-2.9.1-3.10.9-201308202015.patch
@@ -0,0 +1,109106 @@
+diff --git a/Documentation/dontdiff b/Documentation/dontdiff
+index b89a739..79768fb 100644
+--- a/Documentation/dontdiff
++++ b/Documentation/dontdiff
+@@ -2,9 +2,11 @@
+ *.aux
+ *.bin
+ *.bz2
++*.c.[012]*.*
+ *.cis
+ *.cpio
+ *.csp
++*.dbg
+ *.dsp
+ *.dvi
+ *.elf
+@@ -14,6 +16,7 @@
+ *.gcov
+ *.gen.S
+ *.gif
++*.gmo
+ *.grep
+ *.grp
+ *.gz
+@@ -48,14 +51,17 @@
+ *.tab.h
+ *.tex
+ *.ver
++*.vim
+ *.xml
+ *.xz
+ *_MODULES
++*_reg_safe.h
+ *_vga16.c
+ *~
+ \#*#
+ *.9
+-.*
++.[^g]*
++.gen*
+ .*.d
+ .mm
+ 53c700_d.h
+@@ -69,9 +75,11 @@ Image
+ Module.markers
+ Module.symvers
+ PENDING
++PERF*
+ SCCS
+ System.map*
+ TAGS
++TRACEEVENT-CFLAGS
+ aconf
+ af_names.h
+ aic7*reg.h*
+@@ -80,6 +88,7 @@ aic7*seq.h*
+ aicasm
+ aicdb.h*
+ altivec*.c
++ashldi3.S
+ asm-offsets.h
+ asm_offsets.h
+ autoconf.h*
+@@ -92,19 +101,24 @@ bounds.h
+ bsetup
+ btfixupprep
+ build
++builtin-policy.h
+ bvmlinux
+ bzImage*
+ capability_names.h
+ capflags.c
+ classlist.h*
++clut_vga16.c
++common-cmds.h
+ comp*.log
+ compile.h*
+ conf
+ config
+ config-*
+ config_data.h*
++config.c
+ config.mak
+ config.mak.autogen
++config.tmp
+ conmakehash
+ consolemap_deftbl.c*
+ cpustr.h
+@@ -115,9 +129,11 @@ devlist.h*
+ dnotify_test
+ docproc
+ dslm
++dtc-lexer.lex.c
+ elf2ecoff
+ elfconfig.h*
+ evergreen_reg_safe.h
++exception_policy.conf
+ fixdep
+ flask.h
+ fore200e_mkfirm
+@@ -125,12 +141,15 @@ fore200e_pca_fw.c*
+ gconf
+ gconf.glade.h
+ gen-devlist
++gen-kdb_cmds.c
+ gen_crc32table
+ gen_init_cpio
+ generated
+ genheaders
+ genksyms
+ *_gray256.c
++hash
++hid-example
+ hpet_example
+ hugepage-mmap
+ hugepage-shm
+@@ -145,14 +164,14 @@ int32.c
+ int4.c
+ int8.c
+ kallsyms
+-kconfig
++kern_constants.h
+ keywords.c
+ ksym.c*
+ ksym.h*
+ kxgettext
+ lex.c
+ lex.*.c
+-linux
++lib1funcs.S
+ logo_*.c
+ logo_*_clut224.c
+ logo_*_mono.c
+@@ -162,14 +181,15 @@ mach-types.h
+ machtypes.h
+ map
+ map_hugetlb
+-media
+ mconf
++mdp
+ miboot*
+ mk_elfconfig
+ mkboot
+ mkbugboot
+ mkcpustr
+ mkdep
++mkpiggy
+ mkprep
+ mkregtable
+ mktables
+@@ -185,6 +205,8 @@ oui.c*
+ page-types
+ parse.c
+ parse.h
++parse-events*
++pasyms.h
+ patches*
+ pca200e.bin
+ pca200e_ecd.bin2
+@@ -194,6 +216,7 @@ perf-archive
+ piggyback
+ piggy.gzip
+ piggy.S
++pmu-*
+ pnmtologo
+ ppc_defs.h*
+ pss_boot.h
+@@ -203,7 +226,10 @@ r200_reg_safe.h
+ r300_reg_safe.h
+ r420_reg_safe.h
+ r600_reg_safe.h
++realmode.lds
++realmode.relocs
+ recordmcount
++regdb.c
+ relocs
+ rlim_names.h
+ rn50_reg_safe.h
+@@ -213,8 +239,12 @@ series
+ setup
+ setup.bin
+ setup.elf
++signing_key*
++size_overflow_hash.h
+ sImage
++slabinfo
+ sm_tbl*
++sortextable
+ split-include
+ syscalltab.h
+ tables.c
+@@ -224,6 +254,7 @@ tftpboot.img
+ timeconst.h
+ times.h*
+ trix_boot.h
++user_constants.h
+ utsrelease.h*
+ vdso-syms.lds
+ vdso.lds
+@@ -235,13 +266,17 @@ vdso32.lds
+ vdso32.so.dbg
+ vdso64.lds
+ vdso64.so.dbg
++vdsox32.lds
++vdsox32-syms.lds
+ version.h*
+ vmImage
+ vmlinux
+ vmlinux-*
+ vmlinux.aout
+ vmlinux.bin.all
++vmlinux.bin.bz2
+ vmlinux.lds
++vmlinux.relocs
+ vmlinuz
+ voffset.h
+ vsyscall.lds
+@@ -249,9 +284,12 @@ vsyscall_32.lds
+ wanxlfw.inc
+ uImage
+ unifdef
++utsrelease.h
+ wakeup.bin
+ wakeup.elf
+ wakeup.lds
++x509*
+ zImage*
+ zconf.hash.c
++zconf.lex.c
+ zoffset.h
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 2fe6e76..889ee23 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -976,6 +976,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+                       Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
+                       Default: 1024
++      grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
++                      ignore grsecurity's /proc restrictions
++
++
+       hashdist=       [KNL,NUMA] Large hashes allocated during boot
+                       are distributed across NUMA nodes.  Defaults on
+                       for 64-bit NUMA, off otherwise.
+@@ -1928,6 +1932,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+                       noexec=on: enable non-executable mappings (default)
+                       noexec=off: disable non-executable mappings
++      nopcid          [X86-64]
++                      Disable PCID (Process-Context IDentifier) even if it
++                      is supported by the processor.
++
+       nosmap          [X86]
+                       Disable SMAP (Supervisor Mode Access Prevention)
+                       even if it is supported by processor.
+@@ -2195,6 +2203,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+                       the specified number of seconds.  This is to be used if
+                       your oopses keep scrolling off the screen.
++      pax_nouderef    [X86] disables UDEREF.  Most likely needed under certain
++                      virtualization environments that don't cope well with the
++                      expand down segment used by UDEREF on X86-32 or the frequent
++                      page table updates on X86-64.
++
++      pax_sanitize_slab=
++                      0/1 to disable/enable slab object sanitization (enabled by
++                      default).
++
++      pax_softmode=   0/1 to disable/enable PaX softmode on boot already.
++
++      pax_extra_latent_entropy
++                      Enable a very simple form of latent entropy extraction
++                      from the first 4GB of memory as the bootmem allocator
++                      passes the memory pages to the buddy allocator.
++
++      pax_weakuderef  [X86-64] enables the weaker but faster form of UDEREF
++                      when the processor supports PCID.
++
+       pcbit=          [HW,ISDN]
+       pcd.            [PARIDE]
+diff --git a/Makefile b/Makefile
+index 4b31d62..ac99d49 100644
+--- a/Makefile
++++ b/Makefile
+@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
+ HOSTCC       = gcc
+ HOSTCXX      = g++
+-HOSTCFLAGS   = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
+-HOSTCXXFLAGS = -O2
++HOSTCFLAGS   = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
++HOSTCFLAGS  += $(call cc-option, -Wno-empty-body)
++HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
+ # Decide whether to build built-in, modular, or both.
+ # Normally, just do built-in.
+@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
+ # Rules shared between *config targets and build targets
+ # Basic helpers built in scripts/
+-PHONY += scripts_basic
+-scripts_basic:
++PHONY += scripts_basic gcc-plugins
++scripts_basic: gcc-plugins
+       $(Q)$(MAKE) $(build)=scripts/basic
+       $(Q)rm -f .tmp_quiet_recordmcount
+@@ -576,6 +577,65 @@ else
+ KBUILD_CFLAGS += -O2
+ endif
++ifndef DISABLE_PAX_PLUGINS
++ifeq ($(call cc-ifversion, -ge, 0408, y), y)
++PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
++else
++PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
++endif
++ifneq ($(PLUGINCC),)
++ifdef CONFIG_PAX_CONSTIFY_PLUGIN
++CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
++endif
++ifdef CONFIG_PAX_MEMORY_STACKLEAK
++STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
++STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
++endif
++ifdef CONFIG_KALLOCSTAT_PLUGIN
++KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
++endif
++ifdef CONFIG_PAX_KERNEXEC_PLUGIN
++KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
++KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
++KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
++endif
++ifdef CONFIG_CHECKER_PLUGIN
++ifeq ($(call cc-ifversion, -ge, 0406, y), y)
++CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
++endif
++endif
++COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
++ifdef CONFIG_PAX_SIZE_OVERFLOW
++SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
++endif
++ifdef CONFIG_PAX_LATENT_ENTROPY
++LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
++endif
++ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
++STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
++endif
++GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
++GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
++GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
++GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
++export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
++ifeq ($(KBUILD_EXTMOD),)
++gcc-plugins:
++      $(Q)$(MAKE) $(build)=tools/gcc
++else
++gcc-plugins: ;
++endif
++else
++gcc-plugins:
++ifeq ($(call cc-ifversion, -ge, 0405, y), y)
++      $(error Your gcc installation does not support plugins.  If the necessary headers for plugin support are missing, they should be installed.  On Debian, apt-get install gcc-<ver>-plugin-dev.  If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
++else
++      $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
++endif
++      $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure.  PAX_SIZE_OVERFLOW will not be active."
++endif
++endif
++
+ include $(srctree)/arch/$(SRCARCH)/Makefile
+ ifdef CONFIG_READABLE_ASM
+@@ -733,7 +793,7 @@ export mod_sign_cmd
+ ifeq ($(KBUILD_EXTMOD),)
+-core-y                += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
++core-y                += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
+ vmlinux-dirs  := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
+                    $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
+@@ -782,6 +842,8 @@ endif
+ # The actual objects are generated when descending, 
+ # make sure no implicit rule kicks in
++$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
+ # Handle descending into subdirectories listed in $(vmlinux-dirs)
+@@ -791,7 +853,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
+ # Error messages still appears in the original language
+ PHONY += $(vmlinux-dirs)
+-$(vmlinux-dirs): prepare scripts
++$(vmlinux-dirs): gcc-plugins prepare scripts
+       $(Q)$(MAKE) $(build)=$@
+ # Store (new) KERNELRELASE string in include/config/kernel.release
+@@ -835,6 +897,7 @@ prepare0: archprepare FORCE
+       $(Q)$(MAKE) $(build)=.
+ # All the preparing..
++prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
+ prepare: prepare0
+ # Generate some files
+@@ -942,6 +1005,8 @@ all: modules
+ #     using awk while concatenating to the final file.
+ PHONY += modules
++modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
+       $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
+       @$(kecho) '  Building modules, stage 2.';
+@@ -957,7 +1022,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
+ # Target to prepare building external modules
+ PHONY += modules_prepare
+-modules_prepare: prepare scripts
++modules_prepare: gcc-plugins prepare scripts
+ # Target to install modules
+ PHONY += modules_install
+@@ -1023,7 +1088,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
+                 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
+                 signing_key.priv signing_key.x509 x509.genkey         \
+                 extra_certificates signing_key.x509.keyid             \
+-                signing_key.x509.signer
++                signing_key.x509.signer tools/gcc/size_overflow_hash.h
+ # clean - Delete most, but leave enough to build external modules
+ #
+@@ -1063,6 +1128,7 @@ distclean: mrproper
+               \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
+               -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
+               -o -name '.*.rej' \
++              -o -name '.*.rej' -o -name '*.so' \
+               -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
+               -type f -print | xargs rm -f
+@@ -1223,6 +1289,8 @@ PHONY += $(module-dirs) modules
+ $(module-dirs): crmodverdir $(objtree)/Module.symvers
+       $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
++modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+ modules: $(module-dirs)
+       @$(kecho) '  Building modules, stage 2.';
+       $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
+@@ -1359,17 +1427,21 @@ else
+         target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
+ endif
+-%.s: %.c prepare scripts FORCE
++%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%.s: %.c gcc-plugins prepare scripts FORCE
+       $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.i: %.c prepare scripts FORCE
+       $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.o: %.c prepare scripts FORCE
++%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%.o: %.c gcc-plugins prepare scripts FORCE
+       $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.lst: %.c prepare scripts FORCE
+       $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.s: %.S prepare scripts FORCE
++%.s: %.S gcc-plugins prepare scripts FORCE
+       $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.o: %.S prepare scripts FORCE
++%.o: %.S gcc-plugins prepare scripts FORCE
+       $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.symtypes: %.c prepare scripts FORCE
+       $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+@@ -1379,11 +1451,15 @@ endif
+       $(cmd_crmodverdir)
+       $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+       $(build)=$(build-dir)
+-%/: prepare scripts FORCE
++%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%/: gcc-plugins prepare scripts FORCE
+       $(cmd_crmodverdir)
+       $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+       $(build)=$(build-dir)
+-%.ko: prepare scripts FORCE
++%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%.ko: gcc-plugins prepare scripts FORCE
+       $(cmd_crmodverdir)
+       $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1)   \
+       $(build)=$(build-dir) $(@:.ko=.o)
+diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
+index c2cbe4f..f7264b4 100644
+--- a/arch/alpha/include/asm/atomic.h
++++ b/arch/alpha/include/asm/atomic.h
+@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ #define atomic_dec(v) atomic_sub(1,(v))
+ #define atomic64_dec(v) atomic64_sub(1,(v))
++#define atomic64_read_unchecked(v)            atomic64_read(v)
++#define atomic64_set_unchecked(v, i)          atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v)          atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v)   atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v)          atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v)             atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v)      atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v)             atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n)   atomic64_cmpxchg((v), (o), (n))
++
+ #define smp_mb__before_atomic_dec()   smp_mb()
+ #define smp_mb__after_atomic_dec()    smp_mb()
+ #define smp_mb__before_atomic_inc()   smp_mb()
+diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
+index ad368a9..fbe0f25 100644
+--- a/arch/alpha/include/asm/cache.h
++++ b/arch/alpha/include/asm/cache.h
+@@ -4,19 +4,19 @@
+ #ifndef __ARCH_ALPHA_CACHE_H
+ #define __ARCH_ALPHA_CACHE_H
++#include <linux/const.h>
+ /* Bytes per L1 (data) cache line. */
+ #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
+-# define L1_CACHE_BYTES     64
+ # define L1_CACHE_SHIFT     6
+ #else
+ /* Both EV4 and EV5 are write-through, read-allocate,
+    direct-mapped, physical.
+ */
+-# define L1_CACHE_BYTES     32
+ # define L1_CACHE_SHIFT     5
+ #endif
++#define L1_CACHE_BYTES     (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES    L1_CACHE_BYTES
+ #endif
+diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
+index 968d999..d36b2df 100644
+--- a/arch/alpha/include/asm/elf.h
++++ b/arch/alpha/include/asm/elf.h
+@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+ #define ELF_ET_DYN_BASE               (TASK_UNMAPPED_BASE + 0x1000000)
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
++
++#define PAX_DELTA_MMAP_LEN    (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_STACK_LEN   (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
++#endif
++
+ /* $0 is set by ld.so to a pointer to a function which might be 
+    registered using atexit.  This provides a mean for the dynamic
+    linker to call DT_FINI functions for shared libraries that have
+diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
+index bc2a0da..8ad11ee 100644
+--- a/arch/alpha/include/asm/pgalloc.h
++++ b/arch/alpha/include/asm/pgalloc.h
+@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+       pgd_set(pgd, pmd);
+ }
++static inline void
++pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
++{
++      pgd_populate(mm, pgd, pmd);
++}
++
+ extern pgd_t *pgd_alloc(struct mm_struct *mm);
+ static inline void
+diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
+index 81a4342..348b927 100644
+--- a/arch/alpha/include/asm/pgtable.h
++++ b/arch/alpha/include/asm/pgtable.h
+@@ -102,6 +102,17 @@ struct vm_area_struct;
+ #define PAGE_SHARED   __pgprot(_PAGE_VALID | __ACCESS_BITS)
+ #define PAGE_COPY     __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+ #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC   __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
++# define PAGE_COPY_NOEXEC     __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++#else
++# define PAGE_SHARED_NOEXEC   PAGE_SHARED
++# define PAGE_COPY_NOEXEC     PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL   __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
+ #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
+diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
+index 2fd00b7..cfd5069 100644
+--- a/arch/alpha/kernel/module.c
++++ b/arch/alpha/kernel/module.c
+@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
+       /* The small sections were sorted to the end of the segment.
+          The following should definitely cover them.  */
+-      gp = (u64)me->module_core + me->core_size - 0x8000;
++      gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
+       got = sechdrs[me->arch.gotsecindex].sh_addr;
+       for (i = 0; i < n; i++) {
+diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
+index b9e37ad..44c24e7 100644
+--- a/arch/alpha/kernel/osf_sys.c
++++ b/arch/alpha/kernel/osf_sys.c
+@@ -1297,10 +1297,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
+    generic version except that we know how to honor ADDR_LIMIT_32BIT.  */
+ static unsigned long
+-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+-                       unsigned long limit)
++arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
++                       unsigned long limit, unsigned long flags)
+ {
+       struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+       info.flags = 0;
+       info.length = len;
+@@ -1308,6 +1309,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+       info.high_limit = limit;
+       info.align_mask = 0;
+       info.align_offset = 0;
++      info.threadstack_offset = offset;
+       return vm_unmapped_area(&info);
+ }
+@@ -1340,20 +1342,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+          merely specific addresses, but regions of memory -- perhaps
+          this feature should be incorporated into all ports?  */
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+-              addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
++              addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
+               if (addr != (unsigned long) -ENOMEM)
+                       return addr;
+       }
+       /* Next, try allocating at TASK_UNMAPPED_BASE.  */
+-      addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
+-                                       len, limit);
++      addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
++
+       if (addr != (unsigned long) -ENOMEM)
+               return addr;
+       /* Finally, try allocating in low memory.  */
+-      addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
++      addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
+       return addr;
+ }
+diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
+index 0c4132d..88f0d53 100644
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
+       __reload_thread(pcb);
+ }
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ *         2 when patched PLT trampoline was detected
++ *         3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++      int err;
++
++      do { /* PaX: patched PLT emulation #1 */
++              unsigned int ldah, ldq, jmp;
++
++              err = get_user(ldah, (unsigned int *)regs->pc);
++              err |= get_user(ldq, (unsigned int *)(regs->pc+4));
++              err |= get_user(jmp, (unsigned int *)(regs->pc+8));
++
++              if (err)
++                      break;
++
++              if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++                  (ldq & 0xFFFF0000U) == 0xA77B0000U &&
++                  jmp == 0x6BFB0000U)
++              {
++                      unsigned long r27, addr;
++                      unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++                      unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
++
++                      addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++                      err = get_user(r27, (unsigned long *)addr);
++                      if (err)
++                              break;
++
++                      regs->r27 = r27;
++                      regs->pc = r27;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: patched PLT emulation #2 */
++              unsigned int ldah, lda, br;
++
++              err = get_user(ldah, (unsigned int *)regs->pc);
++              err |= get_user(lda, (unsigned int *)(regs->pc+4));
++              err |= get_user(br, (unsigned int *)(regs->pc+8));
++
++              if (err)
++                      break;
++
++              if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++                  (lda & 0xFFFF0000U) == 0xA77B0000U &&
++                  (br & 0xFFE00000U) == 0xC3E00000U)
++              {
++                      unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
++                      unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++                      unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
++
++                      regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++                      regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: unpatched PLT emulation */
++              unsigned int br;
++
++              err = get_user(br, (unsigned int *)regs->pc);
++
++              if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
++                      unsigned int br2, ldq, nop, jmp;
++                      unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
++
++                      addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++                      err = get_user(br2, (unsigned int *)addr);
++                      err |= get_user(ldq, (unsigned int *)(addr+4));
++                      err |= get_user(nop, (unsigned int *)(addr+8));
++                      err |= get_user(jmp, (unsigned int *)(addr+12));
++                      err |= get_user(resolver, (unsigned long *)(addr+16));
++
++                      if (err)
++                              break;
++
++                      if (br2 == 0xC3600000U &&
++                          ldq == 0xA77B000CU &&
++                          nop == 0x47FF041FU &&
++                          jmp == 0x6B7B0000U)
++                      {
++                              regs->r28 = regs->pc+4;
++                              regs->r27 = addr+16;
++                              regs->pc = resolver;
++                              return 3;
++                      }
++              }
++      } while (0);
++#endif
++
++      return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++      unsigned long i;
++
++      printk(KERN_ERR "PAX: bytes at PC: ");
++      for (i = 0; i < 5; i++) {
++              unsigned int c;
++              if (get_user(c, (unsigned int *)pc+i))
++                      printk(KERN_CONT "???????? ");
++              else
++                      printk(KERN_CONT "%08x ", c);
++      }
++      printk("\n");
++}
++#endif
+ /*
+  * This routine handles page faults.  It determines the address,
+@@ -133,8 +251,29 @@ retry:
+  good_area:
+       si_code = SEGV_ACCERR;
+       if (cause < 0) {
+-              if (!(vma->vm_flags & VM_EXEC))
++              if (!(vma->vm_flags & VM_EXEC)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++                      if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
++                              goto bad_area;
++
++                      up_read(&mm->mmap_sem);
++                      switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++                      case 2:
++                      case 3:
++                              return;
++#endif
++
++                      }
++                      pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
++                      do_group_exit(SIGKILL);
++#else
+                       goto bad_area;
++#endif
++
++              }
+       } else if (!cause) {
+               /* Allow reads even for write-only mappings */
+               if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 18a9f5e..ca910b7 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1766,7 +1766,7 @@ config ALIGNMENT_TRAP
+ config UACCESS_WITH_MEMCPY
+       bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
+-      depends on MMU
++      depends on MMU && !PAX_MEMORY_UDEREF
+       default y if CPU_FEROCEON
+       help
+         Implement faster copy_to_user and clear_user methods for CPU
+diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
+index da1c77d..2ee6056 100644
+--- a/arch/arm/include/asm/atomic.h
++++ b/arch/arm/include/asm/atomic.h
+@@ -17,17 +17,35 @@
+ #include <asm/barrier.h>
+ #include <asm/cmpxchg.h>
++#ifdef CONFIG_GENERIC_ATOMIC64
++#include <asm-generic/atomic64.h>
++#endif
++
+ #define ATOMIC_INIT(i)        { (i) }
+ #ifdef __KERNEL__
++#define _ASM_EXTABLE(from, to)                \
++"     .pushsection __ex_table,\"a\"\n"\
++"     .align  3\n"                    \
++"     .long   " #from ", " #to"\n"    \
++"     .popsection"
++
+ /*
+  * On ARM, ordinary assignment (str instruction) doesn't clear the local
+  * strex/ldrex monitor on some implementations. The reason we can use it for
+  * atomic_set() is the clrex or dummy strex done on every exception return.
+  */
+ #define atomic_read(v)        (*(volatile int *)&(v)->counter)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++      return v->counter;
++}
+ #define atomic_set(v,i)       (((v)->counter) = (i))
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++      v->counter = i;
++}
+ #if __LINUX_ARM_ARCH__ >= 6
+@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
+       int result;
+       __asm__ __volatile__("@ atomic_add\n"
++"1:   ldrex   %1, [%3]\n"
++"     adds    %0, %1, %4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"     bvc     3f\n"
++"2:   bkpt    0xf103\n"
++"3:\n"
++#endif
++
++"     strex   %1, %0, [%3]\n"
++"     teq     %1, #0\n"
++"     bne     1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++      _ASM_EXTABLE(2b, 4b)
++#endif
++
++      : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++      : "r" (&v->counter), "Ir" (i)
++      : "cc");
++}
++
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++      unsigned long tmp;
++      int result;
++
++      __asm__ __volatile__("@ atomic_add_unchecked\n"
+ "1:   ldrex   %0, [%3]\n"
+ "     add     %0, %0, %4\n"
+ "     strex   %1, %0, [%3]\n"
+@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
+       smp_mb();
+       __asm__ __volatile__("@ atomic_add_return\n"
++"1:   ldrex   %1, [%3]\n"
++"     adds    %0, %1, %4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"     bvc     3f\n"
++"     mov     %0, %1\n"
++"2:   bkpt    0xf103\n"
++"3:\n"
++#endif
++
++"     strex   %1, %0, [%3]\n"
++"     teq     %1, #0\n"
++"     bne     1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++      _ASM_EXTABLE(2b, 4b)
++#endif
++
++      : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++      : "r" (&v->counter), "Ir" (i)
++      : "cc");
++
++      smp_mb();
++
++      return result;
++}
++
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++      unsigned long tmp;
++      int result;
++
++      smp_mb();
++
++      __asm__ __volatile__("@ atomic_add_return_unchecked\n"
+ "1:   ldrex   %0, [%3]\n"
+ "     add     %0, %0, %4\n"
+ "     strex   %1, %0, [%3]\n"
+@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
+       int result;
+       __asm__ __volatile__("@ atomic_sub\n"
++"1:   ldrex   %1, [%3]\n"
++"     subs    %0, %1, %4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"     bvc     3f\n"
++"2:   bkpt    0xf103\n"
++"3:\n"
++#endif
++
++"     strex   %1, %0, [%3]\n"
++"     teq     %1, #0\n"
++"     bne     1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++      _ASM_EXTABLE(2b, 4b)
++#endif
++
++      : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++      : "r" (&v->counter), "Ir" (i)
++      : "cc");
++}
++
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++      unsigned long tmp;
++      int result;
++
++      __asm__ __volatile__("@ atomic_sub_unchecked\n"
+ "1:   ldrex   %0, [%3]\n"
+ "     sub     %0, %0, %4\n"
+ "     strex   %1, %0, [%3]\n"
+@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
+       smp_mb();
+       __asm__ __volatile__("@ atomic_sub_return\n"
+-"1:   ldrex   %0, [%3]\n"
+-"     sub     %0, %0, %4\n"
++"1:   ldrex   %1, [%3]\n"
++"     subs    %0, %1, %4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"     bvc     3f\n"
++"     mov     %0, %1\n"
++"2:   bkpt    0xf103\n"
++"3:\n"
++#endif
++
+ "     strex   %1, %0, [%3]\n"
+ "     teq     %1, #0\n"
+ "     bne     1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++      _ASM_EXTABLE(2b, 4b)
++#endif
++
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+       : "r" (&v->counter), "Ir" (i)
+       : "cc");
+@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+       return oldval;
+ }
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
++{
++      unsigned long oldval, res;
++
++      smp_mb();
++
++      do {
++              __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
++              "ldrex  %1, [%3]\n"
++              "mov    %0, #0\n"
++              "teq    %1, %4\n"
++              "strexeq %0, %5, [%3]\n"
++                  : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
++                  : "r" (&ptr->counter), "Ir" (old), "r" (new)
++                  : "cc");
++      } while (res);
++
++      smp_mb();
++
++      return oldval;
++}
++
+ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+ {
+       unsigned long tmp, tmp2;
+@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
+       return val;
+ }
++
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++      return atomic_add_return(i, v);
++}
++
+ #define atomic_add(i, v)      (void) atomic_add_return(i, v)
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++      (void) atomic_add_return(i, v);
++}
+ static inline int atomic_sub_return(int i, atomic_t *v)
+ {
+@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
+       return val;
+ }
+ #define atomic_sub(i, v)      (void) atomic_sub_return(i, v)
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++      (void) atomic_sub_return(i, v);
++}
+ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+ {
+@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+       return ret;
+ }
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++      return atomic_cmpxchg(v, old, new);
++}
++
+ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+ {
+       unsigned long flags;
+@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+ #endif /* __LINUX_ARM_ARCH__ */
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++      return xchg(&v->counter, new);
++}
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ }
+ #define atomic_inc(v)         atomic_add(1, v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++      atomic_add_unchecked(1, v);
++}
+ #define atomic_dec(v)         atomic_sub(1, v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++      atomic_sub_unchecked(1, v);
++}
+ #define atomic_inc_and_test(v)        (atomic_add_return(1, v) == 0)
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++      return atomic_add_return_unchecked(1, v) == 0;
++}
+ #define atomic_dec_and_test(v)        (atomic_sub_return(1, v) == 0)
+ #define atomic_inc_return(v)    (atomic_add_return(1, v))
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++      return atomic_add_return_unchecked(1, v);
++}
+ #define atomic_dec_return(v)    (atomic_sub_return(1, v))
+ #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+@@ -241,6 +428,14 @@ typedef struct {
+       u64 __aligned(8) counter;
+ } atomic64_t;
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++      u64 __aligned(8) counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
++
+ #define ATOMIC64_INIT(i) { (i) }
+ #ifdef CONFIG_ARM_LPAE
+@@ -257,6 +452,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
+       return result;
+ }
++static inline u64 atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++      u64 result;
++
++      __asm__ __volatile__("@ atomic64_read_unchecked\n"
++"     ldrd    %0, %H0, [%1]"
++      : "=&r" (result)
++      : "r" (&v->counter), "Qo" (v->counter)
++      );
++
++      return result;
++}
++
+ static inline void atomic64_set(atomic64_t *v, u64 i)
+ {
+       __asm__ __volatile__("@ atomic64_set\n"
+@@ -265,6 +473,15 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
+       : "r" (&v->counter), "r" (i)
+       );
+ }
++
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
++{
++      __asm__ __volatile__("@ atomic64_set_unchecked\n"
++"     strd    %2, %H2, [%1]"
++      : "=Qo" (v->counter)
++      : "r" (&v->counter), "r" (i)
++      );
++}
+ #else
+ static inline u64 atomic64_read(const atomic64_t *v)
+ {
+@@ -279,6 +496,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
+       return result;
+ }
++static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
++{
++      u64 result;
++
++      __asm__ __volatile__("@ atomic64_read_unchecked\n"
++"     ldrexd  %0, %H0, [%1]"
++      : "=&r" (result)
++      : "r" (&v->counter), "Qo" (v->counter)
++      );
++
++      return result;
++}
++
+ static inline void atomic64_set(atomic64_t *v, u64 i)
+ {
+       u64 tmp;
+@@ -292,6 +522,21 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
+       : "r" (&v->counter), "r" (i)
+       : "cc");
+ }
++
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
++{
++      u64 tmp;
++
++      __asm__ __volatile__("@ atomic64_set_unchecked\n"
++"1:   ldrexd  %0, %H0, [%2]\n"
++"     strexd  %0, %3, %H3, [%2]\n"
++"     teq     %0, #0\n"
++"     bne     1b"
++      : "=&r" (tmp), "=Qo" (v->counter)
++      : "r" (&v->counter), "r" (i)
++      : "cc");
++}
++
+ #endif
+ static inline void atomic64_add(u64 i, atomic64_t *v)
+@@ -302,6 +547,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
+       __asm__ __volatile__("@ atomic64_add\n"
+ "1:   ldrexd  %0, %H0, [%3]\n"
+ "     adds    %0, %0, %4\n"
++"     adcs    %H0, %H0, %H4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"     bvc     3f\n"
++"2:   bkpt    0xf103\n"
++"3:\n"
++#endif
++
++"     strexd  %1, %0, %H0, [%3]\n"
++"     teq     %1, #0\n"
++"     bne     1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++      _ASM_EXTABLE(2b, 4b)
++#endif
++
++      : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++      : "r" (&v->counter), "r" (i)
++      : "cc");
++}
++
++static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
++{
++      u64 result;
++      unsigned long tmp;
++
++      __asm__ __volatile__("@ atomic64_add_unchecked\n"
++"1:   ldrexd  %0, %H0, [%3]\n"
++"     adds    %0, %0, %4\n"
+ "     adc     %H0, %H0, %H4\n"
+ "     strexd  %1, %0, %H0, [%3]\n"
+ "     teq     %1, #0\n"
+@@ -313,12 +588,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
+ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+ {
+-      u64 result;
+-      unsigned long tmp;
++      u64 result, tmp;
+       smp_mb();
+       __asm__ __volatile__("@ atomic64_add_return\n"
++"1:   ldrexd  %1, %H1, [%3]\n"
++"     adds    %0, %1, %4\n"
++"     adcs    %H0, %H1, %H4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"     bvc     3f\n"
++"     mov     %0, %1\n"
++"     mov     %H0, %H1\n"
++"2:   bkpt    0xf103\n"
++"3:\n"
++#endif
++
++"     strexd  %1, %0, %H0, [%3]\n"
++"     teq     %1, #0\n"
++"     bne     1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++      _ASM_EXTABLE(2b, 4b)
++#endif
++
++      : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++      : "r" (&v->counter), "r" (i)
++      : "cc");
++
++      smp_mb();
++
++      return result;
++}
++
++static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
++{
++      u64 result;
++      unsigned long tmp;
++
++      smp_mb();
++
++      __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
+ "1:   ldrexd  %0, %H0, [%3]\n"
+ "     adds    %0, %0, %4\n"
+ "     adc     %H0, %H0, %H4\n"
+@@ -342,6 +654,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
+       __asm__ __volatile__("@ atomic64_sub\n"
+ "1:   ldrexd  %0, %H0, [%3]\n"
+ "     subs    %0, %0, %4\n"
++"     sbcs    %H0, %H0, %H4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"     bvc     3f\n"
++"2:   bkpt    0xf103\n"
++"3:\n"
++#endif
++
++"     strexd  %1, %0, %H0, [%3]\n"
++"     teq     %1, #0\n"
++"     bne     1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++      _ASM_EXTABLE(2b, 4b)
++#endif
++
++      : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++      : "r" (&v->counter), "r" (i)
++      : "cc");
++}
++
++static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
++{
++      u64 result;
++      unsigned long tmp;
++
++      __asm__ __volatile__("@ atomic64_sub_unchecked\n"
++"1:   ldrexd  %0, %H0, [%3]\n"
++"     subs    %0, %0, %4\n"
+ "     sbc     %H0, %H0, %H4\n"
+ "     strexd  %1, %0, %H0, [%3]\n"
+ "     teq     %1, #0\n"
+@@ -353,18 +695,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
+ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+ {
+-      u64 result;
+-      unsigned long tmp;
++      u64 result, tmp;
+       smp_mb();
+       __asm__ __volatile__("@ atomic64_sub_return\n"
+-"1:   ldrexd  %0, %H0, [%3]\n"
+-"     subs    %0, %0, %4\n"
+-"     sbc     %H0, %H0, %H4\n"
++"1:   ldrexd  %1, %H1, [%3]\n"
++"     subs    %0, %1, %4\n"
++"     sbcs    %H0, %H1, %H4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"     bvc     3f\n"
++"     mov     %0, %1\n"
++"     mov     %H0, %H1\n"
++"2:   bkpt    0xf103\n"
++"3:\n"
++#endif
++
+ "     strexd  %1, %0, %H0, [%3]\n"
+ "     teq     %1, #0\n"
+ "     bne     1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++      _ASM_EXTABLE(2b, 4b)
++#endif
++
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+       : "r" (&v->counter), "r" (i)
+       : "cc");
+@@ -398,6 +754,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+       return oldval;
+ }
++static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
++{
++      u64 oldval;
++      unsigned long res;
++
++      smp_mb();
++
++      do {
++              __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
++              "ldrexd         %1, %H1, [%3]\n"
++              "mov            %0, #0\n"
++              "teq            %1, %4\n"
++              "teqeq          %H1, %H4\n"
++              "strexdeq       %0, %5, %H5, [%3]"
++              : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
++              : "r" (&ptr->counter), "r" (old), "r" (new)
++              : "cc");
++      } while (res);
++
++      smp_mb();
++
++      return oldval;
++}
++
+ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+ {
+       u64 result;
+@@ -421,21 +801,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+ {
+-      u64 result;
+-      unsigned long tmp;
++      u64 result, tmp;
+       smp_mb();
+       __asm__ __volatile__("@ atomic64_dec_if_positive\n"
+-"1:   ldrexd  %0, %H0, [%3]\n"
+-"     subs    %0, %0, #1\n"
+-"     sbc     %H0, %H0, #0\n"
++"1:   ldrexd  %1, %H1, [%3]\n"
++"     subs    %0, %1, #1\n"
++"     sbcs    %H0, %H1, #0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"     bvc     3f\n"
++"     mov     %0, %1\n"
++"     mov     %H0, %H1\n"
++"2:   bkpt    0xf103\n"
++"3:\n"
++#endif
++
+ "     teq     %H0, #0\n"
+-"     bmi     2f\n"
++"     bmi     4f\n"
+ "     strexd  %1, %0, %H0, [%3]\n"
+ "     teq     %1, #0\n"
+ "     bne     1b\n"
+-"2:"
++"4:\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++      _ASM_EXTABLE(2b, 4b)
++#endif
++
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+       : "r" (&v->counter)
+       : "cc");
+@@ -458,13 +851,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+ "     teq     %0, %5\n"
+ "     teqeq   %H0, %H5\n"
+ "     moveq   %1, #0\n"
+-"     beq     2f\n"
++"     beq     4f\n"
+ "     adds    %0, %0, %6\n"
+-"     adc     %H0, %H0, %H6\n"
++"     adcs    %H0, %H0, %H6\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"     bvc     3f\n"
++"2:   bkpt    0xf103\n"
++"3:\n"
++#endif
++
+ "     strexd  %2, %0, %H0, [%4]\n"
+ "     teq     %2, #0\n"
+ "     bne     1b\n"
+-"2:"
++"4:\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++      _ASM_EXTABLE(2b, 4b)
++#endif
++
+       : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
+       : "r" (&v->counter), "r" (u), "r" (a)
+       : "cc");
+@@ -477,10 +882,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+ #define atomic64_add_negative(a, v)   (atomic64_add_return((a), (v)) < 0)
+ #define atomic64_inc(v)                       atomic64_add(1LL, (v))
++#define atomic64_inc_unchecked(v)     atomic64_add_unchecked(1LL, (v))
+ #define atomic64_inc_return(v)                atomic64_add_return(1LL, (v))
++#define atomic64_inc_return_unchecked(v)      atomic64_add_return_unchecked(1LL, (v))
+ #define atomic64_inc_and_test(v)      (atomic64_inc_return(v) == 0)
+ #define atomic64_sub_and_test(a, v)   (atomic64_sub_return((a), (v)) == 0)
+ #define atomic64_dec(v)                       atomic64_sub(1LL, (v))
++#define atomic64_dec_unchecked(v)     atomic64_sub_unchecked(1LL, (v))
+ #define atomic64_dec_return(v)                atomic64_sub_return(1LL, (v))
+ #define atomic64_dec_and_test(v)      (atomic64_dec_return((v)) == 0)
+ #define atomic64_inc_not_zero(v)      atomic64_add_unless((v), 1LL, 0LL)
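
The pattern running through this atomic.h hunk: each checked operation gains an overflow test (the `adds`/`bvc 3f`/`bkpt 0xf103` sequence plus an `__ex_table` entry), while the new `*_unchecked` variants keep the original wrap-around behaviour for counters that are allowed to overflow. A rough portable sketch of that split, ignoring atomicity entirely (illustration only; the struct and handler names below are made up, and the real detection is the ARM flag check shown above):

    /* Illustration of the PAX_REFCOUNT checked/unchecked split (not from the patch). */
    #include <limits.h>
    #include <stdio.h>

    struct counter_checked   { int counter; };   /* overflow is detected and refused */
    struct counter_unchecked { int counter; };   /* wraps silently, like *_unchecked */

    /* Hypothetical stand-in for the patch's trap: the kernel raises bkpt 0xf103
     * and the exception-table entry resumes execution past the store. */
    static void report_refcount_overflow(void)
    {
            fprintf(stderr, "refcount overflow caught\n");
    }

    static void add_checked(int i, struct counter_checked *v)
    {
            int sum;

            /* Plays the role of "adds ... bvc 3f": detect signed overflow first. */
            if (__builtin_add_overflow(v->counter, i, &sum)) {
                    report_refcount_overflow();   /* counter left unchanged */
                    return;
            }
            v->counter = sum;
    }

    static void add_unchecked(int i, struct counter_unchecked *v)
    {
            /* Two's-complement wrap, done in unsigned arithmetic to keep it defined. */
            v->counter = (int)((unsigned int)v->counter + (unsigned int)i);
    }

    int main(void)
    {
            struct counter_checked   c = { INT_MAX };
            struct counter_unchecked u = { INT_MAX };

            add_checked(1, &c);     /* refused and reported           */
            add_unchecked(1, &u);   /* wraps to INT_MIN, no complaint */

            printf("checked=%d unchecked=%d\n", c.counter, u.counter);
            return 0;
    }

In the `*_return` variants above, the `mov %0, %1` before the breakpoint restores the old value as the result, and the `_ASM_EXTABLE(2b, 4b)` entry lets the trap handler resume past the `strex`, so an overflowing update never reaches memory.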
+diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
+index 75fe66b..ba3dee4 100644
+--- a/arch/arm/include/asm/cache.h
++++ b/arch/arm/include/asm/cache.h
+@@ -4,8 +4,10 @@
+ #ifndef __ASMARM_CACHE_H
+ #define __ASMARM_CACHE_H
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT                CONFIG_ARM_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ /*
+  * Memory returned by kmalloc() may be used for DMA, so we must make
+@@ -24,5 +26,6 @@
+ #endif
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
++#define __read_only __attribute__ ((__section__(".data..read_only")))
+ #endif
+diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
+index 17d0ae8..014e350 100644
+--- a/arch/arm/include/asm/cacheflush.h
++++ b/arch/arm/include/asm/cacheflush.h
+@@ -116,7 +116,7 @@ struct cpu_cache_fns {
+       void (*dma_unmap_area)(const void *, size_t, int);
+       void (*dma_flush_range)(const void *, const void *);
+-};
++} __no_const;
+ /*
+  * Select the calling method
+diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
+index 6dcc164..b14d917 100644
+--- a/arch/arm/include/asm/checksum.h
++++ b/arch/arm/include/asm/checksum.h
+@@ -37,7 +37,19 @@ __wsum
+ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
+ __wsum
+-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
++__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
++
++static inline __wsum
++csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
++{
++      __wsum ret;
++      pax_open_userland();
++      ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
++      pax_close_userland();
++      return ret;
++}
++
++
+ /*
+  *    Fold a partial checksum without adding pseudo headers
+diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
+index 4f009c1..466c59b 100644
+--- a/arch/arm/include/asm/cmpxchg.h
++++ b/arch/arm/include/asm/cmpxchg.h
+@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
+ #define xchg(ptr,x) \
+       ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
++#define xchg_unchecked(ptr,x) \
++      ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+ #include <asm-generic/cmpxchg-local.h>
+diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
+index 6ddbe44..b5e38b1 100644
+--- a/arch/arm/include/asm/domain.h
++++ b/arch/arm/include/asm/domain.h
+@@ -48,18 +48,37 @@
+  * Domain types
+  */
+ #define DOMAIN_NOACCESS       0
+-#define DOMAIN_CLIENT 1
+ #ifdef CONFIG_CPU_USE_DOMAINS
++#define DOMAIN_USERCLIENT     1
++#define DOMAIN_KERNELCLIENT   1
+ #define DOMAIN_MANAGER        3
++#define DOMAIN_VECTORS                DOMAIN_USER
+ #else
++
++#ifdef CONFIG_PAX_KERNEXEC
+ #define DOMAIN_MANAGER        1
++#define DOMAIN_KERNEXEC       3
++#else
++#define DOMAIN_MANAGER        1
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#define DOMAIN_USERCLIENT     0
++#define DOMAIN_UDEREF         1
++#define DOMAIN_VECTORS                DOMAIN_KERNEL
++#else
++#define DOMAIN_USERCLIENT     1
++#define DOMAIN_VECTORS                DOMAIN_USER
++#endif
++#define DOMAIN_KERNELCLIENT   1
++
+ #endif
+ #define domain_val(dom,type)  ((type) << (2*(dom)))
+ #ifndef __ASSEMBLY__
+-#ifdef CONFIG_CPU_USE_DOMAINS
++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+ static inline void set_domain(unsigned val)
+ {
+       asm volatile(
+@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
+       isb();
+ }
+-#define modify_domain(dom,type)                                       \
+-      do {                                                    \
+-      struct thread_info *thread = current_thread_info();     \
+-      unsigned int domain = thread->cpu_domain;               \
+-      domain &= ~domain_val(dom, DOMAIN_MANAGER);             \
+-      thread->cpu_domain = domain | domain_val(dom, type);    \
+-      set_domain(thread->cpu_domain);                         \
+-      } while (0)
+-
++extern void modify_domain(unsigned int dom, unsigned int type);
+ #else
+ static inline void set_domain(unsigned val) { }
+ static inline void modify_domain(unsigned dom, unsigned type) { }
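
The new constants feed the same `domain_val(dom, type)` packing as before: two access-control bits per domain, assembled into one DACR image that `set_domain()` writes to CP15 and that the entry macros further down manipulate directly. A small host-side sketch of that packing and of what the (now out-of-line) `modify_domain()` does to it (illustration only; the domain indices below are placeholders, since the real DOMAIN_USER/KERNEL/IO numbers are defined elsewhere in domain.h):

    /* Sketch of the domain_val()/DACR packing used above (not from the patch). */
    #include <stdio.h>

    /* Architectural ARM domain access types: two bits per domain in the DACR. */
    #define DOM_NOACCESS 0u   /* every access faults                      */
    #define DOM_CLIENT   1u   /* accesses are checked against page tables */
    #define DOM_MANAGER  3u   /* accesses are never checked               */

    /* Same shape as the kernel macro in this hunk. */
    #define domain_val(dom, type) ((type) << (2u * (dom)))

    int main(void)
    {
            /* Placeholder indices -- the real DOMAIN_USER/KERNEL/IO values are
             * defined earlier in domain.h and are not part of this hunk. */
            unsigned int dom_user = 0, dom_kernel = 1, dom_io = 2;

            unsigned int dacr = domain_val(dom_user,   DOM_CLIENT) |
                                domain_val(dom_kernel, DOM_CLIENT) |
                                domain_val(dom_io,     DOM_CLIENT);

            /* modify_domain(dom, type), in effect: clear that domain's two bits,
             * OR in the new type, write the result back to the DACR (CP15 c3). */
            dacr &= ~domain_val(dom_kernel, 3u);
            dacr |=  domain_val(dom_kernel, DOM_MANAGER);

            printf("DACR image: 0x%08x\n", dacr);
            return 0;
    }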
+diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
+index 56211f2..17e8a25 100644
+--- a/arch/arm/include/asm/elf.h
++++ b/arch/arm/include/asm/elf.h
+@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+    the loader.  We need to make sure that it is out of the way of the program
+    that it will "exec", and that there is sufficient room for the brk.  */
+-#define ELF_ET_DYN_BASE       (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE               (TASK_SIZE / 3 * 2)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   0x00008000UL
++
++#define PAX_DELTA_MMAP_LEN    ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#define PAX_DELTA_STACK_LEN   ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#endif
+ /* When the program starts, a1 contains a pointer to a function to be 
+    registered with atexit, as per the SVR4 ABI.  A value of 0 means we 
+@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+ extern void elf_set_personality(const struct elf32_hdr *);
+ #define SET_PERSONALITY(ex)   elf_set_personality(&(ex))
+-struct mm_struct;
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #ifdef CONFIG_MMU
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+ struct linux_binprm;
+diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
+index de53547..52b9a28 100644
+--- a/arch/arm/include/asm/fncpy.h
++++ b/arch/arm/include/asm/fncpy.h
+@@ -81,7 +81,9 @@
+       BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) ||             \
+               (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
+                                                                       \
++      pax_open_kernel();                                              \
+       memcpy(dest_buf, (void const *)(__funcp_address & ~1), size);   \
++      pax_close_kernel();                                             \
+       flush_icache_range((unsigned long)(dest_buf),                   \
+               (unsigned long)(dest_buf) + (size));                    \
+                                                                       \
+diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
+index e42cf59..7b94b8f 100644
+--- a/arch/arm/include/asm/futex.h
++++ b/arch/arm/include/asm/futex.h
+@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+               return -EFAULT;
++      pax_open_userland();
++
+       smp_mb();
+       __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+       "1:     ldrex   %1, [%4]\n"
+@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+       : "cc", "memory");
+       smp_mb();
++      pax_close_userland();
++
+       *uval = val;
+       return ret;
+ }
+@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+               return -EFAULT;
++      pax_open_userland();
++
+       __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+       "1:     " TUSER(ldr) "  %1, [%4]\n"
+       "       teq     %1, %2\n"
+@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+       : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
+       : "cc", "memory");
++      pax_close_userland();
++
+       *uval = val;
+       return ret;
+ }
+@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+               return -EFAULT;
+       pagefault_disable();    /* implies preempt_disable() */
++      pax_open_userland();
+       switch (op) {
+       case FUTEX_OP_SET:
+@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+               ret = -ENOSYS;
+       }
++      pax_close_userland();
+       pagefault_enable();     /* subsumes preempt_enable() */
+       if (!ret) {
+diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
+index 83eb2f7..ed77159 100644
+--- a/arch/arm/include/asm/kmap_types.h
++++ b/arch/arm/include/asm/kmap_types.h
+@@ -4,6 +4,6 @@
+ /*
+  * This is the "bare minimum".  AIO seems to require this.
+  */
+-#define KM_TYPE_NR 16
++#define KM_TYPE_NR 17
+ #endif
+diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
+index 9e614a1..3302cca 100644
+--- a/arch/arm/include/asm/mach/dma.h
++++ b/arch/arm/include/asm/mach/dma.h
+@@ -22,7 +22,7 @@ struct dma_ops {
+       int     (*residue)(unsigned int, dma_t *);              /* optional */
+       int     (*setspeed)(unsigned int, dma_t *, int);        /* optional */
+       const char *type;
+-};
++} __do_const;
+ struct dma_struct {
+       void            *addr;          /* single DMA address           */
+diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
+index 2fe141f..192dc01 100644
+--- a/arch/arm/include/asm/mach/map.h
++++ b/arch/arm/include/asm/mach/map.h
+@@ -27,13 +27,16 @@ struct map_desc {
+ #define MT_MINICLEAN          6
+ #define MT_LOW_VECTORS                7
+ #define MT_HIGH_VECTORS               8
+-#define MT_MEMORY             9
++#define MT_MEMORY_RWX         9
+ #define MT_ROM                        10
+-#define MT_MEMORY_NONCACHED   11
++#define MT_MEMORY_NONCACHED_RX        11
+ #define MT_MEMORY_DTCM                12
+ #define MT_MEMORY_ITCM                13
+ #define MT_MEMORY_SO          14
+ #define MT_MEMORY_DMA_READY   15
++#define MT_MEMORY_RW          16
++#define MT_MEMORY_RX          17
++#define MT_MEMORY_NONCACHED_RW        18
+ #ifdef CONFIG_MMU
+ extern void iotable_init(struct map_desc *, int);
+diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
+index 12f71a1..04e063c 100644
+--- a/arch/arm/include/asm/outercache.h
++++ b/arch/arm/include/asm/outercache.h
+@@ -35,7 +35,7 @@ struct outer_cache_fns {
+ #endif
+       void (*set_debug)(unsigned long);
+       void (*resume)(void);
+-};
++} __no_const;
+ #ifdef CONFIG_OUTER_CACHE
+diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
+index cbdc7a2..32f44fe 100644
+--- a/arch/arm/include/asm/page.h
++++ b/arch/arm/include/asm/page.h
+@@ -114,7 +114,7 @@ struct cpu_user_fns {
+       void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
+       void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
+                       unsigned long vaddr, struct vm_area_struct *vma);
+-};
++} __no_const;
+ #ifdef MULTI_USER
+ extern struct cpu_user_fns cpu_user;
+diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
+index 943504f..c37a730 100644
+--- a/arch/arm/include/asm/pgalloc.h
++++ b/arch/arm/include/asm/pgalloc.h
+@@ -17,6 +17,7 @@
+ #include <asm/processor.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
++#include <asm/system_info.h>
+ #define check_pgt_cache()             do { } while (0)
+@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+       set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
+ }
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++      pud_populate(mm, pud, pmd);
++}
++
+ #else /* !CONFIG_ARM_LPAE */
+ /*
+@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ #define pmd_alloc_one(mm,addr)                ({ BUG(); ((pmd_t *)2); })
+ #define pmd_free(mm, pmd)             do { } while (0)
+ #define pud_populate(mm,pmd,pte)      BUG()
++#define pud_populate_kernel(mm,pmd,pte)       BUG()
+ #endif        /* CONFIG_ARM_LPAE */
+@@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+       __free_page(pte);
+ }
++static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
++{
++#ifdef CONFIG_ARM_LPAE
++      pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
++#else
++      if (addr & SECTION_SIZE)
++              pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
++      else
++              pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
++#endif
++      flush_pmd_entry(pmdp);
++}
++
+ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
+                                 pmdval_t prot)
+ {
+@@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
+ static inline void
+ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
+ {
+-      __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
++      __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
+ }
+ #define pmd_pgtable(pmd) pmd_page(pmd)
+diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
+index 5cfba15..f415e1a 100644
+--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
++++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
+@@ -20,12 +20,15 @@
+ #define PMD_TYPE_FAULT                (_AT(pmdval_t, 0) << 0)
+ #define PMD_TYPE_TABLE                (_AT(pmdval_t, 1) << 0)
+ #define PMD_TYPE_SECT         (_AT(pmdval_t, 2) << 0)
++#define PMD_PXNTABLE          (_AT(pmdval_t, 1) << 2)         /* v7 */
+ #define PMD_BIT4              (_AT(pmdval_t, 1) << 4)
+ #define PMD_DOMAIN(x)         (_AT(pmdval_t, (x)) << 5)
+ #define PMD_PROTECTION                (_AT(pmdval_t, 1) << 9)         /* v5 */
++
+ /*
+  *   - section
+  */
++#define PMD_SECT_PXN          (_AT(pmdval_t, 1) << 0)         /* v7 */
+ #define PMD_SECT_BUFFERABLE   (_AT(pmdval_t, 1) << 2)
+ #define PMD_SECT_CACHEABLE    (_AT(pmdval_t, 1) << 3)
+ #define PMD_SECT_XN           (_AT(pmdval_t, 1) << 4)         /* v6 */
+@@ -37,6 +40,7 @@
+ #define PMD_SECT_nG           (_AT(pmdval_t, 1) << 17)        /* v6 */
+ #define PMD_SECT_SUPER                (_AT(pmdval_t, 1) << 18)        /* v6 */
+ #define PMD_SECT_AF           (_AT(pmdval_t, 0))
++#define PMD_SECT_RDONLY               (_AT(pmdval_t, 0))
+ #define PMD_SECT_UNCACHED     (_AT(pmdval_t, 0))
+ #define PMD_SECT_BUFFERED     (PMD_SECT_BUFFERABLE)
+@@ -66,6 +70,7 @@
+  *   - extended small page/tiny page
+  */
+ #define PTE_EXT_XN            (_AT(pteval_t, 1) << 0)         /* v6 */
++#define PTE_EXT_PXN           (_AT(pteval_t, 1) << 2)         /* v7 */
+ #define PTE_EXT_AP_MASK               (_AT(pteval_t, 3) << 4)
+ #define PTE_EXT_AP0           (_AT(pteval_t, 1) << 4)
+ #define PTE_EXT_AP1           (_AT(pteval_t, 2) << 4)
+diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
+index f97ee02..cc9fe9e 100644
+--- a/arch/arm/include/asm/pgtable-2level.h
++++ b/arch/arm/include/asm/pgtable-2level.h
+@@ -126,6 +126,9 @@
+ #define L_PTE_SHARED          (_AT(pteval_t, 1) << 10)        /* shared(v6), coherent(xsc3) */
+ #define L_PTE_NONE            (_AT(pteval_t, 1) << 11)
++/* Two-level page tables only have PXN in the PGD, not in the PTE. */
++#define L_PTE_PXN             (_AT(pteval_t, 0))
++
+ /*
+  * These are the memory types, defined to be compatible with
+  * pre-ARMv6 CPUs cacheable and bufferable bits:   XXCB
+diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
+index 18f5cef..25b8f43 100644
+--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
++++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
+@@ -41,6 +41,7 @@
+  */
+ #define PMD_SECT_BUFFERABLE   (_AT(pmdval_t, 1) << 2)
+ #define PMD_SECT_CACHEABLE    (_AT(pmdval_t, 1) << 3)
++#define PMD_SECT_RDONLY               (_AT(pmdval_t, 1) << 7)
+ #define PMD_SECT_S            (_AT(pmdval_t, 3) << 8)
+ #define PMD_SECT_AF           (_AT(pmdval_t, 1) << 10)
+ #define PMD_SECT_nG           (_AT(pmdval_t, 1) << 11)
+@@ -71,6 +72,7 @@
+ #define PTE_EXT_SHARED                (_AT(pteval_t, 3) << 8)         /* SH[1:0], inner shareable */
+ #define PTE_EXT_AF            (_AT(pteval_t, 1) << 10)        /* Access Flag */
+ #define PTE_EXT_NG            (_AT(pteval_t, 1) << 11)        /* nG */
++#define PTE_EXT_PXN           (_AT(pteval_t, 1) << 53)        /* PXN */
+ #define PTE_EXT_XN            (_AT(pteval_t, 1) << 54)        /* XN */
+ /*
+diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
+index 86b8fe3..e25f975 100644
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -74,6 +74,7 @@
+ #define L_PTE_RDONLY          (_AT(pteval_t, 1) << 7)         /* AP[2] */
+ #define L_PTE_SHARED          (_AT(pteval_t, 3) << 8)         /* SH[1:0], inner shareable */
+ #define L_PTE_YOUNG           (_AT(pteval_t, 1) << 10)        /* AF */
++#define L_PTE_PXN             (_AT(pteval_t, 1) << 53)        /* PXN */
+ #define L_PTE_XN              (_AT(pteval_t, 1) << 54)        /* XN */
+ #define L_PTE_DIRTY           (_AT(pteval_t, 1) << 55)        /* unused */
+ #define L_PTE_SPECIAL         (_AT(pteval_t, 1) << 56)        /* unused */
+@@ -82,6 +83,7 @@
+ /*
+  * To be used in assembly code with the upper page attributes.
+  */
++#define L_PTE_PXN_HIGH                (1 << (53 - 32))
+ #define L_PTE_XN_HIGH         (1 << (54 - 32))
+ #define L_PTE_DIRTY_HIGH      (1 << (55 - 32))
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index 9bcd262..fba731c 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -30,6 +30,9 @@
+ #include <asm/pgtable-2level.h>
+ #endif
++#define ktla_ktva(addr)               (addr)
++#define ktva_ktla(addr)               (addr)
++
+ /*
+  * Just any arbitrary offset to the start of the vmalloc VM area: the
+  * current 8MB value just means that there will be a 8MB "hole" after the
+@@ -45,6 +48,9 @@
+ #define LIBRARY_TEXT_START    0x0c000000
+ #ifndef __ASSEMBLY__
++extern pteval_t __supported_pte_mask;
++extern pmdval_t __supported_pmd_mask;
++
+ extern void __pte_error(const char *file, int line, pte_t);
+ extern void __pmd_error(const char *file, int line, pmd_t);
+ extern void __pgd_error(const char *file, int line, pgd_t);
+@@ -53,6 +59,50 @@ extern void __pgd_error(const char *file, int line, pgd_t);
+ #define pmd_ERROR(pmd)                __pmd_error(__FILE__, __LINE__, pmd)
+ #define pgd_ERROR(pgd)                __pgd_error(__FILE__, __LINE__, pgd)
++#define  __HAVE_ARCH_PAX_OPEN_KERNEL
++#define  __HAVE_ARCH_PAX_CLOSE_KERNEL
++
++#ifdef CONFIG_PAX_KERNEXEC
++#include <asm/domain.h>
++#include <linux/thread_info.h>
++#include <linux/preempt.h>
++#endif
++
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++static inline int test_domain(int domain, int domaintype)
++{
++      return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
++}
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long pax_open_kernel(void) {
++#ifdef CONFIG_ARM_LPAE
++      /* TODO */
++#else
++      preempt_disable();
++      BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
++      modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
++#endif
++      return 0;
++}
++
++static inline unsigned long pax_close_kernel(void) {
++#ifdef CONFIG_ARM_LPAE
++      /* TODO */
++#else
++      BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
++      /* DOMAIN_MANAGER = "client" under KERNEXEC */
++      modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
++      preempt_enable_no_resched();
++#endif
++      return 0;
++}
++#else
++static inline unsigned long pax_open_kernel(void) { return 0; }
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++
+ /*
+  * This is the lowest virtual address we can permit any user space
+  * mapping to be mapped at.  This is particularly important for
+@@ -72,8 +122,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
+ /*
+  * The pgprot_* and protection_map entries will be fixed up in runtime
+  * to include the cachable and bufferable bits based on memory policy,
+- * as well as any architecture dependent bits like global/ASID and SMP
+- * shared mapping bits.
++ * as well as any architecture dependent bits like global/ASID, PXN,
++ * and SMP shared mapping bits.
+  */
+ #define _L_PTE_DEFAULT        L_PTE_PRESENT | L_PTE_YOUNG
+@@ -257,7 +307,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ {
+       const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
+-              L_PTE_NONE | L_PTE_VALID;
++              L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
+       pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+       return pte;
+ }
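
The fncpy.h hunk earlier shows the intended usage of these helpers: bracket a write into otherwise write-protected kernel memory, and keep preemption off in between. As a loose userspace analogue of that bracketing (illustration only — the kernel code flips the DACR type of DOMAIN_KERNEL rather than page protections, and every name below is made up):

    /* Userspace analogue of the pax_open_kernel()/pax_close_kernel() bracket. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Temporarily allow writes to a normally read-only region around one update. */
    static void write_protected(void *dst, const void *src, size_t len)
    {
            long page = sysconf(_SC_PAGESIZE);
            uintptr_t start = (uintptr_t)dst & ~(uintptr_t)(page - 1);
            size_t span = ((uintptr_t)dst + len) - start;
            size_t plen = (span + page - 1) & ~(size_t)(page - 1);

            /* "pax_open_kernel()": lift the write protection... */
            if (mprotect((void *)start, plen, PROT_READ | PROT_WRITE) != 0)
                    return;

            memcpy(dst, src, len);

            /* ..."pax_close_kernel()": and revoke it again straight away. */
            mprotect((void *)start, plen, PROT_READ);
    }

    int main(void)
    {
            /* A page-aligned buffer made read-only, standing in for read-only
             * kernel data. */
            long page = sysconf(_SC_PAGESIZE);
            void *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (buf == MAP_FAILED)
                    return 1;

            strcpy(buf, "old");
            mprotect(buf, page, PROT_READ);

            write_protected(buf, "new", 4);   /* would fault without the bracket */
            printf("%s\n", (char *)buf);
            return 0;
    }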
+diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
+index f3628fb..a0672dd 100644
+--- a/arch/arm/include/asm/proc-fns.h
++++ b/arch/arm/include/asm/proc-fns.h
+@@ -75,7 +75,7 @@ extern struct processor {
+       unsigned int suspend_size;
+       void (*do_suspend)(void *);
+       void (*do_resume)(void *);
+-} processor;
++} __do_const processor;
+ #ifndef MULTI_CPU
+ extern void cpu_proc_init(void);
+diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
+index ce0dbe7..c085b6f 100644
+--- a/arch/arm/include/asm/psci.h
++++ b/arch/arm/include/asm/psci.h
+@@ -29,7 +29,7 @@ struct psci_operations {
+       int (*cpu_off)(struct psci_power_state state);
+       int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+       int (*migrate)(unsigned long cpuid);
+-};
++} __no_const;
+ extern struct psci_operations psci_ops;
+diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
+index d3a22be..3a69ad5 100644
+--- a/arch/arm/include/asm/smp.h
++++ b/arch/arm/include/asm/smp.h
+@@ -107,7 +107,7 @@ struct smp_operations {
+       int  (*cpu_disable)(unsigned int cpu);
+ #endif
+ #endif
+-};
++} __no_const;
+ /*
+  * set platform specific SMP operations
+diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
+index f00b569..aa5bb41 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -77,9 +77,9 @@ struct thread_info {
+       .flags          = 0,                                            \
+       .preempt_count  = INIT_PREEMPT_COUNT,                           \
+       .addr_limit     = KERNEL_DS,                                    \
+-      .cpu_domain     = domain_val(DOMAIN_USER, DOMAIN_MANAGER) |     \
+-                        domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |   \
+-                        domain_val(DOMAIN_IO, DOMAIN_CLIENT),         \
++      .cpu_domain     = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) |  \
++                        domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) |      \
++                        domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT),   \
+       .restart_block  = {                                             \
+               .fn     = do_no_restart_syscall,                        \
+       },                                                              \
+@@ -152,7 +152,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ #define TIF_SYSCALL_AUDIT     9
+ #define TIF_SYSCALL_TRACEPOINT        10
+ #define TIF_SECCOMP           11      /* seccomp syscall filtering active */
+-#define TIF_NOHZ              12      /* in adaptive nohz mode */
++/* within 8 bits of TIF_SYSCALL_TRACE
++ *  to meet flexible second operand requirements
++ */
++#define TIF_GRSEC_SETXID      12
++#define TIF_NOHZ              13      /* in adaptive nohz mode */
+ #define TIF_USING_IWMMXT      17
+ #define TIF_MEMDIE            18      /* is terminating due to OOM killer */
+ #define TIF_RESTORE_SIGMASK   20
+@@ -165,10 +169,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ #define _TIF_SYSCALL_TRACEPOINT       (1 << TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_SECCOMP          (1 << TIF_SECCOMP)
+ #define _TIF_USING_IWMMXT     (1 << TIF_USING_IWMMXT)
++#define _TIF_GRSEC_SETXID     (1 << TIF_GRSEC_SETXID)
+ /* Checks for any syscall work in entry-common.S */
+ #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+-                         _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
++                         _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
+ /*
+  * Change these and you break ASM code in entry-common.S
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index 7e1f760..de33b13 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -18,6 +18,7 @@
+ #include <asm/domain.h>
+ #include <asm/unified.h>
+ #include <asm/compiler.h>
++#include <asm/pgtable.h>
+ #define VERIFY_READ 0
+ #define VERIFY_WRITE 1
+@@ -63,11 +64,38 @@ extern int __put_user_bad(void);
+ static inline void set_fs(mm_segment_t fs)
+ {
+       current_thread_info()->addr_limit = fs;
+-      modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
++      modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
+ }
+ #define segment_eq(a,b)       ((a) == (b))
++#define __HAVE_ARCH_PAX_OPEN_USERLAND
++#define __HAVE_ARCH_PAX_CLOSE_USERLAND
++
++static inline void pax_open_userland(void)
++{
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (segment_eq(get_fs(), USER_DS)) {
++              BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
++              modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
++      }
++#endif
++
++}
++
++static inline void pax_close_userland(void)
++{
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (segment_eq(get_fs(), USER_DS)) {
++              BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
++              modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
++      }
++#endif
++
++}
++
+ #define __addr_ok(addr) ({ \
+       unsigned long flag; \
+       __asm__("cmp %2, %0; movlo %0, #0" \
+@@ -143,8 +171,12 @@ extern int __get_user_4(void *);
+ #define get_user(x,p)                                                 \
+       ({                                                              \
++              int __e;                                                \
+               might_fault();                                          \
+-              __get_user_check(x,p);                                  \
++              pax_open_userland();                                    \
++              __e = __get_user_check(x,p);                            \
++              pax_close_userland();                                   \
++              __e;                                                    \
+        })
+ extern int __put_user_1(void *, unsigned int);
+@@ -188,8 +220,12 @@ extern int __put_user_8(void *, unsigned long long);
+ #define put_user(x,p)                                                 \
+       ({                                                              \
++              int __e;                                                \
+               might_fault();                                          \
+-              __put_user_check(x,p);                                  \
++              pax_open_userland();                                    \
++              __e = __put_user_check(x,p);                            \
++              pax_close_userland();                                   \
++              __e;                                                    \
+        })
+ #else /* CONFIG_MMU */
+@@ -230,13 +266,17 @@ static inline void set_fs(mm_segment_t fs)
+ #define __get_user(x,ptr)                                             \
+ ({                                                                    \
+       long __gu_err = 0;                                              \
++      pax_open_userland();                                            \
+       __get_user_err((x),(ptr),__gu_err);                             \
++      pax_close_userland();                                           \
+       __gu_err;                                                       \
+ })
+ #define __get_user_error(x,ptr,err)                                   \
+ ({                                                                    \
++      pax_open_userland();                                            \
+       __get_user_err((x),(ptr),err);                                  \
++      pax_close_userland();                                           \
+       (void) 0;                                                       \
+ })
+@@ -312,13 +352,17 @@ do {                                                                     \
+ #define __put_user(x,ptr)                                             \
+ ({                                                                    \
+       long __pu_err = 0;                                              \
++      pax_open_userland();                                            \
+       __put_user_err((x),(ptr),__pu_err);                             \
++      pax_close_userland();                                           \
+       __pu_err;                                                       \
+ })
+ #define __put_user_error(x,ptr,err)                                   \
+ ({                                                                    \
++      pax_open_userland();                                            \
+       __put_user_err((x),(ptr),err);                                  \
++      pax_close_userland();                                           \
+       (void) 0;                                                       \
+ })
+@@ -418,11 +462,44 @@ do {                                                                     \
+ #ifdef CONFIG_MMU
+-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
+-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
++extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
++extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
++
++static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++      unsigned long ret;
++
++      check_object_size(to, n, false);
++      pax_open_userland();
++      ret = ___copy_from_user(to, from, n);
++      pax_close_userland();
++      return ret;
++}
++
++static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++      unsigned long ret;
++
++      check_object_size(from, n, true);
++      pax_open_userland();
++      ret = ___copy_to_user(to, from, n);
++      pax_close_userland();
++      return ret;
++}
++
+ extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
+-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
++extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
+ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
++
++static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
++{
++      unsigned long ret;
++      pax_open_userland();
++      ret = ___clear_user(addr, n);
++      pax_close_userland();
++      return ret;
++}
++
+ #else
+ #define __copy_from_user(to,from,n)   (memcpy(to, (void __force *)from, n), 0)
+ #define __copy_to_user(to,from,n)     (memcpy((void __force *)to, from, n), 0)
+@@ -431,6 +508,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
+ static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
+       if (access_ok(VERIFY_READ, from, n))
+               n = __copy_from_user(to, from, n);
+       else /* security hole - plug it */
+@@ -440,6 +520,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
+ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
+       if (access_ok(VERIFY_WRITE, to, n))
+               n = __copy_to_user(to, from, n);
+       return n;
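
The `(long)n < 0` guards added above reject lengths with the sign bit set before `access_ok()` and the copy ever run: as an unsigned count such a size is more than half the address space and is almost always a subtraction that underflowed, so it is handed straight back as "bytes not copied". A tiny illustration of what the check catches (plain C, not from the patch; the function name is made up):

    /* Why the patched copy_{from,to}_user() refuse "negative" sizes. */
    #include <stdio.h>

    static unsigned long copy_from_user_sketch(void *to, const void *from,
                                               unsigned long n)
    {
            (void)to; (void)from;   /* sketch: no real copy performed */

            /* A length such as (payload_len - header_len) can underflow to a
             * huge unsigned value; seen as signed it is negative, so copy
             * nothing and report every byte as "not copied". */
            if ((long)n < 0)
                    return n;

            /* ... access_ok() and the real copy would run here ... */
            return 0;
    }

    int main(void)
    {
            unsigned long payload_len = 16, header_len = 32;
            unsigned long n = payload_len - header_len;   /* wraps to a huge value */

            printf("n=%lu  not copied=%lu\n", n,
                   copy_from_user_sketch(NULL, NULL, n));
            return 0;
    }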
+diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
+index 96ee092..37f1844 100644
+--- a/arch/arm/include/uapi/asm/ptrace.h
++++ b/arch/arm/include/uapi/asm/ptrace.h
+@@ -73,7 +73,7 @@
+  * ARMv7 groups of PSR bits
+  */
+ #define APSR_MASK     0xf80f0000      /* N, Z, C, V, Q and GE flags */
+-#define PSR_ISET_MASK 0x01000010      /* ISA state (J, T) mask */
++#define PSR_ISET_MASK 0x01000020      /* ISA state (J, T) mask */
+ #define PSR_IT_MASK   0x0600fc00      /* If-Then execution state mask */
+ #define PSR_ENDIAN_MASK       0x00000200      /* Endianness state mask */
+diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
+index 60d3b73..e5a0f22 100644
+--- a/arch/arm/kernel/armksyms.c
++++ b/arch/arm/kernel/armksyms.c
+@@ -53,7 +53,7 @@ EXPORT_SYMBOL(arm_delay_ops);
+       /* networking */
+ EXPORT_SYMBOL(csum_partial);
+-EXPORT_SYMBOL(csum_partial_copy_from_user);
++EXPORT_SYMBOL(__csum_partial_copy_from_user);
+ EXPORT_SYMBOL(csum_partial_copy_nocheck);
+ EXPORT_SYMBOL(__csum_ipv6_magic);
+@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
+ #ifdef CONFIG_MMU
+ EXPORT_SYMBOL(copy_page);
+-EXPORT_SYMBOL(__copy_from_user);
+-EXPORT_SYMBOL(__copy_to_user);
+-EXPORT_SYMBOL(__clear_user);
++EXPORT_SYMBOL(___copy_from_user);
++EXPORT_SYMBOL(___copy_to_user);
++EXPORT_SYMBOL(___clear_user);
+ EXPORT_SYMBOL(__get_user_1);
+ EXPORT_SYMBOL(__get_user_2);
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index d43c7e5..257c050 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -47,6 +47,87 @@
+ 9997:
+       .endm
++      .macro  pax_enter_kernel
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      @ make aligned space for saved DACR
++      sub     sp, sp, #8
++      @ save regs
++      stmdb   sp!, {r1, r2}
++      @ read DACR from cpu_domain into r1
++      mov     r2, sp
++      @ assume 8K pages, since we have to split the immediate in two
++      bic     r2, r2, #(0x1fc0)
++      bic     r2, r2, #(0x3f)
++      ldr     r1, [r2, #TI_CPU_DOMAIN]
++      @ store old DACR on stack 
++      str     r1, [sp, #8]
++#ifdef CONFIG_PAX_KERNEXEC
++      @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
++      bic     r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
++      orr     r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
++#endif
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      @ set current DOMAIN_USER to DOMAIN_NOACCESS
++      bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
++#endif
++      @ write r1 to current_thread_info()->cpu_domain
++      str     r1, [r2, #TI_CPU_DOMAIN]
++      @ write r1 to DACR
++      mcr     p15, 0, r1, c3, c0, 0
++      @ instruction sync
++      instr_sync
++      @ restore regs
++      ldmia   sp!, {r1, r2}
++#endif
++      .endm
++
++      .macro  pax_open_userland
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      @ save regs
++      stmdb   sp!, {r0, r1}
++      @ read DACR from cpu_domain into r1
++      mov     r0, sp
++      @ assume 8K pages, since we have to split the immediate in two
++      bic     r0, r0, #(0x1fc0)
++      bic     r0, r0, #(0x3f)
++      ldr     r1, [r0, #TI_CPU_DOMAIN]
++      @ set current DOMAIN_USER to DOMAIN_CLIENT
++      bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
++      orr     r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
++      @ write r1 to current_thread_info()->cpu_domain
++      str     r1, [r0, #TI_CPU_DOMAIN]
++      @ write r1 to DACR
++      mcr     p15, 0, r1, c3, c0, 0
++      @ instruction sync
++      instr_sync
++      @ restore regs
++      ldmia   sp!, {r0, r1}
++#endif
++      .endm
++
++      .macro  pax_close_userland
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      @ save regs
++      stmdb   sp!, {r0, r1}
++      @ read DACR from cpu_domain into r1
++      mov     r0, sp
++      @ assume 8K pages, since we have to split the immediate in two
++      bic     r0, r0, #(0x1fc0)
++      bic     r0, r0, #(0x3f)
++      ldr     r1, [r0, #TI_CPU_DOMAIN]
++      @ set current DOMAIN_USER to DOMAIN_NOACCESS
++      bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
++      @ write r1 to current_thread_info()->cpu_domain
++      str     r1, [r0, #TI_CPU_DOMAIN]
++      @ write r1 to DACR
++      mcr     p15, 0, r1, c3, c0, 0
++      @ instruction sync
++      instr_sync
++      @ restore regs
++      ldmia   sp!, {r0, r1}
++#endif
++      .endm
++
+       .macro  pabt_helper
+       @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
+ #ifdef MULTI_PABORT
+@@ -89,11 +170,15 @@
+  * Invalid mode handlers
+  */
+       .macro  inv_entry, reason
++
++      pax_enter_kernel
++
+       sub     sp, sp, #S_FRAME_SIZE
+  ARM( stmib   sp, {r1 - lr}           )
+  THUMB(       stmia   sp, {r0 - r12}          )
+  THUMB(       str     sp, [sp, #S_SP]         )
+  THUMB(       str     lr, [sp, #S_LR]         )
++
+       mov     r1, #\reason
+       .endm
+@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
+       .macro  svc_entry, stack_hole=0
+  UNWIND(.fnstart              )
+  UNWIND(.save {r0 - pc}               )
++
++      pax_enter_kernel
++
+       sub     sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
++
+ #ifdef CONFIG_THUMB2_KERNEL
+  SPFIX(       str     r0, [sp]        )       @ temporarily saved
+  SPFIX(       mov     r0, sp          )
+@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
+       ldmia   r0, {r3 - r5}
+       add     r7, sp, #S_SP - 4       @ here for interlock avoidance
+       mov     r6, #-1                 @  ""  ""      ""       ""
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      @ offset sp by 8 as done in pax_enter_kernel
++      add     r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
++#else
+       add     r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
++#endif
+  SPFIX(       addeq   r2, r2, #4      )
+       str     r3, [sp, #-4]!          @ save the "real" r0 copied
+                                       @ from the exception stack
+@@ -316,6 +410,9 @@ ENDPROC(__pabt_svc)
+       .macro  usr_entry
+  UNWIND(.fnstart      )
+  UNWIND(.cantunwind   )       @ don't unwind the user space
++
++      pax_enter_kernel_user
++
+       sub     sp, sp, #S_FRAME_SIZE
+  ARM( stmib   sp, {r1 - r12}  )
+  THUMB(       stmia   sp, {r0 - r12}  )
+@@ -357,7 +454,8 @@ ENDPROC(__pabt_svc)
+       .endm
+       .macro  kuser_cmpxchg_check
+-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
++#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
++    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+ #ifndef CONFIG_MMU
+ #warning "NPTL on non MMU needs fixing"
+ #else
+@@ -414,7 +512,9 @@ __und_usr:
+       tst     r3, #PSR_T_BIT                  @ Thumb mode?
+       bne     __und_usr_thumb
+       sub     r4, r2, #4                      @ ARM instr at LR - 4
++      pax_open_userland
+ 1:    ldrt    r0, [r4]
++      pax_close_userland
+ #ifdef CONFIG_CPU_ENDIAN_BE8
+       rev     r0, r0                          @ little endian instruction
+ #endif
+@@ -449,10 +549,14 @@ __und_usr_thumb:
+  */
+       .arch   armv6t2
+ #endif
++      pax_open_userland
+ 2:    ldrht   r5, [r4]
++      pax_close_userland
+       cmp     r5, #0xe800                     @ 32bit instruction if xx != 0
+       blo     __und_usr_fault_16              @ 16bit undefined instruction
++      pax_open_userland
+ 3:    ldrht   r0, [r2]
++      pax_close_userland
+       add     r2, r2, #2                      @ r2 is PC + 2, make it PC + 4
+       str     r2, [sp, #S_PC]                 @ it's a 2x16bit instr, update
+       orr     r0, r0, r5, lsl #16
+@@ -481,7 +585,8 @@ ENDPROC(__und_usr)
+  */
+       .pushsection .fixup, "ax"
+       .align  2
+-4:    mov     pc, r9
++4:    pax_close_userland
++      mov     pc, r9
+       .popsection
+       .pushsection __ex_table,"a"
+       .long   1b, 4b
+@@ -690,7 +795,7 @@ ENTRY(__switch_to)
+  THUMB(       stmia   ip!, {r4 - sl, fp}         )    @ Store most regs on stack
+  THUMB(       str     sp, [ip], #4               )
+  THUMB(       str     lr, [ip], #4               )
+-#ifdef CONFIG_CPU_USE_DOMAINS
++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+       ldr     r6, [r2, #TI_CPU_DOMAIN]
+ #endif
+       set_tls r3, r4, r5
+@@ -699,7 +804,7 @@ ENTRY(__switch_to)
+       ldr     r8, =__stack_chk_guard
+       ldr     r7, [r7, #TSK_STACK_CANARY]
+ #endif
+-#ifdef CONFIG_CPU_USE_DOMAINS
++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+       mcr     p15, 0, r6, c3, c0, 0           @ Set domain register
+ #endif
+       mov     r5, r0
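
The pax_open_userland/pax_close_userland macros above (and the pax_enter_kernel_user/pax_exit_kernel pair added to entry-header.S below) all manipulate the same thing: the ARM Domain Access Control Register (DACR, CP15 c3), plus the cached copy in current_thread_info()->cpu_domain. Roughly, pax_close_userland amounts to the following C; this is a sketch only, using the stock domain_val() encoding of two bits per domain and the stock DOMAIN_NOACCESS == 0 from asm/domain.h, while the UDEREF-specific domain values the patch defines elsewhere are not shown:

    /* sketch of pax_close_userland: revoke CPU access to the user domain */
    static inline void close_userland_sketch(void)
    {
            struct thread_info *ti = current_thread_info(); /* sp & ~(THREAD_SIZE - 1) */
            unsigned long dacr = ti->cpu_domain;

            dacr &= ~domain_val(DOMAIN_USER, 3);    /* clear the 2-bit DOMAIN_USER field    */
            ti->cpu_domain = dacr;                  /* 0 == DOMAIN_NOACCESS: accesses fault */
            asm volatile("mcr p15, 0, %0, c3, c0, 0" : : "r" (dacr)); /* write DACR back    */
            isb();                                  /* the macro's instr_sync               */
    }

pax_open_userland is the mirror image: it restores the DOMAIN_USER field to an accessible value so that the ldrt/strt user accesses it brackets can proceed.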
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index bc5bc0a..d0998ca 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -10,18 +10,46 @@
+ #include <asm/unistd.h>
+ #include <asm/ftrace.h>
++#include <asm/domain.h>
+ #include <asm/unwind.h>
++#include "entry-header.S"
++
+ #ifdef CONFIG_NEED_RET_TO_USER
+ #include <mach/entry-macro.S>
+ #else
+       .macro  arch_ret_to_user, tmp1, tmp2
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      @ save regs
++      stmdb   sp!, {r1, r2}
++      @ read DACR from cpu_domain into r1
++      mov     r2, sp
++      @ assume 8K pages, since we have to split the immediate in two
++      bic     r2, r2, #(0x1fc0)
++      bic     r2, r2, #(0x3f)
++      ldr     r1, [r2, #TI_CPU_DOMAIN]
++#ifdef CONFIG_PAX_KERNEXEC
++      @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
++      bic     r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
++      orr     r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
++#endif
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      @ set current DOMAIN_USER to DOMAIN_UDEREF
++      bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
++      orr     r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
++#endif
++      @ write r1 to current_thread_info()->cpu_domain
++      str     r1, [r2, #TI_CPU_DOMAIN]
++      @ write r1 to DACR
++      mcr     p15, 0, r1, c3, c0, 0
++      @ instruction sync
++      instr_sync
++      @ restore regs
++      ldmia   sp!, {r1, r2}
++#endif
+       .endm
+ #endif
+-#include "entry-header.S"
+-
+-
+       .align  5
+ /*
+  * This is the fast syscall return path.  We do as little as
+@@ -350,6 +378,7 @@ ENDPROC(ftrace_stub)
+       .align  5
+ ENTRY(vector_swi)
++
+       sub     sp, sp, #S_FRAME_SIZE
+       stmia   sp, {r0 - r12}                  @ Calling r0 - r12
+  ARM( add     r8, sp, #S_PC           )
+@@ -399,6 +428,12 @@ ENTRY(vector_swi)
+       ldr     scno, [lr, #-4]                 @ get SWI instruction
+ #endif
++      /*
++       * do this here to avoid a performance hit of wrapping the code above
++       * that directly dereferences userland to parse the SWI instruction
++       */
++      pax_enter_kernel_user
++
+ #ifdef CONFIG_ALIGNMENT_TRAP
+       ldr     ip, __cr_alignment
+       ldr     ip, [ip]
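
The recurring mov-sp/double-bic sequence (here in arch_ret_to_user, and in the macros above) is simply current_thread_info() spelled out in assembler; the "8K pages" comment really refers to the 8 KiB kernel stack, and TI_CPU_DOMAIN is the asm-offsets.h offset of the cpu_domain field inside struct thread_info. The arithmetic, spelled out as a sketch:

    /* 0x1fc0 | 0x3f == 0x1fff == THREAD_SIZE - 1 for the 8 KiB ARM kernel stack.
     * The mask is split across two BICs because an ARM data-processing immediate
     * is an 8-bit value rotated by an even amount, so 0x1fff cannot be encoded
     * in a single instruction. */
    static inline struct thread_info *thread_info_from_sp(unsigned long sp)
    {
            return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
    }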
+diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
+index 160f337..db67ee4 100644
+--- a/arch/arm/kernel/entry-header.S
++++ b/arch/arm/kernel/entry-header.S
+@@ -73,6 +73,60 @@
+       msr     cpsr_c, \rtemp                  @ switch back to the SVC mode
+       .endm
++      .macro  pax_enter_kernel_user
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      @ save regs
++      stmdb   sp!, {r0, r1}
++      @ read DACR from cpu_domain into r1
++      mov     r0, sp
++      @ assume 8K pages, since we have to split the immediate in two
++      bic     r0, r0, #(0x1fc0)
++      bic     r0, r0, #(0x3f)
++      ldr     r1, [r0, #TI_CPU_DOMAIN]
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      @ set current DOMAIN_USER to DOMAIN_NOACCESS
++      bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
++#endif
++#ifdef CONFIG_PAX_KERNEXEC
++      @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
++      bic     r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
++      orr     r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
++#endif
++      @ write r1 to current_thread_info()->cpu_domain
++      str     r1, [r0, #TI_CPU_DOMAIN]
++      @ write r1 to DACR
++      mcr     p15, 0, r1, c3, c0, 0
++      @ instruction sync
++      instr_sync
++      @ restore regs
++      ldmia   sp!, {r0, r1}
++#endif
++      .endm
++
++      .macro  pax_exit_kernel
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      @ save regs
++      stmdb   sp!, {r0, r1}
++      @ read old DACR from stack into r1
++      ldr     r1, [sp, #(8 + S_SP)]
++      sub     r1, r1, #8
++      ldr     r1, [r1]
++
++      @ write r1 to current_thread_info()->cpu_domain
++      mov     r0, sp
++      @ assume 8K pages, since we have to split the immediate in two
++      bic     r0, r0, #(0x1fc0)
++      bic     r0, r0, #(0x3f)
++      str     r1, [r0, #TI_CPU_DOMAIN]
++      @ write r1 to DACR
++      mcr     p15, 0, r1, c3, c0, 0
++      @ instruction sync
++      instr_sync
++      @ restore regs
++      ldmia   sp!, {r0, r1}
++#endif
++      .endm
++
+ #ifndef CONFIG_THUMB2_KERNEL
+       .macro  svc_exit, rpsr, irq = 0
+       .if     \irq != 0
+@@ -92,6 +146,9 @@
+       blne    trace_hardirqs_off
+ #endif
+       .endif
++
++      pax_exit_kernel
++
+       msr     spsr_cxsf, \rpsr
+ #if defined(CONFIG_CPU_V6)
+       ldr     r0, [sp]
+@@ -155,6 +212,9 @@
+       blne    trace_hardirqs_off
+ #endif
+       .endif
++
++      pax_exit_kernel
++
+       ldr     lr, [sp, #S_SP]                 @ top of the stack
+       ldrd    r0, r1, [sp, #S_LR]             @ calling lr and pc
+       clrex                                   @ clear the exclusive monitor
+diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
+index 25442f4..d4948fc 100644
+--- a/arch/arm/kernel/fiq.c
++++ b/arch/arm/kernel/fiq.c
+@@ -84,17 +84,16 @@ int show_fiq_list(struct seq_file *p, int prec)
+ void set_fiq_handler(void *start, unsigned int length)
+ {
+-#if defined(CONFIG_CPU_USE_DOMAINS)
+-      void *base = (void *)0xffff0000;
+-#else
+       void *base = vectors_page;
+-#endif
+       unsigned offset = FIQ_OFFSET;
++      pax_open_kernel();
+       memcpy(base + offset, start, length);
++      pax_close_kernel();
++
++      if (!cache_is_vipt_nonaliasing())
++              flush_icache_range((unsigned long)base + offset, (unsigned long)base + offset + length);
+       flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
+-      if (!vectors_high())
+-              flush_icache_range(offset, offset + length);
+ }
+ int claim_fiq(struct fiq_handler *f)
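
With KERNEXEC the vectors page, like the rest of kernel text, is mapped read-only, so the memcpy() into it has to be bracketed with pax_open_kernel()/pax_close_kernel(), which temporarily lift the write protection for the current CPU. The same bracketing recurs in patch.c and traps.c below. A sketch of the pattern (the helper name and the u32 granularity are illustrative, not taken from the patch):

    /* illustrative only: write one word into an otherwise read-only kernel page */
    static void poke_readonly_word(void *base, unsigned int offset, u32 val)
    {
            pax_open_kernel();                       /* make kernel mappings writable */
            *(u32 *)(base + offset) = val;           /* would fault without the open  */
            pax_close_kernel();                      /* restore read-only state       */
            flush_icache_range((unsigned long)base + offset,
                               (unsigned long)base + offset + sizeof(u32));
    }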
+diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
+index 8bac553..caee108 100644
+--- a/arch/arm/kernel/head.S
++++ b/arch/arm/kernel/head.S
+@@ -52,7 +52,9 @@
+       .equ    swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
+       .macro  pgtbl, rd, phys
+-      add     \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
++      mov     \rd, #TEXT_OFFSET
++      sub     \rd, #PG_DIR_SIZE
++      add     \rd, \rd, \phys
+       .endm
+ /*
+@@ -434,7 +436,7 @@ __enable_mmu:
+       mov     r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
+                     domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+                     domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
+-                    domain_val(DOMAIN_IO, DOMAIN_CLIENT))
++                    domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
+       mcr     p15, 0, r5, c3, c0, 0           @ load domain access register
+       mcr     p15, 0, r4, c2, c0, 0           @ load page table pointer
+ #endif
+diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
+index 1fd749e..47adb08 100644
+--- a/arch/arm/kernel/hw_breakpoint.c
++++ b/arch/arm/kernel/hw_breakpoint.c
+@@ -1029,7 +1029,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata dbg_reset_nb = {
++static struct notifier_block dbg_reset_nb = {
+       .notifier_call = dbg_reset_notify,
+ };
+diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
+index 1e9be5d..03edbc2 100644
+--- a/arch/arm/kernel/module.c
++++ b/arch/arm/kernel/module.c
+@@ -37,12 +37,37 @@
+ #endif
+ #ifdef CONFIG_MMU
+-void *module_alloc(unsigned long size)
++static inline void *__module_alloc(unsigned long size, pgprot_t prot)
+ {
++      if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
++              return NULL;
+       return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+-                              GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
++                              GFP_KERNEL, prot, -1,
+                               __builtin_return_address(0));
+ }
++
++void *module_alloc(unsigned long size)
++{
++
++#ifdef CONFIG_PAX_KERNEXEC
++      return __module_alloc(size, PAGE_KERNEL);
++#else
++      return __module_alloc(size, PAGE_KERNEL_EXEC);
++#endif
++
++}
++
++#ifdef CONFIG_PAX_KERNEXEC
++void module_free_exec(struct module *mod, void *module_region)
++{
++      module_free(mod, module_region);
++}
++
++void *module_alloc_exec(unsigned long size)
++{
++      return __module_alloc(size, PAGE_KERNEL_EXEC);
++}
++#endif
+ #endif
+ int
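
The module_alloc() split is the module-loader side of KERNEXEC: modules are now allocated with PAGE_KERNEL by default (writable, never executable), and executable memory comes only from the separate module_alloc_exec()/module_free_exec() pair. A hypothetical caller could look like the sketch below; the function and buffer names are assumed for illustration, since the real users live in the generic module-loader changes elsewhere in the patch:

    /* hypothetical: lay out a module's text (RX) and data (RW) separately */
    static int layout_module(const void *text, size_t text_sz,
                             const void *data, size_t data_sz,
                             void **rx_out, void **rw_out)
    {
            void *rx = module_alloc_exec(text_sz);   /* PAGE_KERNEL_EXEC mapping   */
            void *rw = module_alloc(data_sz);        /* PAGE_KERNEL, never exec    */

            if (!rx || !rw)
                    return -ENOMEM;                  /* (error unwinding omitted)  */

            pax_open_kernel();
            memcpy(rx, text, text_sz);               /* text is written "opened"   */
            pax_close_kernel();
            memcpy(rw, data, data_sz);               /* data stays plain writable  */

            *rx_out = rx;
            *rw_out = rw;
            return 0;
    }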
+diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
+index 07314af..c46655c 100644
+--- a/arch/arm/kernel/patch.c
++++ b/arch/arm/kernel/patch.c
+@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
+       bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
+       int size;
++      pax_open_kernel();
+       if (thumb2 && __opcode_is_thumb16(insn)) {
+               *(u16 *)addr = __opcode_to_mem_thumb16(insn);
+               size = sizeof(u16);
+@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
+               *(u32 *)addr = insn;
+               size = sizeof(u32);
+       }
++      pax_close_kernel();
+       flush_icache_range((uintptr_t)(addr),
+                          (uintptr_t)(addr) + size);
+diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
+index e19edc6..e186ee1 100644
+--- a/arch/arm/kernel/perf_event.c
++++ b/arch/arm/kernel/perf_event.c
+@@ -56,7 +56,7 @@ armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
+       int mapping;
+       if (config >= PERF_COUNT_HW_MAX)
+-              return -ENOENT;
++              return -EINVAL;
+       mapping = (*event_map)[config];
+       return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
+diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
+index 1f2740e..b36e225 100644
+--- a/arch/arm/kernel/perf_event_cpu.c
++++ b/arch/arm/kernel/perf_event_cpu.c
+@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
++static struct notifier_block cpu_pmu_hotplug_notifier = {
+       .notifier_call = cpu_pmu_notify,
+ };
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index 5bc2615..dcd439f 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -223,6 +223,7 @@ void machine_power_off(void)
+       if (pm_power_off)
+               pm_power_off();
++      BUG();
+ }
+ /*
+@@ -236,7 +237,7 @@ void machine_power_off(void)
+  * executing pre-reset code, and using RAM that the primary CPU's code wishes
+  * to use. Implementing such co-ordination would be essentially impossible.
+  */
+-void machine_restart(char *cmd)
++__noreturn void machine_restart(char *cmd)
+ {
+       smp_send_stop();
+@@ -258,8 +259,8 @@ void __show_regs(struct pt_regs *regs)
+       show_regs_print_info(KERN_DEFAULT);
+-      print_symbol("PC is at %s\n", instruction_pointer(regs));
+-      print_symbol("LR is at %s\n", regs->ARM_lr);
++      printk("PC is at %pA\n", (void *)instruction_pointer(regs));
++      printk("LR is at %pA\n", (void *)regs->ARM_lr);
+       printk("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n"
+              "sp : %08lx  ip : %08lx  fp : %08lx\n",
+               regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
+@@ -426,12 +427,6 @@ unsigned long get_wchan(struct task_struct *p)
+       return 0;
+ }
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+-      unsigned long range_end = mm->brk + 0x02000000;
+-      return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+-}
+-
+ #ifdef CONFIG_MMU
+ #ifdef CONFIG_KUSER_HELPERS
+ /*
+@@ -447,7 +442,7 @@ static struct vm_area_struct gate_vma = {
+ static int __init gate_vma_init(void)
+ {
+-      gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
++      gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+       return 0;
+ }
+ arch_initcall(gate_vma_init);
+@@ -466,48 +461,23 @@ int in_gate_area_no_mm(unsigned long addr)
+ {
+       return in_gate_area(NULL, addr);
+ }
+-#define is_gate_vma(vma)      ((vma) = &gate_vma)
++#define is_gate_vma(vma)      ((vma) == &gate_vma)
+ #else
+ #define is_gate_vma(vma)      0
+ #endif
+ const char *arch_vma_name(struct vm_area_struct *vma)
+ {
+-      return is_gate_vma(vma) ? "[vectors]" :
+-              (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
+-               "[sigpage]" : NULL;
++      return is_gate_vma(vma) ? "[vectors]" : NULL;
+ }
+-static struct page *signal_page;
+-extern struct page *get_signal_page(void);
+-
+ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ {
+       struct mm_struct *mm = current->mm;
+-      unsigned long addr;
+-      int ret;
+-
+-      if (!signal_page)
+-              signal_page = get_signal_page();
+-      if (!signal_page)
+-              return -ENOMEM;
+       down_write(&mm->mmap_sem);
+-      addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+-      if (IS_ERR_VALUE(addr)) {
+-              ret = addr;
+-              goto up_fail;
+-      }
+-
+-      ret = install_special_mapping(mm, addr, PAGE_SIZE,
+-              VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+-              &signal_page);
+-
+-      if (ret == 0)
+-              mm->context.sigpage = addr;
+-
+- up_fail:
++      mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
+       up_write(&mm->mmap_sem);
+-      return ret;
++      return 0;
+ }
+ #endif
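
The rewritten arch_setup_additional_pages() no longer maps a signal page at all: mm->context.sigpage is set to a random, word-aligned address above PAGE_OFFSET, which userland can never execute, so the return-from-signal jump always traps and do_PrefetchAbort() (see the fault.c hunk further down) recognises the address and calls sys_sigreturn()/sys_rt_sigreturn() directly. The constants keep the seven emulated slots clear of the real vectors page; assuming the usual 3G/1G split (PAGE_OFFSET == 0xC0000000):

    /* range of the randomised sigpage value (sketch of the arithmetic):
     *   lowest:   0xC0000000                          (PAGE_OFFSET itself)
     *   highest: (0xC0000000 + 0x3FFEFFDF) & ~3UL  == 0xFFFEFFDC
     *   highest + 7*4                               == 0xFFFEFFF8 < 0xFFFF0000
     * so sigpage .. sigpage + 7*4 never overlaps the vectors page at 0xffff0000,
     * and the "& 0xFFFFFFFC" keeps the emulated return address word-aligned. */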
+diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
+index 3653164..d83e55d 100644
+--- a/arch/arm/kernel/psci.c
++++ b/arch/arm/kernel/psci.c
+@@ -24,7 +24,7 @@
+ #include <asm/opcodes-virt.h>
+ #include <asm/psci.h>
+-struct psci_operations psci_ops;
++struct psci_operations psci_ops __read_only;
+ static int (*invoke_psci_fn)(u32, u32, u32, u32);
+diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
+index 03deeff..741ce88 100644
+--- a/arch/arm/kernel/ptrace.c
++++ b/arch/arm/kernel/ptrace.c
+@@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
+       return current_thread_info()->syscall;
+ }
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
+ {
+       current_thread_info()->syscall = scno;
++#ifdef CONFIG_GRKERNSEC_SETXID
++      if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++              gr_delayed_cred_worker();
++#endif
++
+       /* Do the secure computing check first; failures should be fast. */
+       if (secure_computing(scno) == -1)
+               return -1;
+diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
+index b4b1d39..efdc9be 100644
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -97,21 +97,23 @@ EXPORT_SYMBOL(system_serial_high);
+ unsigned int elf_hwcap __read_mostly;
+ EXPORT_SYMBOL(elf_hwcap);
++pteval_t __supported_pte_mask __read_only;
++pmdval_t __supported_pmd_mask __read_only;
+ #ifdef MULTI_CPU
+-struct processor processor __read_mostly;
++struct processor processor;
+ #endif
+ #ifdef MULTI_TLB
+-struct cpu_tlb_fns cpu_tlb __read_mostly;
++struct cpu_tlb_fns cpu_tlb __read_only;
+ #endif
+ #ifdef MULTI_USER
+-struct cpu_user_fns cpu_user __read_mostly;
++struct cpu_user_fns cpu_user __read_only;
+ #endif
+ #ifdef MULTI_CACHE
+-struct cpu_cache_fns cpu_cache __read_mostly;
++struct cpu_cache_fns cpu_cache __read_only;
+ #endif
+ #ifdef CONFIG_OUTER_CACHE
+-struct outer_cache_fns outer_cache __read_mostly;
++struct outer_cache_fns outer_cache __read_only;
+ EXPORT_SYMBOL(outer_cache);
+ #endif
+@@ -236,9 +238,13 @@ static int __get_cpu_architecture(void)
+               asm("mrc        p15, 0, %0, c0, c1, 4"
+                   : "=r" (mmfr0));
+               if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
+-                  (mmfr0 & 0x000000f0) >= 0x00000030)
++                  (mmfr0 & 0x000000f0) >= 0x00000030) {
+                       cpu_arch = CPU_ARCH_ARMv7;
+-              else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
++                      if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
++                              __supported_pte_mask |= L_PTE_PXN;
++                              __supported_pmd_mask |= PMD_PXNTABLE;
++                      }
++              } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
+                        (mmfr0 & 0x000000f0) == 0x00000020)
+                       cpu_arch = CPU_ARCH_ARMv6;
+               else
+@@ -479,7 +485,7 @@ static void __init setup_processor(void)
+       __cpu_architecture = __get_cpu_architecture();
+ #ifdef MULTI_CPU
+-      processor = *list->proc;
++      memcpy((void *)&processor, list->proc, sizeof processor);
+ #endif
+ #ifdef MULTI_TLB
+       cpu_tlb = *list->tlb;
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index 5a42c12..a2bb7c6 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -45,8 +45,6 @@ static const unsigned long sigreturn_codes[7] = {
+       MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
+ };
+-static unsigned long signal_return_offset;
+-
+ #ifdef CONFIG_CRUNCH
+ static int preserve_crunch_context(struct crunch_sigframe __user *frame)
+ {
+@@ -406,8 +404,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
+                        * except when the MPU has protected the vectors
+                        * page from PL0
+                        */
+-                      retcode = mm->context.sigpage + signal_return_offset +
+-                                (idx << 2) + thumb;
++                      retcode = mm->context.sigpage + (idx << 2) + thumb;
+               } else
+ #endif
+               {
+@@ -611,33 +608,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+       } while (thread_flags & _TIF_WORK_MASK);
+       return 0;
+ }
+-
+-struct page *get_signal_page(void)
+-{
+-      unsigned long ptr;
+-      unsigned offset;
+-      struct page *page;
+-      void *addr;
+-
+-      page = alloc_pages(GFP_KERNEL, 0);
+-
+-      if (!page)
+-              return NULL;
+-
+-      addr = page_address(page);
+-
+-      /* Give the signal return code some randomness */
+-      offset = 0x200 + (get_random_int() & 0x7fc);
+-      signal_return_offset = offset;
+-
+-      /*
+-       * Copy signal return handlers into the vector page, and
+-       * set sigreturn to be a pointer to these.
+-       */
+-      memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
+-
+-      ptr = (unsigned long)addr + offset;
+-      flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+-
+-      return page;
+-}
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index 5919eb4..b5d6dfe 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -70,7 +70,7 @@ enum ipi_msg_type {
+ static DECLARE_COMPLETION(cpu_running);
+-static struct smp_operations smp_ops;
++static struct smp_operations smp_ops __read_only;
+ void __init smp_set_ops(struct smp_operations *ops)
+ {
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 6b9567e..b8af2d6 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -55,7 +55,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
+ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
+ {
+ #ifdef CONFIG_KALLSYMS
+-      printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
++      printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
+ #else
+       printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
+ #endif
+@@ -257,6 +257,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+ static int die_owner = -1;
+ static unsigned int die_nest_count;
++extern void gr_handle_kernel_exploit(void);
++
+ static unsigned long oops_begin(void)
+ {
+       int cpu;
+@@ -299,6 +301,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+               panic("Fatal exception in interrupt");
+       if (panic_on_oops)
+               panic("Fatal exception");
++
++      gr_handle_kernel_exploit();
++
+       if (signr)
+               do_exit(signr);
+ }
+@@ -592,7 +597,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
+                        * The user helper at 0xffff0fe0 must be used instead.
+                        * (see entry-armv.S for details)
+                        */
++                      pax_open_kernel();
+                       *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
++                      pax_close_kernel();
+               }
+               return 0;
+@@ -848,5 +855,9 @@ void __init early_trap_init(void *vectors_base)
+       kuser_init(vectors_base);
+       flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
+-      modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
++
++#ifndef CONFIG_PAX_MEMORY_UDEREF
++      modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
++#endif
++
+ }
+diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
+index 33f2ea3..0b91824 100644
+--- a/arch/arm/kernel/vmlinux.lds.S
++++ b/arch/arm/kernel/vmlinux.lds.S
+@@ -8,7 +8,11 @@
+ #include <asm/thread_info.h>
+ #include <asm/memory.h>
+ #include <asm/page.h>
+-      
++
++#ifdef CONFIG_PAX_KERNEXEC
++#include <asm/pgtable.h>
++#endif
++
+ #define PROC_INFO                                                     \
+       . = ALIGN(4);                                                   \
+       VMLINUX_SYMBOL(__proc_info_begin) = .;                          \
+@@ -94,6 +98,11 @@ SECTIONS
+               _text = .;
+               HEAD_TEXT
+       }
++
++#ifdef CONFIG_PAX_KERNEXEC
++      . = ALIGN(1<<SECTION_SHIFT);
++#endif
++
+       .text : {                       /* Real text segment            */
+               _stext = .;             /* Text and read-only data      */
+                       __exception_text_start = .;
+@@ -116,6 +125,8 @@ SECTIONS
+                       ARM_CPU_KEEP(PROC_INFO)
+       }
++      _etext = .;                     /* End of text section */
++
+       RO_DATA(PAGE_SIZE)
+       . = ALIGN(4);
+@@ -146,7 +157,9 @@ SECTIONS
+       NOTES
+-      _etext = .;                     /* End of text and rodata section */
++#ifdef CONFIG_PAX_KERNEXEC
++      . = ALIGN(1<<SECTION_SHIFT);
++#endif
+ #ifndef CONFIG_XIP_KERNEL
+       . = ALIGN(PAGE_SIZE);
+@@ -224,6 +237,11 @@ SECTIONS
+       . = PAGE_OFFSET + TEXT_OFFSET;
+ #else
+       __init_end = .;
++
++#ifdef CONFIG_PAX_KERNEXEC
++      . = ALIGN(1<<SECTION_SHIFT);
++#endif
++
+       . = ALIGN(THREAD_SIZE);
+       __data_loc = .;
+ #endif
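
The extra ALIGN(1<<SECTION_SHIFT) directives exist so that the text/rodata, init and data regions start and end on section boundaries; the free_initmem() changes in mm/init.c below then flip permissions with __section_update() at exactly that granularity.

    /* 1 << SECTION_SHIFT is the granularity free_initmem() later works at:
     *   classic short-descriptor page tables: SECTION_SHIFT == 20  -> 1 MiB
     *   LPAE:                                 SECTION_SHIFT == 21  -> 2 MiB
     * Aligning _stext/_etext/__init_end to it means a whole region can be made
     * RX or RW+XN by rewriting section entries, without splitting any section. */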
+diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
+index 14a0d98..7771a7d 100644
+--- a/arch/arm/lib/clear_user.S
++++ b/arch/arm/lib/clear_user.S
+@@ -12,14 +12,14 @@
+               .text
+-/* Prototype: int __clear_user(void *addr, size_t sz)
++/* Prototype: int ___clear_user(void *addr, size_t sz)
+  * Purpose  : clear some user memory
+  * Params   : addr - user memory address to clear
+  *          : sz   - number of bytes to clear
+  * Returns  : number of bytes NOT cleared
+  */
+ ENTRY(__clear_user_std)
+-WEAK(__clear_user)
++WEAK(___clear_user)
+               stmfd   sp!, {r1, lr}
+               mov     r2, #0
+               cmp     r1, #4
+@@ -44,7 +44,7 @@ WEAK(__clear_user)
+ USER(         strnebt r2, [r0])
+               mov     r0, #0
+               ldmfd   sp!, {r1, pc}
+-ENDPROC(__clear_user)
++ENDPROC(___clear_user)
+ ENDPROC(__clear_user_std)
+               .pushsection .fixup,"ax"
+diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
+index 66a477a..bee61d3 100644
+--- a/arch/arm/lib/copy_from_user.S
++++ b/arch/arm/lib/copy_from_user.S
+@@ -16,7 +16,7 @@
+ /*
+  * Prototype:
+  *
+- *    size_t __copy_from_user(void *to, const void *from, size_t n)
++ *    size_t ___copy_from_user(void *to, const void *from, size_t n)
+  *
+  * Purpose:
+  *
+@@ -84,11 +84,11 @@
+       .text
+-ENTRY(__copy_from_user)
++ENTRY(___copy_from_user)
+ #include "copy_template.S"
+-ENDPROC(__copy_from_user)
++ENDPROC(___copy_from_user)
+       .pushsection .fixup,"ax"
+       .align 0
+diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
+index 6ee2f67..d1cce76 100644
+--- a/arch/arm/lib/copy_page.S
++++ b/arch/arm/lib/copy_page.S
+@@ -10,6 +10,7 @@
+  *  ASM optimised string functions
+  */
+ #include <linux/linkage.h>
++#include <linux/const.h>
+ #include <asm/assembler.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/cache.h>
+diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
+index d066df6..df28194 100644
+--- a/arch/arm/lib/copy_to_user.S
++++ b/arch/arm/lib/copy_to_user.S
+@@ -16,7 +16,7 @@
+ /*
+  * Prototype:
+  *
+- *    size_t __copy_to_user(void *to, const void *from, size_t n)
++ *    size_t ___copy_to_user(void *to, const void *from, size_t n)
+  *
+  * Purpose:
+  *
+@@ -88,11 +88,11 @@
+       .text
+ ENTRY(__copy_to_user_std)
+-WEAK(__copy_to_user)
++WEAK(___copy_to_user)
+ #include "copy_template.S"
+-ENDPROC(__copy_to_user)
++ENDPROC(___copy_to_user)
+ ENDPROC(__copy_to_user_std)
+       .pushsection .fixup,"ax"
+diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
+index 7d08b43..f7ca7ea 100644
+--- a/arch/arm/lib/csumpartialcopyuser.S
++++ b/arch/arm/lib/csumpartialcopyuser.S
+@@ -57,8 +57,8 @@
+  *  Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
+  */
+-#define FN_ENTRY      ENTRY(csum_partial_copy_from_user)
+-#define FN_EXIT               ENDPROC(csum_partial_copy_from_user)
++#define FN_ENTRY      ENTRY(__csum_partial_copy_from_user)
++#define FN_EXIT               ENDPROC(__csum_partial_copy_from_user)
+ #include "csumpartialcopygeneric.S"
+diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
+index 64dbfa5..84a3fd9 100644
+--- a/arch/arm/lib/delay.c
++++ b/arch/arm/lib/delay.c
+@@ -28,7 +28,7 @@
+ /*
+  * Default to the loop-based delay implementation.
+  */
+-struct arm_delay_ops arm_delay_ops = {
++struct arm_delay_ops arm_delay_ops __read_only = {
+       .delay          = __loop_delay,
+       .const_udelay   = __loop_const_udelay,
+       .udelay         = __loop_udelay,
+diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
+index 025f742..8432b08 100644
+--- a/arch/arm/lib/uaccess_with_memcpy.c
++++ b/arch/arm/lib/uaccess_with_memcpy.c
+@@ -104,7 +104,7 @@ out:
+ }
+ unsigned long
+-__copy_to_user(void __user *to, const void *from, unsigned long n)
++___copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+       /*
+        * This test is stubbed out of the main function above to keep
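
The extra leading underscore on ___clear_user(), ___copy_from_user(), ___copy_to_user() and __csum_partial_copy_from_user() frees up the original names so that checking wrappers can be layered in front of the raw copy routines; the wrappers themselves are added in the uaccess changes elsewhere in the patch, not in this section. The intended shape is roughly the following sketch, where check_object_size() stands for the USERCOPY bounds check and is assumed here:

    /* sketch of the wrapper layering the renames make room for */
    static inline unsigned long copy_to_user_checked(void __user *to,
                                                     const void *from,
                                                     unsigned long n)
    {
            if (access_ok(VERIFY_WRITE, to, n)) {
                    check_object_size(from, n, true);   /* reject over-long copies    */
                    n = ___copy_to_user(to, from, n);   /* raw, unchecked copier      */
            }
            return n;                                   /* bytes NOT copied, as usual */
    }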
+diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
+index f389228..592ef66 100644
+--- a/arch/arm/mach-kirkwood/common.c
++++ b/arch/arm/mach-kirkwood/common.c
+@@ -149,7 +149,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
+       clk_gate_ops.disable(hw);
+ }
+-static struct clk_ops clk_gate_fn_ops;
++static int clk_gate_fn_is_enabled(struct clk_hw *hw)
++{
++      return clk_gate_ops.is_enabled(hw);
++}
++
++static struct clk_ops clk_gate_fn_ops = {
++      .enable = clk_gate_fn_enable,
++      .disable = clk_gate_fn_disable,
++      .is_enabled = clk_gate_fn_is_enabled,
++};
+ static struct clk __init *clk_register_gate_fn(struct device *dev,
+               const char *name,
+@@ -183,14 +192,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
+       gate_fn->fn_en = fn_en;
+       gate_fn->fn_dis = fn_dis;
+-      /* ops is the gate ops, but with our enable/disable functions */
+-      if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
+-          clk_gate_fn_ops.disable != clk_gate_fn_disable) {
+-              clk_gate_fn_ops = clk_gate_ops;
+-              clk_gate_fn_ops.enable = clk_gate_fn_enable;
+-              clk_gate_fn_ops.disable = clk_gate_fn_disable;
+-      }
+-
+       clk = clk_register(dev, &gate_fn->gate.hw);
+       if (IS_ERR(clk))
+diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
+index f6eeb87..cc90868 100644
+--- a/arch/arm/mach-omap2/board-n8x0.c
++++ b/arch/arm/mach-omap2/board-n8x0.c
+@@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
+ }
+ #endif
+-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
++static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
+       .late_init = n8x0_menelaus_late_init,
+ };
+diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
+index 6c4da12..d9ca72d 100644
+--- a/arch/arm/mach-omap2/gpmc.c
++++ b/arch/arm/mach-omap2/gpmc.c
+@@ -147,7 +147,6 @@ struct omap3_gpmc_regs {
+ };
+ static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
+-static struct irq_chip gpmc_irq_chip;
+ static unsigned gpmc_irq_start;
+ static struct resource        gpmc_mem_root;
+@@ -711,6 +710,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
+ static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
++static struct irq_chip gpmc_irq_chip = {
++      .name = "gpmc",
++      .irq_startup = gpmc_irq_noop_ret,
++      .irq_enable = gpmc_irq_enable,
++      .irq_disable = gpmc_irq_disable,
++      .irq_shutdown = gpmc_irq_noop,
++      .irq_ack = gpmc_irq_noop,
++      .irq_mask = gpmc_irq_noop,
++      .irq_unmask = gpmc_irq_noop,
++
++};
++
+ static int gpmc_setup_irq(void)
+ {
+       int i;
+@@ -725,15 +736,6 @@ static int gpmc_setup_irq(void)
+               return gpmc_irq_start;
+       }
+-      gpmc_irq_chip.name = "gpmc";
+-      gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
+-      gpmc_irq_chip.irq_enable = gpmc_irq_enable;
+-      gpmc_irq_chip.irq_disable = gpmc_irq_disable;
+-      gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
+-      gpmc_irq_chip.irq_ack = gpmc_irq_noop;
+-      gpmc_irq_chip.irq_mask = gpmc_irq_noop;
+-      gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
+-
+       gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
+       gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
+diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
+index f8bb3b9..831e7b8 100644
+--- a/arch/arm/mach-omap2/omap-wakeupgen.c
++++ b/arch/arm/mach-omap2/omap-wakeupgen.c
+@@ -339,7 +339,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __refdata irq_hotplug_notifier = {
++static struct notifier_block irq_hotplug_notifier = {
+       .notifier_call = irq_cpu_hotplug_notify,
+ };
+diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
+index e6d2307..d057195 100644
+--- a/arch/arm/mach-omap2/omap_device.c
++++ b/arch/arm/mach-omap2/omap_device.c
+@@ -499,7 +499,7 @@ void omap_device_delete(struct omap_device *od)
+ struct platform_device __init *omap_device_build(const char *pdev_name,
+                                                int pdev_id,
+                                                struct omap_hwmod *oh,
+-                                               void *pdata, int pdata_len)
++                                               const void *pdata, int pdata_len)
+ {
+       struct omap_hwmod *ohs[] = { oh };
+@@ -527,7 +527,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
+ struct platform_device __init *omap_device_build_ss(const char *pdev_name,
+                                                   int pdev_id,
+                                                   struct omap_hwmod **ohs,
+-                                                  int oh_cnt, void *pdata,
++                                                  int oh_cnt, const void *pdata,
+                                                   int pdata_len)
+ {
+       int ret = -ENOMEM;
+diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
+index 044c31d..2ee0861 100644
+--- a/arch/arm/mach-omap2/omap_device.h
++++ b/arch/arm/mach-omap2/omap_device.h
+@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
+ /* Core code interface */
+ struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
+-                                        struct omap_hwmod *oh, void *pdata,
++                                        struct omap_hwmod *oh, const void *pdata,
+                                         int pdata_len);
+ struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
+                                        struct omap_hwmod **oh, int oh_cnt,
+-                                       void *pdata, int pdata_len);
++                                       const void *pdata, int pdata_len);
+ struct omap_device *omap_device_alloc(struct platform_device *pdev,
+                                     struct omap_hwmod **ohs, int oh_cnt);
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index 7341eff..fd75e34 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
+       int (*init_clkdm)(struct omap_hwmod *oh);
+       void (*update_context_lost)(struct omap_hwmod *oh);
+       int (*get_context_lost)(struct omap_hwmod *oh);
+-};
++} __no_const;
+ /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
+-static struct omap_hwmod_soc_ops soc_ops;
++static struct omap_hwmod_soc_ops soc_ops __read_only;
+ /* omap_hwmod_list contains all registered struct omap_hwmods */
+ static LIST_HEAD(omap_hwmod_list);
+diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
+index d15c7bb..b2d1f0c 100644
+--- a/arch/arm/mach-omap2/wd_timer.c
++++ b/arch/arm/mach-omap2/wd_timer.c
+@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
+       struct omap_hwmod *oh;
+       char *oh_name = "wd_timer2";
+       char *dev_name = "omap_wdt";
+-      struct omap_wd_timer_platform_data pdata;
++      static struct omap_wd_timer_platform_data pdata = {
++              .read_reset_sources = prm_read_reset_sources
++      };
+       if (!cpu_class_is_omap2() || of_have_populated_dt())
+               return 0;
+@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
+               return -EINVAL;
+       }
+-      pdata.read_reset_sources = prm_read_reset_sources;
+-
+       pdev = omap_device_build(dev_name, id, oh, &pdata,
+                                sizeof(struct omap_wd_timer_platform_data));
+       WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
+diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
+index 0cdba8d..297993e 100644
+--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
++++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
+@@ -181,7 +181,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
+       bool entered_lp2 = false;
+       if (tegra_pending_sgi())
+-              ACCESS_ONCE(abort_flag) = true;
++              ACCESS_ONCE_RW(abort_flag) = true;
+       cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
+diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
+index cad3ca86..1d79e0f 100644
+--- a/arch/arm/mach-ux500/setup.h
++++ b/arch/arm/mach-ux500/setup.h
+@@ -37,13 +37,6 @@ extern void ux500_timer_init(void);
+       .type           = MT_DEVICE,            \
+ }
+-#define __MEM_DEV_DESC(x, sz) {               \
+-      .virtual        = IO_ADDRESS(x),        \
+-      .pfn            = __phys_to_pfn(x),     \
+-      .length         = sz,                   \
+-      .type           = MT_MEMORY,            \
+-}
+-
+ extern struct smp_operations ux500_smp_ops;
+ extern void ux500_cpu_die(unsigned int cpu);
+diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
+index 2950082..d0f0782 100644
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -436,7 +436,7 @@ config CPU_32v5
+ config CPU_32v6
+       bool
+-      select CPU_USE_DOMAINS if CPU_V6 && MMU
++      select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+       select TLS_REG_EMUL if !CPU_32v6K && !MMU
+ config CPU_32v6K
+@@ -585,6 +585,7 @@ config CPU_CP15_MPU
+ config CPU_USE_DOMAINS
+       bool
++      depends on !ARM_LPAE && !PAX_KERNEXEC
+       help
+         This option enables or disables the use of domain switching
+         via the set_fs() function.
+@@ -780,6 +781,7 @@ config NEED_KUSER_HELPERS
+ config KUSER_HELPERS
+       bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+       default y
++      depends on !(CPU_V6 || CPU_V6K || CPU_V7)
+       help
+         Warning: disabling this option may break user programs.
+@@ -790,7 +792,7 @@ config KUSER_HELPERS
+         run on ARMv4 through to ARMv7 without modification.
+         However, the fixed address nature of these helpers can be used
+-        by ROP (return orientated programming) authors when creating
++        by ROP (Return Oriented Programming) authors when creating
+         exploits.
+         If all of the binaries and libraries which run on your platform
+diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
+index 6f4585b..7b6f52b 100644
+--- a/arch/arm/mm/alignment.c
++++ b/arch/arm/mm/alignment.c
+@@ -211,10 +211,12 @@ union offset_union {
+ #define __get16_unaligned_check(ins,val,addr)                 \
+       do {                                                    \
+               unsigned int err = 0, v, a = addr;              \
++              pax_open_userland();                            \
+               __get8_unaligned_check(ins,v,a,err);            \
+               val =  v << ((BE) ? 8 : 0);                     \
+               __get8_unaligned_check(ins,v,a,err);            \
+               val |= v << ((BE) ? 0 : 8);                     \
++              pax_close_userland();                           \
+               if (err)                                        \
+                       goto fault;                             \
+       } while (0)
+@@ -228,6 +230,7 @@ union offset_union {
+ #define __get32_unaligned_check(ins,val,addr)                 \
+       do {                                                    \
+               unsigned int err = 0, v, a = addr;              \
++              pax_open_userland();                            \
+               __get8_unaligned_check(ins,v,a,err);            \
+               val =  v << ((BE) ? 24 :  0);                   \
+               __get8_unaligned_check(ins,v,a,err);            \
+@@ -236,6 +239,7 @@ union offset_union {
+               val |= v << ((BE) ?  8 : 16);                   \
+               __get8_unaligned_check(ins,v,a,err);            \
+               val |= v << ((BE) ?  0 : 24);                   \
++              pax_close_userland();                           \
+               if (err)                                        \
+                       goto fault;                             \
+       } while (0)
+@@ -249,6 +253,7 @@ union offset_union {
+ #define __put16_unaligned_check(ins,val,addr)                 \
+       do {                                                    \
+               unsigned int err = 0, v = val, a = addr;        \
++              pax_open_userland();                            \
+               __asm__( FIRST_BYTE_16                          \
+        ARM(   "1:     "ins"   %1, [%2], #1\n" )               \
+        THUMB( "1:     "ins"   %1, [%2]\n"     )               \
+@@ -268,6 +273,7 @@ union offset_union {
+               "       .popsection\n"                          \
+               : "=r" (err), "=&r" (v), "=&r" (a)              \
+               : "0" (err), "1" (v), "2" (a));                 \
++              pax_close_userland();                           \
+               if (err)                                        \
+                       goto fault;                             \
+       } while (0)
+@@ -281,6 +287,7 @@ union offset_union {
+ #define __put32_unaligned_check(ins,val,addr)                 \
+       do {                                                    \
+               unsigned int err = 0, v = val, a = addr;        \
++              pax_open_userland();                            \
+               __asm__( FIRST_BYTE_32                          \
+        ARM(   "1:     "ins"   %1, [%2], #1\n" )               \
+        THUMB( "1:     "ins"   %1, [%2]\n"     )               \
+@@ -310,6 +317,7 @@ union offset_union {
+               "       .popsection\n"                          \
+               : "=r" (err), "=&r" (v), "=&r" (a)              \
+               : "0" (err), "1" (v), "2" (a));                 \
++              pax_close_userland();                           \
+               if (err)                                        \
+                       goto fault;                             \
+       } while (0)
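
These macro changes are the C-side counterpart of the assembler bracketing added in entry-armv.S: with UDEREF active the user domain is DOMAIN_NOACCESS while running kernel code, so the alignment fixup's deliberate byte-by-byte loads and stores of userland memory must be wrapped in pax_open_userland()/pax_close_userland(). Any equivalent helper would follow the same shape (sketch):

    /* sketch: an intentional userland dereference under UDEREF */
    static int read_user_byte(const unsigned char __user *p, unsigned char *out)
    {
            int err;

            pax_open_userland();            /* re-grant access to DOMAIN_USER */
            err = __get_user(*out, p);      /* would domain-fault otherwise   */
            pax_close_userland();
            return err;
    }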
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index 5dbf13f..ee1ec24 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -25,6 +25,7 @@
+ #include <asm/system_misc.h>
+ #include <asm/system_info.h>
+ #include <asm/tlbflush.h>
++#include <asm/sections.h>
+ #include "fault.h"
+@@ -138,6 +139,20 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
+       if (fixup_exception(regs))
+               return;
++#ifdef CONFIG_PAX_KERNEXEC
++      if ((fsr & FSR_WRITE) &&
++          (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
++           (MODULES_VADDR <= addr && addr < MODULES_END)))
++      {
++              if (current->signal->curr_ip)
++                      printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
++                                      from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
++              else
++                      printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
++                                      from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
++      }
++#endif
++
+       /*
+        * No handler, we'll have to terminate things with extreme prejudice.
+        */
+@@ -174,6 +189,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
+       }
+ #endif
++#ifdef CONFIG_PAX_PAGEEXEC
++      if (fsr & FSR_LNX_PF) {
++              pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
++              do_group_exit(SIGKILL);
++      }
++#endif
++
+       tsk->thread.address = addr;
+       tsk->thread.error_code = fsr;
+       tsk->thread.trap_no = 14;
+@@ -398,6 +420,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ }
+ #endif                                        /* CONFIG_MMU */
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++      long i;
++
++      printk(KERN_ERR "PAX: bytes at PC: ");
++      for (i = 0; i < 20; i++) {
++              unsigned char c;
++              if (get_user(c, (__force unsigned char __user *)pc+i))
++                      printk(KERN_CONT "?? ");
++              else
++                      printk(KERN_CONT "%02x ", c);
++      }
++      printk("\n");
++
++      printk(KERN_ERR "PAX: bytes at SP-4: ");
++      for (i = -1; i < 20; i++) {
++              unsigned long c;
++              if (get_user(c, (__force unsigned long __user *)sp+i))
++                      printk(KERN_CONT "???????? ");
++              else
++                      printk(KERN_CONT "%08lx ", c);
++      }
++      printk("\n");
++}
++#endif
++
+ /*
+  * First Level Translation Fault Handler
+  *
+@@ -543,9 +592,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+       const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
+       struct siginfo info;
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (addr < TASK_SIZE && is_domain_fault(fsr)) {
++              if (current->signal->curr_ip)
++                      printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
++                                      from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
++              else
++                      printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
++                                      from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
++              goto die;
++      }
++#endif
++
+       if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
+               return;
++die:
+       printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
+               inf->name, fsr, addr);
+@@ -569,15 +631,67 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
+       ifsr_info[nr].name = name;
+ }
++asmlinkage int sys_sigreturn(struct pt_regs *regs);
++asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
++
+ asmlinkage void __exception
+ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
+ {
+       const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
+       struct siginfo info;
++      if (user_mode(regs)) {
++              unsigned long sigpage = current->mm->context.sigpage;
++
++              if (sigpage <= addr && addr < sigpage + 7*4) {
++                      if (addr < sigpage + 3*4)
++                              sys_sigreturn(regs);
++                      else
++                              sys_rt_sigreturn(regs);
++                      return;
++              }
++              if (addr == 0xffff0fe0UL) {
++                      /*
++                       * PaX: __kuser_get_tls emulation
++                       */
++                      regs->ARM_r0 = current_thread_info()->tp_value;
++                      regs->ARM_pc = regs->ARM_lr;
++                      return;
++              }
++      }
++
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
++              if (current->signal->curr_ip)
++                      printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
++                                      from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
++                                      addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
++              else
++                      printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
++                                      from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
++                                      addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
++              goto die;
++      }
++#endif
++
++#ifdef CONFIG_PAX_REFCOUNT
++      if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
++              unsigned int bkpt;
++
++              if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
++                      current->thread.error_code = ifsr;
++                      current->thread.trap_no = 0;
++                      pax_report_refcount_overflow(regs);
++                      fixup_exception(regs);
++                      return;
++              }
++      }
++#endif
++
+       if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
+               return;
++die:
+       printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
+               inf->name, ifsr, addr);
+diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
+index cf08bdf..772656c 100644
+--- a/arch/arm/mm/fault.h
++++ b/arch/arm/mm/fault.h
+@@ -3,6 +3,7 @@
+ /*
+  * Fault status register encodings.  We steal bit 31 for our own purposes.
++ * Set when the FSR value is from an instruction fault.
+  */
+ #define FSR_LNX_PF            (1 << 31)
+ #define FSR_WRITE             (1 << 11)
+@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
+ }
+ #endif
++/* valid for LPAE and !LPAE */
++static inline int is_xn_fault(unsigned int fsr)
++{
++      return ((fsr_fs(fsr) & 0x3c) == 0xc);
++}
++
++static inline int is_domain_fault(unsigned int fsr)
++{
++      return ((fsr_fs(fsr) & 0xD) == 0x9);
++}
++
+ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
+ unsigned long search_exception_table(unsigned long addr);
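
The two helpers decode the fault status that the data/prefetch abort handlers already extract with fsr_fs(). The masks follow directly from the ARM fault-status encodings:

    /* short-descriptor FS[4:0] values of interest:
     *   0b01001 domain fault, section      0b01011 domain fault, page
     *   0b01101 permission fault, section  0b01111 permission fault, page
     * (fs & 0xD)  == 0x9  matches exactly the two domain-fault codes;
     * (fs & 0x3c) == 0xc  matches the 0b011xx group, i.e. the permission faults
     * raised by an XN or read-only violation (for LPAE this group, 0b0011LL, is
     * precisely "permission fault, level 0-3", hence "valid for LPAE and !LPAE"). */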
+diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
+index 0ecc43f..190b956 100644
+--- a/arch/arm/mm/init.c
++++ b/arch/arm/mm/init.c
+@@ -30,6 +30,8 @@
+ #include <asm/setup.h>
+ #include <asm/tlb.h>
+ #include <asm/fixmap.h>
++#include <asm/system_info.h>
++#include <asm/cp15.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+@@ -726,7 +728,46 @@ void free_initmem(void)
+ {
+ #ifdef CONFIG_HAVE_TCM
+       extern char __tcm_start, __tcm_end;
++#endif
++#ifdef CONFIG_PAX_KERNEXEC
++      unsigned long addr;
++      pgd_t *pgd;
++      pud_t *pud;
++      pmd_t *pmd;
++      int cpu_arch = cpu_architecture();
++      unsigned int cr = get_cr();
++
++      if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
++              /* make pages tables, etc before .text NX */
++              for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
++                      pgd = pgd_offset_k(addr);
++                      pud = pud_offset(pgd, addr);
++                      pmd = pmd_offset(pud, addr);
++                      __section_update(pmd, addr, PMD_SECT_XN);
++              }
++              /* make init NX */
++              for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
++                      pgd = pgd_offset_k(addr);
++                      pud = pud_offset(pgd, addr);
++                      pmd = pmd_offset(pud, addr);
++                      __section_update(pmd, addr, PMD_SECT_XN);
++              }
++              /* make kernel code/rodata RX */
++              for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
++                      pgd = pgd_offset_k(addr);
++                      pud = pud_offset(pgd, addr);
++                      pmd = pmd_offset(pud, addr);
++#ifdef CONFIG_ARM_LPAE
++                      __section_update(pmd, addr, PMD_SECT_RDONLY);
++#else
++                      __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
++#endif
++              }
++      }
++#endif
++
++#ifdef CONFIG_HAVE_TCM
+       poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
+       free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link");
+ #endif
+diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
+index 04d9006..c547d85 100644
+--- a/arch/arm/mm/ioremap.c
++++ b/arch/arm/mm/ioremap.c
+@@ -392,9 +392,9 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
+       unsigned int mtype;
+       if (cached)
+-              mtype = MT_MEMORY;
++              mtype = MT_MEMORY_RX;
+       else
+-              mtype = MT_MEMORY_NONCACHED;
++              mtype = MT_MEMORY_NONCACHED_RX;
+       return __arm_ioremap_caller(phys_addr, size, mtype,
+                       __builtin_return_address(0));
+diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
+index 10062ce..8695745 100644
+--- a/arch/arm/mm/mmap.c
++++ b/arch/arm/mm/mmap.c
+@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       struct vm_area_struct *vma;
+       int do_align = 0;
+       int aliasing = cache_is_vipt_aliasing();
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       struct vm_unmapped_area_info info;
+       /*
+@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       if (len > TASK_SIZE)
+               return -ENOMEM;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+               if (do_align)
+                       addr = COLOUR_ALIGN(addr, pgoff);
+@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+                       addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (TASK_SIZE - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       info.high_limit = TASK_SIZE;
+       info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+       info.align_offset = pgoff << PAGE_SHIFT;
++      info.threadstack_offset = offset;
+       return vm_unmapped_area(&info);
+ }
+@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       unsigned long addr = addr0;
+       int do_align = 0;
+       int aliasing = cache_is_vipt_aliasing();
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       struct vm_unmapped_area_info info;
+       /*
+@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+               return addr;
+       }
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       /* requesting a specific address */
+       if (addr) {
+               if (do_align)
+@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+               else
+                       addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (TASK_SIZE - len >= addr &&
+-                              (!vma || addr + len <= vma->vm_start))
++              if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       info.high_limit = mm->mmap_base;
+       info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+       info.align_offset = pgoff << PAGE_SHIFT;
++      info.threadstack_offset = offset;
+       addr = vm_unmapped_area(&info);
+       /*
+@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+       unsigned long random_factor = 0UL;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       /* 8 bits of randomness in 20 address space bits */
+       if ((current->flags & PF_RANDOMIZE) &&
+           !(current->personality & ADDR_NO_RANDOMIZE))
+@@ -180,10 +194,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+       if (mmap_is_legacy()) {
+               mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base += mm->delta_mmap;
++#endif
++
+               mm->get_unmapped_area = arch_get_unmapped_area;
+               mm->unmap_area = arch_unmap_area;
+       } else {
+               mm->mmap_base = mmap_base(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+               mm->unmap_area = arch_unmap_area_topdown;
+       }
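The mmap.c hunks above follow the pattern PaX uses on every architecture in this patch: gr_rand_threadstack_offset() and check_heap_stack_gap() are grsecurity helpers defined elsewhere in the patch and only change which candidate addresses are accepted, while under MF_PAX_RANDMMAP the mmap base itself is shifted by the per-exec deltas. A minimal sketch of that base-shift arithmetic, with delta_mmap/delta_stack treated as already-chosen random values (illustrative stand-ins, not the kernel structures):

#include <stdio.h>

/* Illustrative only: the real values live in mm_struct and are set at execve(). */
struct fake_mm {
    unsigned long delta_mmap;   /* random, page-aligned */
    unsigned long delta_stack;  /* random, page-aligned */
    int randmmap;               /* stands in for mm->pax_flags & MF_PAX_RANDMMAP */
};

#define TASK_UNMAPPED_BASE 0x40000000UL

/* Bottom-up (legacy) layout: the base moves up by delta_mmap. */
static unsigned long legacy_base(const struct fake_mm *mm, unsigned long random_factor)
{
    unsigned long base = TASK_UNMAPPED_BASE + random_factor;
    if (mm->randmmap)
        base += mm->delta_mmap;
    return base;
}

/* Top-down layout: the base moves down by delta_mmap + delta_stack,
 * preserving the randomized gap between the stack and the mmap area. */
static unsigned long topdown_base(const struct fake_mm *mm, unsigned long stack_top,
                                  unsigned long random_factor)
{
    unsigned long base = stack_top - random_factor;   /* stand-in for mmap_base() */
    if (mm->randmmap)
        base -= mm->delta_mmap + mm->delta_stack;
    return base;
}

int main(void)
{
    struct fake_mm mm = { .delta_mmap = 0x00a00000UL, .delta_stack = 0x00100000UL, .randmmap = 1 };
    printf("legacy base:  %#lx\n", legacy_base(&mm, 0x3000));
    printf("topdown base: %#lx\n", topdown_base(&mm, 0xbf000000UL, 0x3000));
    return 0;
}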
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index daf336f..4e6392c 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -36,6 +36,22 @@
+ #include "mm.h"
+ #include "tcm.h"
++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++void modify_domain(unsigned int dom, unsigned int type)
++{
++      struct thread_info *thread = current_thread_info();
++      unsigned int domain = thread->cpu_domain;
++      /*
++       * DOMAIN_MANAGER might be defined to some other value,
++       * use the arch-defined constant
++       */
++      domain &= ~domain_val(dom, 3);
++      thread->cpu_domain = domain | domain_val(dom, type);
++      set_domain(thread->cpu_domain);
++}
++EXPORT_SYMBOL(modify_domain);
++#endif
++
+ /*
+  * empty_zero_page is a special page that is used for
+  * zero-initialized data and COW.
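modify_domain() updates one two-bit field of the cached DACR value and writes it back with set_domain(). The masking with domain_val(dom, 3) assumes the stock ARM definition, domain_val(dom, type) expanding to type << (2 * dom), so clearing with 3 isolates exactly the access-control bits of that domain. The same arithmetic on a plain integer:

#include <stdio.h>

/* Same layout as the ARM Domain Access Control Register: 16 domains, 2 bits each. */
#define domain_val(dom, type)  ((unsigned int)(type) << (2 * (dom)))

#define DOMAIN_NOACCESS 0
#define DOMAIN_CLIENT   1
#define DOMAIN_MANAGER  3

static unsigned int modify_domain_bits(unsigned int dacr, unsigned int dom, unsigned int type)
{
    dacr &= ~domain_val(dom, 3);          /* clear the 2-bit field for this domain */
    return dacr | domain_val(dom, type);  /* install the new access type */
}

int main(void)
{
    unsigned int dacr = domain_val(0, DOMAIN_CLIENT) | domain_val(1, DOMAIN_CLIENT);
    dacr = modify_domain_bits(dacr, 1, DOMAIN_MANAGER);
    printf("DACR = %#x\n", dacr);  /* domain 1 is now manager (0b11), domain 0 unchanged */
    return 0;
}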
+@@ -228,10 +244,18 @@ __setup("noalign", noalign_setup);
+ #endif /* ifdef CONFIG_CPU_CP15 / else */
+-#define PROT_PTE_DEVICE               L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
++#define PROT_PTE_DEVICE               L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
+ #define PROT_SECT_DEVICE      PMD_TYPE_SECT|PMD_SECT_AP_WRITE
+-static struct mem_type mem_types[] = {
++#ifdef CONFIG_PAX_KERNEXEC
++#define L_PTE_KERNEXEC                L_PTE_RDONLY
++#define PMD_SECT_KERNEXEC     PMD_SECT_RDONLY
++#else
++#define L_PTE_KERNEXEC                L_PTE_DIRTY
++#define PMD_SECT_KERNEXEC     PMD_SECT_AP_WRITE
++#endif
++
++static struct mem_type mem_types[] __read_only = {
+       [MT_DEVICE] = {           /* Strongly ordered / ARMv6 shared device */
+               .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+                                 L_PTE_SHARED,
+@@ -260,16 +284,16 @@ static struct mem_type mem_types[] = {
+       [MT_UNCACHED] = {
+               .prot_pte       = PROT_PTE_DEVICE,
+               .prot_l1        = PMD_TYPE_TABLE,
+-              .prot_sect      = PMD_TYPE_SECT | PMD_SECT_XN,
++              .prot_sect      = PROT_SECT_DEVICE,
+               .domain         = DOMAIN_IO,
+       },
+       [MT_CACHECLEAN] = {
+-              .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
++              .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
+               .domain    = DOMAIN_KERNEL,
+       },
+ #ifndef CONFIG_ARM_LPAE
+       [MT_MINICLEAN] = {
+-              .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
++              .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
+               .domain    = DOMAIN_KERNEL,
+       },
+ #endif
+@@ -277,36 +301,54 @@ static struct mem_type mem_types[] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                               L_PTE_RDONLY,
+               .prot_l1   = PMD_TYPE_TABLE,
+-              .domain    = DOMAIN_USER,
++              .domain    = DOMAIN_VECTORS,
+       },
+       [MT_HIGH_VECTORS] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                               L_PTE_USER | L_PTE_RDONLY,
+               .prot_l1   = PMD_TYPE_TABLE,
+-              .domain    = DOMAIN_USER,
++              .domain    = DOMAIN_VECTORS,
+       },
+-      [MT_MEMORY] = {
++      [MT_MEMORY_RWX] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+               .prot_l1   = PMD_TYPE_TABLE,
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+               .domain    = DOMAIN_KERNEL,
+       },
++      [MT_MEMORY_RW] = {
++              .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
++              .prot_l1   = PMD_TYPE_TABLE,
++              .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
++              .domain    = DOMAIN_KERNEL,
++      },
++      [MT_MEMORY_RX] = {
++              .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
++              .prot_l1   = PMD_TYPE_TABLE,
++              .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
++              .domain    = DOMAIN_KERNEL,
++      },
+       [MT_ROM] = {
+-              .prot_sect = PMD_TYPE_SECT,
++              .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
+               .domain    = DOMAIN_KERNEL,
+       },
+-      [MT_MEMORY_NONCACHED] = {
++      [MT_MEMORY_NONCACHED_RW] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                               L_PTE_MT_BUFFERABLE,
+               .prot_l1   = PMD_TYPE_TABLE,
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+               .domain    = DOMAIN_KERNEL,
+       },
++      [MT_MEMORY_NONCACHED_RX] = {
++              .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
++                              L_PTE_MT_BUFFERABLE,
++              .prot_l1   = PMD_TYPE_TABLE,
++              .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
++              .domain    = DOMAIN_KERNEL,
++      },
+       [MT_MEMORY_DTCM] = {
+-              .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+-                              L_PTE_XN,
++              .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+               .prot_l1   = PMD_TYPE_TABLE,
+-              .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
++              .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
+               .domain    = DOMAIN_KERNEL,
+       },
+       [MT_MEMORY_ITCM] = {
+@@ -316,10 +358,10 @@ static struct mem_type mem_types[] = {
+       },
+       [MT_MEMORY_SO] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+-                              L_PTE_MT_UNCACHED | L_PTE_XN,
++                              L_PTE_MT_UNCACHED,
+               .prot_l1   = PMD_TYPE_TABLE,
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
+-                              PMD_SECT_UNCACHED | PMD_SECT_XN,
++                              PMD_SECT_UNCACHED,
+               .domain    = DOMAIN_KERNEL,
+       },
+       [MT_MEMORY_DMA_READY] = {
+@@ -405,9 +447,35 @@ static void __init build_mem_type_table(void)
+                        * to prevent speculative instruction fetches.
+                        */
+                       mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
++                      mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
+                       mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
++                      mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
+                       mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
++                      mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
+                       mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
++                      mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
++
++                      /* Mark other regions on ARMv6+ as execute-never */
++
++#ifdef CONFIG_PAX_KERNEXEC
++                      mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
++                      mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
++                      mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
++                      mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
++#ifndef CONFIG_ARM_LPAE
++                      mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
++                      mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
++#endif
++                      mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
++                      mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
++                      mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
++                      mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_XN;
++                      mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
++                      mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
++#endif
++
++                      mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
++                      mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
+               }
+               if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+                       /*
+@@ -468,6 +536,9 @@ static void __init build_mem_type_table(void)
+                * from SVC mode and no access from userspace.
+                */
+               mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++#ifdef CONFIG_PAX_KERNEXEC
++              mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++#endif
+               mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+               mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+ #endif
+@@ -485,11 +556,17 @@ static void __init build_mem_type_table(void)
+                       mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+                       mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+                       mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
+-                      mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+-                      mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
++                      mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
++                      mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
++                      mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
++                      mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
++                      mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
++                      mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
+                       mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
+-                      mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+-                      mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
++                      mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
++                      mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
++                      mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
++                      mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
+               }
+       }
+@@ -500,15 +577,20 @@ static void __init build_mem_type_table(void)
+       if (cpu_arch >= CPU_ARCH_ARMv6) {
+               if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+                       /* Non-cacheable Normal is XCB = 001 */
+-                      mem_types[MT_MEMORY_NONCACHED].prot_sect |=
++                      mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
++                              PMD_SECT_BUFFERED;
++                      mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
+                               PMD_SECT_BUFFERED;
+               } else {
+                       /* For both ARMv6 and non-TEX-remapping ARMv7 */
+-                      mem_types[MT_MEMORY_NONCACHED].prot_sect |=
++                      mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
++                              PMD_SECT_TEX(1);
++                      mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
+                               PMD_SECT_TEX(1);
+               }
+       } else {
+-              mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
++              mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
++              mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
+       }
+ #ifdef CONFIG_ARM_LPAE
+@@ -524,6 +606,8 @@ static void __init build_mem_type_table(void)
+       vecs_pgprot |= PTE_EXT_AF;
+ #endif
++      user_pgprot |= __supported_pte_mask;
++
+       for (i = 0; i < 16; i++) {
+               pteval_t v = pgprot_val(protection_map[i]);
+               protection_map[i] = __pgprot(v | user_pgprot);
+@@ -541,10 +625,15 @@ static void __init build_mem_type_table(void)
+       mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
+       mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
+-      mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
+-      mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
++      mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
++      mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
++      mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
++      mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
++      mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
++      mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
+       mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
+-      mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
++      mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
++      mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
+       mem_types[MT_ROM].prot_sect |= cp->pmd;
+       switch (cp->pmd) {
+@@ -1166,18 +1255,15 @@ void __init arm_mm_memblock_reserve(void)
+  * called function.  This means you can't use any function or debugging
+  * method which may touch any device, otherwise the kernel _will_ crash.
+  */
++
++static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
++
+ static void __init devicemaps_init(struct machine_desc *mdesc)
+ {
+       struct map_desc map;
+       unsigned long addr;
+-      void *vectors;
+-      /*
+-       * Allocate the vector page early.
+-       */
+-      vectors = early_alloc(PAGE_SIZE * 2);
+-
+-      early_trap_init(vectors);
++      early_trap_init(&vectors);
+       for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
+               pmd_clear(pmd_off_k(addr));
+@@ -1217,7 +1303,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
+        * location (0xffff0000).  If we aren't using high-vectors, also
+        * create a mapping at the low-vectors virtual address.
+        */
+-      map.pfn = __phys_to_pfn(virt_to_phys(vectors));
++      map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
+       map.virtual = 0xffff0000;
+       map.length = PAGE_SIZE;
+ #ifdef CONFIG_KUSER_HELPERS
+@@ -1287,8 +1373,39 @@ static void __init map_lowmem(void)
+               map.pfn = __phys_to_pfn(start);
+               map.virtual = __phys_to_virt(start);
+               map.length = end - start;
+-              map.type = MT_MEMORY;
++#ifdef CONFIG_PAX_KERNEXEC
++              if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
++                      struct map_desc kernel;
++                      struct map_desc initmap;
++
++                      /* when freeing initmem we will make this RW */
++                      initmap.pfn = __phys_to_pfn(__pa(__init_begin));
++                      initmap.virtual = (unsigned long)__init_begin;
++                      initmap.length = _sdata - __init_begin;
++                      initmap.type = MT_MEMORY_RWX;
++                      create_mapping(&initmap);
++
++                      /* when freeing initmem we will make this RX */
++                      kernel.pfn = __phys_to_pfn(__pa(_stext));
++                      kernel.virtual = (unsigned long)_stext;
++                      kernel.length = __init_begin - _stext;
++                      kernel.type = MT_MEMORY_RWX;
++                      create_mapping(&kernel);
++
++                      if (map.virtual < (unsigned long)_stext) {
++                              map.length = (unsigned long)_stext - map.virtual;
++                              map.type = MT_MEMORY_RWX;
++                              create_mapping(&map);
++                      }
++
++                      map.pfn = __phys_to_pfn(__pa(_sdata));
++                      map.virtual = (unsigned long)_sdata;
++                      map.length = end - __pa(_sdata);
++              }
++#endif
++
++              map.type = MT_MEMORY_RW;
+               create_mapping(&map);
+       }
+ }
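Taken together, the mmu.c changes enforce a W^X policy on kernel mappings: the executable types (MT_MEMORY_RX, MT_MEMORY_NONCACHED_RX, MT_ROM) become read-only through L_PTE_KERNEXEC/PMD_SECT_KERNEXEC, the writable types (MT_MEMORY_RW, MT_MEMORY_NONCACHED_RW, MT_MEMORY_DTCM under KERNEXEC, the device and strongly-ordered types unconditionally) pick up L_PTE_XN/PMD_SECT_XN on ARMv6+, and map_lowmem() maps the kernel text and init ranges separately from data (RWX during boot, tightened once initmem is freed, per the comments in the hunk) so the rest of lowmem can be MT_MEMORY_RW. A small self-check of the resulting invariant over an illustrative table; the names and flags below are simplified stand-ins, not the kernel structures:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct region { const char *name; bool writable; bool executable; };

int main(void)
{
    const struct region regions[] = {
        { "MT_MEMORY_RW",           true,  false },  /* data, heap, writable lowmem */
        { "MT_MEMORY_RX",           false, true  },  /* kernel text after boot */
        { "MT_MEMORY_NONCACHED_RW", true,  false },
        { "MT_MEMORY_NONCACHED_RX", false, true  },
        { "MT_ROM",                 false, true  },
    };

    for (unsigned i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
        /* The KERNEXEC invariant: no mapping is both writable and executable. */
        assert(!(regions[i].writable && regions[i].executable));
        printf("%-24s %c%c\n", regions[i].name,
               regions[i].writable ? 'W' : '-', regions[i].executable ? 'X' : '-');
    }
    return 0;
}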
+diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
+index a5bc92d..0bb4730 100644
+--- a/arch/arm/plat-omap/sram.c
++++ b/arch/arm/plat-omap/sram.c
+@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
+        * Looks like we need to preserve some bootloader code at the
+        * beginning of SRAM for jumping to flash for reboot to work...
+        */
++      pax_open_kernel();
+       memset_io(omap_sram_base + omap_sram_skip, 0,
+                 omap_sram_size - omap_sram_skip);
++      pax_close_kernel();
+ }
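With KERNEXEC in place the SRAM mapping is no longer writable by default, so the memset_io() has to be bracketed by pax_open_kernel()/pax_close_kernel(). Those helpers are defined elsewhere in the patch (on ARM they adjust the relevant permission state); the usage pattern is simply unlock, write, relock, sketched here with hypothetical lock helpers and a flag standing in for the hardware permission:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for pax_open_kernel()/pax_close_kernel(): in the real
 * patch they temporarily make otherwise read-only kernel mappings writable. */
static int kernel_writable;
static void open_kernel(void)  { kernel_writable = 1; }
static void close_kernel(void) { kernel_writable = 0; }

static char sram_mirror[64];   /* stands in for the SRAM mapping */

static void write_protected(void *dst, int c, size_t n)
{
    if (!kernel_writable) {
        fprintf(stderr, "fault: write to read-only mapping\n");
        return;
    }
    memset(dst, c, n);
}

int main(void)
{
    write_protected(sram_mirror, 0, sizeof(sram_mirror));   /* would fault */

    open_kernel();                                           /* pax_open_kernel()  */
    write_protected(sram_mirror, 0, sizeof(sram_mirror));    /* allowed            */
    close_kernel();                                          /* pax_close_kernel() */
    return 0;
}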
+diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
+index ce6d763..cfea917 100644
+--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
++++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
+@@ -47,7 +47,7 @@ struct samsung_dma_ops {
+       int (*started)(unsigned ch);
+       int (*flush)(unsigned ch);
+       int (*stop)(unsigned ch);
+-};
++} __no_const;
+ extern void *samsung_dmadev_get_ops(void);
+ extern void *s3c_dma_get_ops(void);
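The __no_const annotation on samsung_dma_ops addresses the constify GCC plugin that grsecurity ships: structures consisting only of function pointers are normally made const at build time, but this ops table is selected and filled in at runtime (samsung_dmadev_get_ops() versus s3c_dma_get_ops()), so it has to opt out. A minimal illustration of the distinction, with __no_const reduced to a no-op for the sketch:

#include <stdio.h>

#define __no_const /* in the patch: tells the constify plugin to leave this type writable */

/* Ops tables fixed at build time can be const (and land in .rodata)... */
struct fixed_ops { int (*start)(unsigned ch); };
static int start_a(unsigned ch) { return (int)ch; }
static const struct fixed_ops fops = { .start = start_a };

/* ...but a table assigned at runtime must stay writable. */
struct runtime_ops { int (*start)(unsigned ch); } __no_const;
static struct runtime_ops rops;

int main(void)
{
    rops.start = start_a;          /* runtime assignment: why __no_const is needed */
    printf("%d %d\n", fops.start(1), rops.start(2));
    return 0;
}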
+diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
+index f4726dc..39ed646 100644
+--- a/arch/arm64/kernel/debug-monitors.c
++++ b/arch/arm64/kernel/debug-monitors.c
+@@ -149,7 +149,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata os_lock_nb = {
++static struct notifier_block os_lock_nb = {
+       .notifier_call = os_lock_notify,
+ };
+diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
+index 5ab825c..96aaec8 100644
+--- a/arch/arm64/kernel/hw_breakpoint.c
++++ b/arch/arm64/kernel/hw_breakpoint.c
+@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
++static struct notifier_block hw_breakpoint_reset_nb = {
+       .notifier_call = hw_breakpoint_reset_notify,
+ };
+diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
+index c3a58a1..78fbf54 100644
+--- a/arch/avr32/include/asm/cache.h
++++ b/arch/avr32/include/asm/cache.h
+@@ -1,8 +1,10 @@
+ #ifndef __ASM_AVR32_CACHE_H
+ #define __ASM_AVR32_CACHE_H
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ /*
+  * Memory returned by kmalloc() may be used for DMA, so we must make
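Every cache.h change in this patch follows one template: include <linux/const.h> and define L1_CACHE_BYTES as _AC(1,UL) << L1_CACHE_SHIFT instead of a bare int. _AC() pastes the UL suffix in C but drops it in assembly, so the constant stays usable in .S files while becoming unsigned long in C, which matters once the size-checking code added elsewhere in the patch compares copy lengths against it. The macro, paraphrased from linux/const.h:

#ifdef __ASSEMBLY__
#define _AC(X, Y)   X            /* assembler: no type suffix */
#else
#define __AC(X, Y)  (X##Y)
#define _AC(X, Y)   __AC(X, Y)   /* C: paste the suffix, e.g. 1UL */
#endif

#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)   /* unsigned long 32 in C */

_Static_assert(L1_CACHE_BYTES == 32, "line size unchanged, only the type differs");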
+diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
+index d232888..87c8df1 100644
+--- a/arch/avr32/include/asm/elf.h
++++ b/arch/avr32/include/asm/elf.h
+@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
+    the loader.  We need to make sure that it is out of the way of the program
+    that it will "exec", and that there is sufficient room for the brk.  */
+-#define ELF_ET_DYN_BASE         (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE               (TASK_SIZE / 3 * 2)
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   0x00001000UL
++
++#define PAX_DELTA_MMAP_LEN    15
++#define PAX_DELTA_STACK_LEN   15
++#endif
+ /* This yields a mask that user programs can use to figure out what
+    instruction set this CPU supports.  This could be done in user space,
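PAX_ELF_ET_DYN_BASE, PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN feed the PAX_ASLR code in the ELF loader: the two LEN values say how many bits of randomness go into mm->delta_mmap and mm->delta_stack (15 bits each here, shifted up by PAGE_SHIFT). Roughly how the loader derives the deltas elsewhere in the patch, with a stand-in for the kernel's entropy source:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT          12
#define PAX_DELTA_MMAP_LEN  15
#define PAX_DELTA_STACK_LEN 15

/* Stand-in for pax_get_random_long(); the kernel uses its own RNG. */
static unsigned long pax_get_random_long(void)
{
    return ((unsigned long)rand() << 16) ^ (unsigned long)rand();
}

int main(void)
{
    /* Keep LEN low bits of randomness, then page-align by shifting. */
    unsigned long delta_mmap  = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)  - 1)) << PAGE_SHIFT;
    unsigned long delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;

    printf("delta_mmap  = %#lx\n", delta_mmap);
    printf("delta_stack = %#lx\n", delta_stack);
    return 0;
}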
+diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
+index 479330b..53717a8 100644
+--- a/arch/avr32/include/asm/kmap_types.h
++++ b/arch/avr32/include/asm/kmap_types.h
+@@ -2,9 +2,9 @@
+ #define __ASM_AVR32_KMAP_TYPES_H
+ #ifdef CONFIG_DEBUG_HIGHMEM
+-# define KM_TYPE_NR 29
++# define KM_TYPE_NR 30
+ #else
+-# define KM_TYPE_NR 14
++# define KM_TYPE_NR 15
+ #endif
+ #endif /* __ASM_AVR32_KMAP_TYPES_H */
+diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
+index b2f2d2d..d1c85cb 100644
+--- a/arch/avr32/mm/fault.c
++++ b/arch/avr32/mm/fault.c
+@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
+ int exception_trace = 1;
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++      unsigned long i;
++
++      printk(KERN_ERR "PAX: bytes at PC: ");
++      for (i = 0; i < 20; i++) {
++              unsigned char c;
++              if (get_user(c, (unsigned char *)pc+i))
++                      printk(KERN_CONT "???????? ");
++              else
++                      printk(KERN_CONT "%02x ", c);
++      }
++      printk("\n");
++}
++#endif
++
+ /*
+  * This routine handles page faults. It determines the address and the
+  * problem, and then passes it off to one of the appropriate routines.
+@@ -174,6 +191,16 @@ bad_area:
+       up_read(&mm->mmap_sem);
+       if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++              if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++                      if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
++                              pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
++                              do_group_exit(SIGKILL);
++                      }
++              }
++#endif
++
+               if (exception_trace && printk_ratelimit())
+                       printk("%s%s[%d]: segfault at %08lx pc %08lx "
+                              "sp %08lx ecr %lu\n",
+diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
+index 568885a..f8008df 100644
+--- a/arch/blackfin/include/asm/cache.h
++++ b/arch/blackfin/include/asm/cache.h
+@@ -7,6 +7,7 @@
+ #ifndef __ARCH_BLACKFIN_CACHE_H
+ #define __ARCH_BLACKFIN_CACHE_H
++#include <linux/const.h>
+ #include <linux/linkage.h>    /* for asmlinkage */
+ /*
+@@ -14,7 +15,7 @@
+  * Blackfin loads 32 bytes for cache
+  */
+ #define L1_CACHE_SHIFT        5
+-#define L1_CACHE_BYTES        (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES        (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES       L1_CACHE_BYTES
+ #define ARCH_DMA_MINALIGN     L1_CACHE_BYTES
+diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
+index aea2718..3639a60 100644
+--- a/arch/cris/include/arch-v10/arch/cache.h
++++ b/arch/cris/include/arch-v10/arch/cache.h
+@@ -1,8 +1,9 @@
+ #ifndef _ASM_ARCH_CACHE_H
+ #define _ASM_ARCH_CACHE_H
++#include <linux/const.h>
+ /* Etrax 100LX have 32-byte cache-lines. */
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #endif /* _ASM_ARCH_CACHE_H */
+diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
+index 7caf25d..ee65ac5 100644
+--- a/arch/cris/include/arch-v32/arch/cache.h
++++ b/arch/cris/include/arch-v32/arch/cache.h
+@@ -1,11 +1,12 @@
+ #ifndef _ASM_CRIS_ARCH_CACHE_H
+ #define _ASM_CRIS_ARCH_CACHE_H
++#include <linux/const.h>
+ #include <arch/hwregs/dma.h>
+ /* A cache-line is 32 bytes. */
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
+index b86329d..6709906 100644
+--- a/arch/frv/include/asm/atomic.h
++++ b/arch/frv/include/asm/atomic.h
+@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
+ #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
+ #define atomic64_xchg(v, new)         (__xchg_64(new, &(v)->counter))
++#define atomic64_read_unchecked(v)            atomic64_read(v)
++#define atomic64_set_unchecked(v, i)          atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v)          atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v)   atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v)          atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v)             atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v)      atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v)             atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n)   atomic64_cmpxchg((v), (o), (n))
++
+ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+       int c, old;
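The *_unchecked aliases exist because PAX_REFCOUNT turns the regular atomic64_* operations into overflow-detecting ones on the architectures that implement it; counters that may legitimately wrap (statistics and the like) are converted to the _unchecked variants throughout the patch. On frv both families are identical, hence the plain #define mapping. Conceptually, the difference on an instrumented architecture looks like this (an illustration only, not the frv code, and not atomic):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Unchecked: plain wrap-around add, fine for statistics counters. */
static int64_t add_unchecked(int64_t *v, int64_t i)
{
    return *v += i;
}

/* Checked: what PAX_REFCOUNT-style instrumentation aims for, trapping on
 * signed overflow instead of silently wrapping a reference count. */
static int64_t add_checked(int64_t *v, int64_t i)
{
    int64_t res;
    if (__builtin_add_overflow(*v, i, &res)) {
        fprintf(stderr, "refcount overflow detected\n");
        abort();                     /* the kernel would report and contain it */
    }
    return *v = res;
}

int main(void)
{
    int64_t stat = INT64_MAX, ref = INT64_MAX;
    printf("unchecked wraps to %lld\n", (long long)add_unchecked(&stat, 1));
    add_checked(&ref, 1);            /* aborts */
    return 0;
}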
+diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
+index 2797163..c2a401d 100644
+--- a/arch/frv/include/asm/cache.h
++++ b/arch/frv/include/asm/cache.h
+@@ -12,10 +12,11 @@
+ #ifndef __ASM_CACHE_H
+ #define __ASM_CACHE_H
++#include <linux/const.h>
+ /* bytes per L1 cache line */
+ #define L1_CACHE_SHIFT                (CONFIG_FRV_L1_CACHE_SHIFT)
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define __cacheline_aligned   __attribute__((aligned(L1_CACHE_BYTES)))
+ #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
+index 43901f2..0d8b865 100644
+--- a/arch/frv/include/asm/kmap_types.h
++++ b/arch/frv/include/asm/kmap_types.h
+@@ -2,6 +2,6 @@
+ #ifndef _ASM_KMAP_TYPES_H
+ #define _ASM_KMAP_TYPES_H
+-#define KM_TYPE_NR 17
++#define KM_TYPE_NR 18
+ #endif
+diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
+index 836f147..4cf23f5 100644
+--- a/arch/frv/mm/elf-fdpic.c
++++ b/arch/frv/mm/elf-fdpic.c
+@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ {
+       struct vm_area_struct *vma;
+       struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+       if (len > TASK_SIZE)
+               return -ENOMEM;
+@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+       if (addr) {
+               addr = PAGE_ALIGN(addr);
+               vma = find_vma(current->mm, addr);
+-              if (TASK_SIZE - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       goto success;
+       }
+@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+       info.high_limit = (current->mm->start_stack - 0x00200000);
+       info.align_mask = 0;
+       info.align_offset = 0;
++      info.threadstack_offset = offset;
+       addr = vm_unmapped_area(&info);
+       if (!(addr & ~PAGE_MASK))
+               goto success;
+diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
+index f4ca594..adc72fd6 100644
+--- a/arch/hexagon/include/asm/cache.h
++++ b/arch/hexagon/include/asm/cache.h
+@@ -21,9 +21,11 @@
+ #ifndef __ASM_CACHE_H
+ #define __ASM_CACHE_H
++#include <linux/const.h>
++
+ /* Bytes per L1 cache line */
+-#define L1_CACHE_SHIFT                (5)
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_SHIFT                5
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define __cacheline_aligned   __aligned(L1_CACHE_BYTES)
+ #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
+diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
+index 6e6fe18..a6ae668 100644
+--- a/arch/ia64/include/asm/atomic.h
++++ b/arch/ia64/include/asm/atomic.h
+@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
+ #define atomic64_inc(v)                       atomic64_add(1, (v))
+ #define atomic64_dec(v)                       atomic64_sub(1, (v))
++#define atomic64_read_unchecked(v)            atomic64_read(v)
++#define atomic64_set_unchecked(v, i)          atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v)          atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v)   atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v)          atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v)             atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v)      atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v)             atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n)   atomic64_cmpxchg((v), (o), (n))
++
+ /* Atomic operations are already serializing */
+ #define smp_mb__before_atomic_dec()   barrier()
+ #define smp_mb__after_atomic_dec()    barrier()
+diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
+index 988254a..e1ee885 100644
+--- a/arch/ia64/include/asm/cache.h
++++ b/arch/ia64/include/asm/cache.h
+@@ -1,6 +1,7 @@
+ #ifndef _ASM_IA64_CACHE_H
+ #define _ASM_IA64_CACHE_H
++#include <linux/const.h>
+ /*
+  * Copyright (C) 1998-2000 Hewlett-Packard Co
+@@ -9,7 +10,7 @@
+ /* Bytes per L1 (data) cache line.  */
+ #define L1_CACHE_SHIFT                CONFIG_IA64_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ #ifdef CONFIG_SMP
+ # define SMP_CACHE_SHIFT      L1_CACHE_SHIFT
+diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
+index 5a83c5c..4d7f553 100644
+--- a/arch/ia64/include/asm/elf.h
++++ b/arch/ia64/include/asm/elf.h
+@@ -42,6 +42,13 @@
+  */
+ #define ELF_ET_DYN_BASE               (TASK_UNMAPPED_BASE + 0x800000000UL)
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LEN    (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#define PAX_DELTA_STACK_LEN   (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#endif
++
+ #define PT_IA_64_UNWIND               0x70000001
+ /* IA-64 relocations: */
+diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
+index 96a8d92..617a1cf 100644
+--- a/arch/ia64/include/asm/pgalloc.h
++++ b/arch/ia64/include/asm/pgalloc.h
+@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
+       pgd_val(*pgd_entry) = __pa(pud);
+ }
++static inline void
++pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
++{
++      pgd_populate(mm, pgd_entry, pud);
++}
++
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+       return quicklist_alloc(0, GFP_KERNEL, NULL);
+@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
+       pud_val(*pud_entry) = __pa(pmd);
+ }
++static inline void
++pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
++{
++      pud_populate(mm, pud_entry, pmd);
++}
++
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+       return quicklist_alloc(0, GFP_KERNEL, NULL);
+diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
+index 815810c..d60bd4c 100644
+--- a/arch/ia64/include/asm/pgtable.h
++++ b/arch/ia64/include/asm/pgtable.h
+@@ -12,7 +12,7 @@
+  *    David Mosberger-Tang <davidm@hpl.hp.com>
+  */
+-
++#include <linux/const.h>
+ #include <asm/mman.h>
+ #include <asm/page.h>
+ #include <asm/processor.h>
+@@ -142,6 +142,17 @@
+ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY     __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY_EXEC        __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC   __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++# define PAGE_COPY_NOEXEC     __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++#else
++# define PAGE_SHARED_NOEXEC   PAGE_SHARED
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++# define PAGE_COPY_NOEXEC     PAGE_COPY
++#endif
++
+ #define PAGE_GATE     __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+ #define PAGE_KERNEL   __pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
+ #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
+index 54ff557..70c88b7 100644
+--- a/arch/ia64/include/asm/spinlock.h
++++ b/arch/ia64/include/asm/spinlock.h
+@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+       unsigned short  *p = (unsigned short *)&lock->lock + 1, tmp;
+       asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+-      ACCESS_ONCE(*p) = (tmp + 2) & ~1;
++      ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
+ }
+ static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
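ACCESS_ONCE_RW is the writable counterpart the patch set introduces alongside a constified ACCESS_ONCE: reads keep going through a const volatile lvalue, while intentional stores, like this ticket release, have to use the _RW form so the write cannot happen by accident. The two definitions are assumed from the rest of the patch (they are not shown in this hunk), and in essence are:

/* Read accessor: the const qualifier makes accidental writes a compile error. */
#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
/* Deliberate-write accessor used where a store is really intended. */
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

static unsigned short ticket;

static void release(void)
{
    unsigned short t = ACCESS_ONCE(ticket);                  /* volatile read  */
    ACCESS_ONCE_RW(ticket) = (unsigned short)((t + 2) & ~1); /* volatile write */
}

int main(void) { release(); return (int)ticket; }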
+diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
+index 449c8c0..18965fb 100644
+--- a/arch/ia64/include/asm/uaccess.h
++++ b/arch/ia64/include/asm/uaccess.h
+@@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
+ static inline unsigned long
+ __copy_to_user (void __user *to, const void *from, unsigned long count)
+ {
++      if (count > INT_MAX)
++              return count;
++
++      if (!__builtin_constant_p(count))
++              check_object_size(from, count, true);
++
+       return __copy_user(to, (__force void __user *) from, count);
+ }
+ static inline unsigned long
+ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ {
++      if (count > INT_MAX)
++              return count;
++
++      if (!__builtin_constant_p(count))
++              check_object_size(to, count, false);
++
+       return __copy_user((__force void __user *) to, from, count);
+ }
+@@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ ({                                                                                    \
+       void __user *__cu_to = (to);                                                    \
+       const void *__cu_from = (from);                                                 \
+-      long __cu_len = (n);                                                            \
++      unsigned long __cu_len = (n);                                                   \
+                                                                                       \
+-      if (__access_ok(__cu_to, __cu_len, get_fs()))                                   \
++      if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) {          \
++              if (!__builtin_constant_p(n))                                           \
++                      check_object_size(__cu_from, __cu_len, true);                   \
+               __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len);   \
++      }                                                                               \
+       __cu_len;                                                                       \
+ })
+@@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ ({                                                                                    \
+       void *__cu_to = (to);                                                           \
+       const void __user *__cu_from = (from);                                          \
+-      long __cu_len = (n);                                                            \
++      unsigned long __cu_len = (n);                                                   \
+                                                                                       \
+       __chk_user_ptr(__cu_from);                                                      \
+-      if (__access_ok(__cu_from, __cu_len, get_fs()))                                 \
++      if (__cu_len <= INT_MAX  && __access_ok(__cu_from, __cu_len, get_fs())) {       \
++              if (!__builtin_constant_p(n))                                           \
++                      check_object_size(__cu_to, __cu_len, false);                    \
+               __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len);   \
++      }                                                                               \
+       __cu_len;                                                                       \
+ })
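The uaccess.h changes put two guards in front of __copy_user(): sizes above INT_MAX are rejected outright (they are almost always a negative length converted to unsigned), and for non-constant sizes check_object_size(), the PAX_USERCOPY hook defined elsewhere in the patch, verifies that the kernel buffer really spans count bytes of a single slab object or the current stack frame. The shape of the guard, with a hypothetical bounds-check standing in for check_object_size():

#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for check_object_size(): false if [ptr, ptr+n) does not
 * fit inside the object the pointer belongs to. */
static bool object_spans(const void *ptr, size_t n, size_t object_size)
{
    (void)ptr;
    return n <= object_size;
}

static size_t guarded_copy_to_user(void *to, const void *from, size_t count, size_t from_objsize)
{
    if (count > INT_MAX)                     /* negative length laundered through size_t */
        return count;                        /* "nothing copied", kernel convention      */
    if (!object_spans(from, count, from_objsize)) {
        fprintf(stderr, "usercopy: kernel buffer overread blocked\n");
        return count;
    }
    memcpy(to, from, count);                 /* stands in for __copy_user() */
    return 0;                                /* 0 bytes left uncopied */
}

int main(void)
{
    char kbuf[16] = "kernel data", ubuf[64];
    printf("ok:      %zu left\n", guarded_copy_to_user(ubuf, kbuf, sizeof(kbuf), sizeof(kbuf)));
    printf("too big: %zu left\n", guarded_copy_to_user(ubuf, kbuf, (size_t)-1, sizeof(kbuf)));
    return 0;
}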
+diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
+index 2d67317..07d8bfa 100644
+--- a/arch/ia64/kernel/err_inject.c
++++ b/arch/ia64/kernel/err_inject.c
+@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
++static struct notifier_block err_inject_cpu_notifier =
+ {
+       .notifier_call = err_inject_cpu_callback,
+ };
+diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
+index d7396db..b33e873 100644
+--- a/arch/ia64/kernel/mca.c
++++ b/arch/ia64/kernel/mca.c
+@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
++static struct notifier_block mca_cpu_notifier = {
+       .notifier_call = mca_cpu_callback
+ };
+diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
+index 24603be..948052d 100644
+--- a/arch/ia64/kernel/module.c
++++ b/arch/ia64/kernel/module.c
+@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
+ void
+ module_free (struct module *mod, void *module_region)
+ {
+-      if (mod && mod->arch.init_unw_table &&
+-          module_region == mod->module_init) {
++      if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
+               unw_remove_unwind_table(mod->arch.init_unw_table);
+               mod->arch.init_unw_table = NULL;
+       }
+@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
+ }
+ static inline int
++in_init_rx (const struct module *mod, uint64_t addr)
++{
++      return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
++}
++
++static inline int
++in_init_rw (const struct module *mod, uint64_t addr)
++{
++      return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
++}
++
++static inline int
+ in_init (const struct module *mod, uint64_t addr)
+ {
+-      return addr - (uint64_t) mod->module_init < mod->init_size;
++      return in_init_rx(mod, addr) || in_init_rw(mod, addr);
++}
++
++static inline int
++in_core_rx (const struct module *mod, uint64_t addr)
++{
++      return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
++}
++
++static inline int
++in_core_rw (const struct module *mod, uint64_t addr)
++{
++      return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
+ }
+ static inline int
+ in_core (const struct module *mod, uint64_t addr)
+ {
+-      return addr - (uint64_t) mod->module_core < mod->core_size;
++      return in_core_rx(mod, addr) || in_core_rw(mod, addr);
+ }
+ static inline int
+@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
+               break;
+             case RV_BDREL:
+-              val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
++              if (in_init_rx(mod, val))
++                      val -= (uint64_t) mod->module_init_rx;
++              else if (in_init_rw(mod, val))
++                      val -= (uint64_t) mod->module_init_rw;
++              else if (in_core_rx(mod, val))
++                      val -= (uint64_t) mod->module_core_rx;
++              else if (in_core_rw(mod, val))
++                      val -= (uint64_t) mod->module_core_rw;
+               break;
+             case RV_LTV:
+@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
+                *     addresses have been selected...
+                */
+               uint64_t gp;
+-              if (mod->core_size > MAX_LTOFF)
++              if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
+                       /*
+                        * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
+                        * at the end of the module.
+                        */
+-                      gp = mod->core_size - MAX_LTOFF / 2;
++                      gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
+               else
+-                      gp = mod->core_size / 2;
+-              gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
++                      gp = (mod->core_size_rx + mod->core_size_rw) / 2;
++              gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
+               mod->arch.gp = gp;
+               DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
+       }
+diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
+index 2b3c2d7..a318d84 100644
+--- a/arch/ia64/kernel/palinfo.c
++++ b/arch/ia64/kernel/palinfo.c
+@@ -980,7 +980,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __refdata palinfo_cpu_notifier =
++static struct notifier_block palinfo_cpu_notifier =
+ {
+       .notifier_call = palinfo_cpu_callback,
+       .priority = 0,
+diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
+index 4bc580a..7767f24 100644
+--- a/arch/ia64/kernel/salinfo.c
++++ b/arch/ia64/kernel/salinfo.c
+@@ -609,7 +609,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
+       return NOTIFY_OK;
+ }
+-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
++static struct notifier_block salinfo_cpu_notifier =
+ {
+       .notifier_call = salinfo_cpu_callback,
+       .priority = 0,
+diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
+index 41e33f8..65180b2 100644
+--- a/arch/ia64/kernel/sys_ia64.c
++++ b/arch/ia64/kernel/sys_ia64.c
+@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+       unsigned long align_mask = 0;
+       struct mm_struct *mm = current->mm;
+       struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       if (len > RGN_MAP_LIMIT)
+               return -ENOMEM;
+@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+       if (REGION_NUMBER(addr) == RGN_HPAGE)
+               addr = 0;
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (mm->pax_flags & MF_PAX_RANDMMAP)
++              addr = mm->free_area_cache;
++      else
++#endif
++
+       if (!addr)
+               addr = TASK_UNMAPPED_BASE;
+@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+       info.high_limit = TASK_SIZE;
+       info.align_mask = align_mask;
+       info.align_offset = 0;
++      info.threadstack_offset = offset;
+       return vm_unmapped_area(&info);
+ }
+diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
+index dc00b2c..cce53c2 100644
+--- a/arch/ia64/kernel/topology.c
++++ b/arch/ia64/kernel/topology.c
+@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata cache_cpu_notifier =
++static struct notifier_block cache_cpu_notifier =
+ {
+       .notifier_call = cache_cpu_callback
+ };
+diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
+index 0ccb28f..8992469 100644
+--- a/arch/ia64/kernel/vmlinux.lds.S
++++ b/arch/ia64/kernel/vmlinux.lds.S
+@@ -198,7 +198,7 @@ SECTIONS {
+       /* Per-cpu data: */
+       . = ALIGN(PERCPU_PAGE_SIZE);
+       PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
+-      __phys_per_cpu_start = __per_cpu_load;
++      __phys_per_cpu_start = per_cpu_load;
+       /*
+        * ensure percpu data fits
+        * into percpu page size
+diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
+index 6cf0341..d352594 100644
+--- a/arch/ia64/mm/fault.c
++++ b/arch/ia64/mm/fault.c
+@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
+       return pte_present(pte);
+ }
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++      unsigned long i;
++
++      printk(KERN_ERR "PAX: bytes at PC: ");
++      for (i = 0; i < 8; i++) {
++              unsigned int c;
++              if (get_user(c, (unsigned int *)pc+i))
++                      printk(KERN_CONT "???????? ");
++              else
++                      printk(KERN_CONT "%08x ", c);
++      }
++      printk("\n");
++}
++#endif
++
+ #     define VM_READ_BIT      0
+ #     define VM_WRITE_BIT     1
+ #     define VM_EXEC_BIT      2
+@@ -149,8 +166,21 @@ retry:
+       if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
+               goto bad_area;
+-      if ((vma->vm_flags & mask) != mask)
++      if ((vma->vm_flags & mask) != mask) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++              if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
++                      if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
++                              goto bad_area;
++
++                      up_read(&mm->mmap_sem);
++                      pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
++                      do_group_exit(SIGKILL);
++              }
++#endif
++
+               goto bad_area;
++      }
+       /*
+        * If for any reason at all we couldn't handle the fault, make
+diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
+index 76069c1..c2aa816 100644
+--- a/arch/ia64/mm/hugetlbpage.c
++++ b/arch/ia64/mm/hugetlbpage.c
+@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+               unsigned long pgoff, unsigned long flags)
+ {
+       struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
+       if (len > RGN_MAP_LIMIT)
+               return -ENOMEM;
+@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+       info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
+       info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
+       info.align_offset = 0;
++      info.threadstack_offset = offset;
+       return vm_unmapped_area(&info);
+ }
+diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
+index d1fe4b4..2628f37 100644
+--- a/arch/ia64/mm/init.c
++++ b/arch/ia64/mm/init.c
+@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
+               vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
+               vma->vm_end = vma->vm_start + PAGE_SIZE;
+               vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++              if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
++                      vma->vm_flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++                      if (current->mm->pax_flags & MF_PAX_MPROTECT)
++                              vma->vm_flags &= ~VM_MAYEXEC;
++#endif
++
++              }
++#endif
++
+               vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+               down_write(&current->mm->mmap_sem);
+               if (insert_vm_struct(current->mm, vma)) {
+diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
+index 40b3ee9..8c2c112 100644
+--- a/arch/m32r/include/asm/cache.h
++++ b/arch/m32r/include/asm/cache.h
+@@ -1,8 +1,10 @@
+ #ifndef _ASM_M32R_CACHE_H
+ #define _ASM_M32R_CACHE_H
++#include <linux/const.h>
++
+ /* L1 cache line size */
+ #define L1_CACHE_SHIFT                4
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ #endif  /* _ASM_M32R_CACHE_H */
+diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
+index 82abd15..d95ae5d 100644
+--- a/arch/m32r/lib/usercopy.c
++++ b/arch/m32r/lib/usercopy.c
+@@ -14,6 +14,9 @@
+ unsigned long
+ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
+       prefetch(from);
+       if (access_ok(VERIFY_WRITE, to, n))
+               __copy_user(to,from,n);
+@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ unsigned long
+ __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
+       prefetchw(to);
+       if (access_ok(VERIFY_READ, from, n))
+               __copy_user_zeroing(to,from,n);
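The m32r guard is the simpler form of the same idea: a negative length from a confused caller becomes a huge unsigned n, so (long)n < 0 bails out before access_ok() and __copy_user() ever see it. For example:

#include <stdio.h>

int main(void)
{
    int bad_len = -1;                     /* e.g. an error code reused as a length */
    unsigned long n = (unsigned long)bad_len;
    if ((long)n < 0)                      /* the added guard */
        printf("rejected n=%#lx before copying\n", n);
    return 0;
}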
+diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
+index 0395c51..5f26031 100644
+--- a/arch/m68k/include/asm/cache.h
++++ b/arch/m68k/include/asm/cache.h
+@@ -4,9 +4,11 @@
+ #ifndef __ARCH_M68K_CACHE_H
+ #define __ARCH_M68K_CACHE_H
++#include <linux/const.h>
++
+ /* bytes per L1 cache line */
+ #define        L1_CACHE_SHIFT  4
+-#define        L1_CACHE_BYTES  (1<< L1_CACHE_SHIFT)
++#define        L1_CACHE_BYTES  (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define ARCH_DMA_MINALIGN     L1_CACHE_BYTES
+diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
+index 3c52fa6..11b2ad8 100644
+--- a/arch/metag/mm/hugetlbpage.c
++++ b/arch/metag/mm/hugetlbpage.c
+@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
+       info.high_limit = TASK_SIZE;
+       info.align_mask = PAGE_MASK & HUGEPT_MASK;
+       info.align_offset = 0;
++      info.threadstack_offset = 0;
+       return vm_unmapped_area(&info);
+ }
+diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
+index 4efe96a..60e8699 100644
+--- a/arch/microblaze/include/asm/cache.h
++++ b/arch/microblaze/include/asm/cache.h
+@@ -13,11 +13,12 @@
+ #ifndef _ASM_MICROBLAZE_CACHE_H
+ #define _ASM_MICROBLAZE_CACHE_H
++#include <linux/const.h>
+ #include <asm/registers.h>
+ #define L1_CACHE_SHIFT 5
+ /* word-granular cache in microblaze */
+-#define L1_CACHE_BYTES        (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES        (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES       L1_CACHE_BYTES
+diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
+index 08b6079..eb272cf 100644
+--- a/arch/mips/include/asm/atomic.h
++++ b/arch/mips/include/asm/atomic.h
+@@ -21,6 +21,10 @@
+ #include <asm/cmpxchg.h>
+ #include <asm/war.h>
++#ifdef CONFIG_GENERIC_ATOMIC64
++#include <asm-generic/atomic64.h>
++#endif
++
+ #define ATOMIC_INIT(i)          { (i) }
+ /*
+@@ -759,6 +763,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+  */
+ #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
++#define atomic64_read_unchecked(v)            atomic64_read(v)
++#define atomic64_set_unchecked(v, i)          atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v)          atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v)   atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v)          atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v)             atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v)      atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v)             atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n)   atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* CONFIG_64BIT */
+ /*
+diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
+index b4db69f..8f3b093 100644
+--- a/arch/mips/include/asm/cache.h
++++ b/arch/mips/include/asm/cache.h
+@@ -9,10 +9,11 @@
+ #ifndef _ASM_CACHE_H
+ #define _ASM_CACHE_H
++#include <linux/const.h>
+ #include <kmalloc.h>
+ #define L1_CACHE_SHIFT                CONFIG_MIPS_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define SMP_CACHE_SHIFT               L1_CACHE_SHIFT
+ #define SMP_CACHE_BYTES               L1_CACHE_BYTES
+diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
+index cf3ae24..238d22f 100644
+--- a/arch/mips/include/asm/elf.h
++++ b/arch/mips/include/asm/elf.h
+@@ -372,13 +372,16 @@ extern const char *__elf_platform;
+ #define ELF_ET_DYN_BASE               (TASK_SIZE / 3 * 2)
+ #endif
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN    (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN   (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+ struct linux_binprm;
+ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+                                      int uses_interp);
+-struct mm_struct;
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif /* _ASM_ELF_H */
+diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
+index c1f6afa..38cc6e9 100644
+--- a/arch/mips/include/asm/exec.h
++++ b/arch/mips/include/asm/exec.h
+@@ -12,6 +12,6 @@
+ #ifndef _ASM_EXEC_H
+ #define _ASM_EXEC_H
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+ #endif /* _ASM_EXEC_H */
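Replacing arch_align_stack() with a plain & ~0xfUL drops the small per-exec stack jitter the usual helper applies and keeps only 16-byte alignment; under this patch the stack randomisation comes from the larger delta_stack applied elsewhere instead. The before/after in effect, where the randomised version is only an approximation of a typical arch implementation:

#include <stdio.h>
#include <stdlib.h>

/* Approximation of a typical arch_align_stack(): jitter sp a little, then align. */
static unsigned long arch_align_stack_random(unsigned long sp)
{
    sp -= (unsigned long)(rand() % 8192);
    return sp & ~0xfUL;
}

/* The patched MIPS version: deterministic 16-byte alignment only. */
#define arch_align_stack(x) ((x) & ~0xfUL)

int main(void)
{
    unsigned long sp = 0x7fff0000UL;
    printf("randomised: %#lx\n", arch_align_stack_random(sp));
    printf("patched:    %#lx\n", arch_align_stack(sp));
    return 0;
}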
+diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
+index d44622c..64990d2 100644
+--- a/arch/mips/include/asm/local.h
++++ b/arch/mips/include/asm/local.h
+@@ -12,15 +12,25 @@ typedef struct
+       atomic_long_t a;
+ } local_t;
++typedef struct {
++      atomic_long_unchecked_t a;
++} local_unchecked_t;
++
+ #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+ #define local_read(l) atomic_long_read(&(l)->a)
++#define local_read_unchecked(l)       atomic_long_read_unchecked(&(l)->a)
+ #define local_set(l, i) atomic_long_set(&(l)->a, (i))
++#define local_set_unchecked(l, i)     atomic_long_set_unchecked(&(l)->a, (i))
+ #define local_add(i, l) atomic_long_add((i), (&(l)->a))
++#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
+ #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
++#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
+ #define local_inc(l)  atomic_long_inc(&(l)->a)
++#define local_inc_unchecked(l)        atomic_long_inc_unchecked(&(l)->a)
+ #define local_dec(l)  atomic_long_dec(&(l)->a)
++#define local_dec_unchecked(l)        atomic_long_dec_unchecked(&(l)->a)
+ /*
+  * Same as above, but return the result value
+@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
+       return result;
+ }
++static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
++{
++      unsigned long result;
++
++      if (kernel_uses_llsc && R10000_LLSC_WAR) {
++              unsigned long temp;
++
++              __asm__ __volatile__(
++              "       .set    mips3                                   \n"
++              "1:"    __LL    "%1, %2         # local_add_return      \n"
++              "       addu    %0, %1, %3                              \n"
++                      __SC    "%0, %2                                 \n"
++              "       beqzl   %0, 1b                                  \n"
++              "       addu    %0, %1, %3                              \n"
++              "       .set    mips0                                   \n"
++              : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
++              : "Ir" (i), "m" (l->a.counter)
++              : "memory");
++      } else if (kernel_uses_llsc) {
++              unsigned long temp;
++
++              __asm__ __volatile__(
++              "       .set    mips3                                   \n"
++              "1:"    __LL    "%1, %2         # local_add_return      \n"
++              "       addu    %0, %1, %3                              \n"
++                      __SC    "%0, %2                                 \n"
++              "       beqz    %0, 1b                                  \n"
++              "       addu    %0, %1, %3                              \n"
++              "       .set    mips0                                   \n"
++              : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
++              : "Ir" (i), "m" (l->a.counter)
++              : "memory");
++      } else {
++              unsigned long flags;
++
++              local_irq_save(flags);
++              result = l->a.counter;
++              result += i;
++              l->a.counter = result;
++              local_irq_restore(flags);
++      }
++
++      return result;
++}
++
+ static __inline__ long local_sub_return(long i, local_t * l)
+ {
+       unsigned long result;
+@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
+ #define local_cmpxchg(l, o, n) \
+       ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
++#define local_cmpxchg_unchecked(l, o, n) \
++      ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
+ #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
+ /**
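Editor's note: the local_unchecked_t and *_unchecked definitions added here mirror the checked/unchecked split PaX uses elsewhere for its reference-count overflow protection — checked counters are meant to detect wrap-around, unchecked ones are for counters where wrapping is harmless. On MIPS the patch simply aliases both to the same atomic_long operations. A rough user-space illustration of the idea only (names are illustrative, not the kernel implementation):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* "checked" add: refuse to wrap, standing in for the PaX overflow trap */
static long local_add_checked(long *v, long i)
{
    long r;
    if (__builtin_add_overflow(*v, i, &r)) {
        fprintf(stderr, "counter overflow detected\n");
        abort();
    }
    return *v = r;
}

/* "unchecked" add: wrap-around is tolerated (statistics counters etc.) */
static long local_add_unchecked(long *v, long i)
{
    return *v = (long)((unsigned long)*v + (unsigned long)i);
}

int main(void)
{
    long stat = LONG_MAX, ref = LONG_MAX;
    printf("unchecked counter wraps to %ld\n", local_add_unchecked(&stat, 1));
    local_add_checked(&ref, 1);          /* aborts instead of wrapping */
    return 0;
}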
+diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
+index f59552f..3abe9b9 100644
+--- a/arch/mips/include/asm/page.h
++++ b/arch/mips/include/asm/page.h
+@@ -95,7 +95,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
+   #ifdef CONFIG_CPU_MIPS32
+     typedef struct { unsigned long pte_low, pte_high; } pte_t;
+     #define pte_val(x)          ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+-    #define __pte(x)    ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
++    #define __pte(x)    ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
+   #else
+      typedef struct { unsigned long long pte; } pte_t;
+      #define pte_val(x) ((x).pte)
+diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
+index 881d18b..cea38bc 100644
+--- a/arch/mips/include/asm/pgalloc.h
++++ b/arch/mips/include/asm/pgalloc.h
+@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ {
+       set_pud(pud, __pud((unsigned long)pmd));
+ }
++
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++      pud_populate(mm, pud, pmd);
++}
+ #endif
+ /*
+diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
+index 895320e..bf63e10 100644
+--- a/arch/mips/include/asm/thread_info.h
++++ b/arch/mips/include/asm/thread_info.h
+@@ -115,6 +115,8 @@ static inline struct thread_info *current_thread_info(void)
+ #define TIF_32BIT_ADDR                23      /* 32-bit address space (o32/n32) */
+ #define TIF_FPUBOUND          24      /* thread bound to FPU-full CPU set */
+ #define TIF_LOAD_WATCH                25      /* If set, load watch registers */
++/* li takes a 32bit immediate */
++#define TIF_GRSEC_SETXID      29      /* update credentials on syscall entry/exit */
+ #define TIF_SYSCALL_TRACE     31      /* syscall trace active */
+ #define _TIF_SYSCALL_TRACE    (1<<TIF_SYSCALL_TRACE)
+@@ -130,15 +132,18 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_32BIT_ADDR               (1<<TIF_32BIT_ADDR)
+ #define _TIF_FPUBOUND         (1<<TIF_FPUBOUND)
+ #define _TIF_LOAD_WATCH               (1<<TIF_LOAD_WATCH)
++#define _TIF_GRSEC_SETXID     (1<<TIF_GRSEC_SETXID)
++
++#define _TIF_SYSCALL_WORK     (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
+ /* work to do in syscall_trace_leave() */
+-#define _TIF_WORK_SYSCALL_EXIT        (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
++#define _TIF_WORK_SYSCALL_EXIT        (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
+ /* work to do on interrupt/exception return */
+ #define _TIF_WORK_MASK                \
+       (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
+ /* work to do on any return to u-space */
+-#define _TIF_ALLWORK_MASK     (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
++#define _TIF_ALLWORK_MASK     (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
+ #endif /* __KERNEL__ */
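Editor's note: the TIF_* values are bit numbers and the _TIF_* values the corresponding single-bit masks; _TIF_SYSCALL_WORK just ORs together the masks that must force the syscall path through the tracing slow path, and the scall*.S hunks below load that combined mask with li. A small stand-alone sketch of the pattern (bit numbers chosen arbitrarily for the example, not taken from the header):

#include <stdio.h>

#define TIF_SYSCALL_TRACE 31
#define TIF_SYSCALL_AUDIT  3
#define TIF_GRSEC_SETXID  29

#define _TIF_SYSCALL_TRACE (1U << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1U << TIF_SYSCALL_AUDIT)
#define _TIF_GRSEC_SETXID  (1U << TIF_GRSEC_SETXID)
#define _TIF_SYSCALL_WORK  (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)

int main(void)
{
    unsigned int flags = _TIF_GRSEC_SETXID;      /* pretend only this flag is set */
    if (flags & _TIF_SYSCALL_WORK)
        puts("divert into the syscall tracing slow path");
    return 0;
}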
+diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
+index 1188e00..41cf144 100644
+--- a/arch/mips/kernel/binfmt_elfn32.c
++++ b/arch/mips/kernel/binfmt_elfn32.c
+@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE               (TASK32_SIZE / 3 * 2)
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN    (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN   (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/module.h>
+ #include <linux/elfcore.h>
+diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
+index 202e581..689ca79 100644
+--- a/arch/mips/kernel/binfmt_elfo32.c
++++ b/arch/mips/kernel/binfmt_elfo32.c
+@@ -56,6 +56,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE               (TASK32_SIZE / 3 * 2)
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN    (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN   (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ /*
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index c6a041d..b3e7318 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -563,15 +563,3 @@ unsigned long get_wchan(struct task_struct *task)
+ out:
+       return pc;
+ }
+-
+-/*
+- * Don't forget that the stack pointer must be aligned on a 8 bytes
+- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
+- */
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+-      if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+-              sp -= get_random_int() & ~PAGE_MASK;
+-
+-      return sp & ALMASK;
+-}
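Editor's note: the removed arch_align_stack() subtracted a random sub-page offset before aligning, so every exec placed the initial stack at a slightly different address; with this patch that job is taken over by PaX's own randomization. A hedged user-space approximation of what the deleted routine computed (rand() standing in for get_random_int(), 4 KiB pages assumed):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_MASK (~0xfffUL)    /* 4 KiB pages assumed for this sketch */
#define ALMASK    (~0xfUL)      /* 16-byte alignment */

static unsigned long align_stack_randomized(unsigned long sp)
{
    sp -= (unsigned long)rand() & ~PAGE_MASK;   /* random offset within one page */
    return sp & ALMASK;
}

int main(void)
{
    srand((unsigned)time(NULL));
    printf("randomized sp = %#lx\n", align_stack_randomized(0x7ffffffff000UL));
    return 0;
}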
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index 9c6299c..2fb4c22 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -528,6 +528,10 @@ static inline int audit_arch(void)
+       return arch;
+ }
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ /*
+  * Notification of system call entry/exit
+  * - triggered by current->work.syscall_trace
+@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
+       /* do the secure computing check first */
+       secure_computing_strict(regs->regs[2]);
++#ifdef CONFIG_GRKERNSEC_SETXID
++      if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++              gr_delayed_cred_worker();
++#endif
++
+       if (!(current->ptrace & PT_PTRACED))
+               goto out;
+diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
+index 9b36424..e7f4154 100644
+--- a/arch/mips/kernel/scall32-o32.S
++++ b/arch/mips/kernel/scall32-o32.S
+@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
+ stack_done:
+       lw      t0, TI_FLAGS($28)       # syscall tracing enabled?
+-      li      t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
++      li      t1, _TIF_SYSCALL_WORK
+       and     t0, t1
+       bnez    t0, syscall_trace_entry # -> yes
+diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
+index 97a5909..59622f8 100644
+--- a/arch/mips/kernel/scall64-64.S
++++ b/arch/mips/kernel/scall64-64.S
+@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
+       sd      a3, PT_R26(sp)          # save a3 for syscall restarting
+-      li      t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
++      li      t1, _TIF_SYSCALL_WORK
+       LONG_L  t0, TI_FLAGS($28)       # syscall tracing enabled?
+       and     t0, t1, t0
+       bnez    t0, syscall_trace_entry
+diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
+index edcb659..fb2ab09 100644
+--- a/arch/mips/kernel/scall64-n32.S
++++ b/arch/mips/kernel/scall64-n32.S
+@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
+       sd      a3, PT_R26(sp)          # save a3 for syscall restarting
+-      li      t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
++      li      t1, _TIF_SYSCALL_WORK
+       LONG_L  t0, TI_FLAGS($28)       # syscall tracing enabled?
+       and     t0, t1, t0
+       bnez    t0, n32_syscall_trace_entry
+diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
+index 74f485d..47d2c38 100644
+--- a/arch/mips/kernel/scall64-o32.S
++++ b/arch/mips/kernel/scall64-o32.S
+@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
+       PTR     4b, bad_stack
+       .previous
+-      li      t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
++      li      t1, _TIF_SYSCALL_WORK
+       LONG_L  t0, TI_FLAGS($28)       # syscall tracing enabled?
+       and     t0, t1, t0
+       bnez    t0, trace_a_syscall
+diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
+index 0fead53..eeb00a6 100644
+--- a/arch/mips/mm/fault.c
++++ b/arch/mips/mm/fault.c
+@@ -27,6 +27,23 @@
+ #include <asm/highmem.h>              /* For VMALLOC_END */
+ #include <linux/kdebug.h>
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++      unsigned long i;
++
++      printk(KERN_ERR "PAX: bytes at PC: ");
++      for (i = 0; i < 5; i++) {
++              unsigned int c;
++              if (get_user(c, (unsigned int *)pc+i))
++                      printk(KERN_CONT "???????? ");
++              else
++                      printk(KERN_CONT "%08x ", c);
++      }
++      printk("\n");
++}
++#endif
++
+ /*
+  * This routine handles page faults.  It determines the address,
+  * and the problem, and then passes it off to one of the appropriate
+@@ -196,6 +213,14 @@ bad_area:
+ bad_area_nosemaphore:
+       /* User mode accesses just cause a SIGSEGV */
+       if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++              if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
++                      pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
++                      do_group_exit(SIGKILL);
++              }
++#endif
++
+               tsk->thread.cp0_badvaddr = address;
+               tsk->thread.error_code = write;
+ #if 0
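Editor's note: pax_report_insns() added above simply dumps the five instruction words at the faulting PC so a PAGEEXEC kill can be diagnosed from the kernel log; get_user() failures are printed as question marks. A user-space analogue of the dump loop (printf standing in for printk, a plain array read standing in for get_user, arbitrary example words):

#include <stdio.h>

static void report_insns(const unsigned int *pc)
{
    printf("bytes at PC: ");
    for (int i = 0; i < 5; i++)
        printf("%08x ", pc[i]);
    printf("\n");
}

int main(void)
{
    static const unsigned int words[5] = {
        0x03e00008, 0x00000000, 0x24020001, 0x0000000c, 0x00000000
    };
    report_insns(words);
    return 0;
}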
+diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
+index 7e5fe27..9656513 100644
+--- a/arch/mips/mm/mmap.c
++++ b/arch/mips/mm/mmap.c
+@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+       struct vm_area_struct *vma;
+       unsigned long addr = addr0;
+       int do_color_align;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       struct vm_unmapped_area_info info;
+       if (unlikely(len > TASK_SIZE))
+@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+               do_color_align = 1;
+       /* requesting a specific address */
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+               if (do_color_align)
+                       addr = COLOUR_ALIGN(addr, pgoff);
+@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+                       addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (TASK_SIZE - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+       info.length = len;
+       info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
+       info.align_offset = pgoff << PAGE_SHIFT;
++      info.threadstack_offset = offset;
+       if (dir == DOWN) {
+               info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+       unsigned long random_factor = 0UL;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (current->flags & PF_RANDOMIZE) {
+               random_factor = get_random_int();
+               random_factor = random_factor << PAGE_SHIFT;
+@@ -157,42 +167,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+       if (mmap_is_legacy()) {
+               mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base += mm->delta_mmap;
++#endif
++
+               mm->get_unmapped_area = arch_get_unmapped_area;
+               mm->unmap_area = arch_unmap_area;
+       } else {
+               mm->mmap_base = mmap_base(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+               mm->unmap_area = arch_unmap_area_topdown;
+       }
+ }
+-static inline unsigned long brk_rnd(void)
+-{
+-      unsigned long rnd = get_random_int();
+-
+-      rnd = rnd << PAGE_SHIFT;
+-      /* 8MB for 32bit, 256MB for 64bit */
+-      if (TASK_IS_32BIT_ADDR)
+-              rnd = rnd & 0x7ffffful;
+-      else
+-              rnd = rnd & 0xffffffful;
+-
+-      return rnd;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+-      unsigned long base = mm->brk;
+-      unsigned long ret;
+-
+-      ret = PAGE_ALIGN(base + brk_rnd());
+-
+-      if (ret < mm->brk)
+-              return mm->brk;
+-
+-      return ret;
+-}
+-
+ int __virt_addr_valid(const volatile void *kaddr)
+ {
+       return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
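Editor's note: under PAX_RANDMMAP the hunks above add a per-process delta (mm->delta_mmap, mm->delta_stack) on top of the kernel's own random_factor when the mmap base is chosen; gr_rand_threadstack_offset() and info.threadstack_offset come from other parts of the grsecurity patch and are not shown here. A rough sketch of how such a bottom-up base could be composed (all constants and field names are illustrative only, not the patch's):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT         12
#define TASK_UNMAPPED_BASE 0x2000000000UL       /* illustrative base address */

int main(void)
{
    srand((unsigned)time(NULL));
    unsigned long random_factor = ((unsigned long)rand() & 0xfffffUL) << PAGE_SHIFT;
    unsigned long delta_mmap    = ((unsigned long)rand() & 0x7ffUL) << PAGE_SHIFT;

    unsigned long mmap_base = TASK_UNMAPPED_BASE + random_factor + delta_mmap;
    printf("mmap_base = %#lx\n", mmap_base);
    return 0;
}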
+diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
+index 967d144..db12197 100644
+--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
++++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
+@@ -11,12 +11,14 @@
+ #ifndef _ASM_PROC_CACHE_H
+ #define _ASM_PROC_CACHE_H
++#include <linux/const.h>
++
+ /* L1 cache */
+ #define L1_CACHE_NWAYS                4       /* number of ways in caches */
+ #define L1_CACHE_NENTRIES     256     /* number of entries in each way */
+-#define L1_CACHE_BYTES                16      /* bytes per entry */
+ #define L1_CACHE_SHIFT                4       /* shift for bytes per entry */
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)   /* bytes per entry */
+ #define L1_CACHE_WAYDISP      0x1000  /* displacement of one way from the next */
+ #define L1_CACHE_TAG_VALID    0x00000001      /* cache tag valid bit */
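Editor's note: this cache.h change, repeated for several architectures below, derives L1_CACHE_BYTES from L1_CACHE_SHIFT instead of hard-coding it, using _AC(1,UL) from <linux/const.h> so the constant keeps its UL suffix in C but stays a bare number for the assembler. A simplified, roughly equivalent version of that helper:

#include <stdio.h>

/* simplified sketch of the <linux/const.h> helpers */
#ifdef __ASSEMBLY__
#define _AC(X, Y)  X               /* assemblers do not understand UL suffixes */
#else
#define __AC(X, Y) (X##Y)
#define _AC(X, Y)  __AC(X, Y)      /* paste the type suffix when compiling C */
#endif

#define L1_CACHE_SHIFT 4
#define L1_CACHE_BYTES (_AC(1, UL) << L1_CACHE_SHIFT)

int main(void)
{
    printf("L1_CACHE_BYTES = %lu\n", L1_CACHE_BYTES);   /* prints 16 */
    return 0;
}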
+diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
+index bcb5df2..84fabd2 100644
+--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
++++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
+@@ -16,13 +16,15 @@
+ #ifndef _ASM_PROC_CACHE_H
+ #define _ASM_PROC_CACHE_H
++#include <linux/const.h>
++
+ /*
+  * L1 cache
+  */
+ #define L1_CACHE_NWAYS                4               /* number of ways in caches */
+ #define L1_CACHE_NENTRIES     128             /* number of entries in each way */
+-#define L1_CACHE_BYTES                32              /* bytes per entry */
+ #define L1_CACHE_SHIFT                5               /* shift for bytes per entry */
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)   /* bytes per entry */
+ #define L1_CACHE_WAYDISP      0x1000          /* distance from one way to the next */
+ #define L1_CACHE_TAG_VALID    0x00000001      /* cache tag valid bit */
+diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
+index 4ce7a01..449202a 100644
+--- a/arch/openrisc/include/asm/cache.h
++++ b/arch/openrisc/include/asm/cache.h
+@@ -19,11 +19,13 @@
+ #ifndef __ASM_OPENRISC_CACHE_H
+ #define __ASM_OPENRISC_CACHE_H
++#include <linux/const.h>
++
+ /* FIXME: How can we replace these with values from the CPU...
+  * they shouldn't be hard-coded!
+  */
+-#define L1_CACHE_BYTES 16
+ #define L1_CACHE_SHIFT 4
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #endif /* __ASM_OPENRISC_CACHE_H */
+diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
+index 472886c..00e7df9 100644
+--- a/arch/parisc/include/asm/atomic.h
++++ b/arch/parisc/include/asm/atomic.h
+@@ -252,6 +252,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
+       return dec;
+ }
++#define atomic64_read_unchecked(v)            atomic64_read(v)
++#define atomic64_set_unchecked(v, i)          atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v)          atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v)   atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v)          atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v)             atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v)      atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v)             atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n)   atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* !CONFIG_64BIT */
+diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
+index 47f11c7..3420df2 100644
+--- a/arch/parisc/include/asm/cache.h
++++ b/arch/parisc/include/asm/cache.h
+@@ -5,6 +5,7 @@
+ #ifndef __ARCH_PARISC_CACHE_H
+ #define __ARCH_PARISC_CACHE_H
++#include <linux/const.h>
+ /*
+  * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
+@@ -15,13 +16,13 @@
+  * just ruin performance.
+  */
+ #ifdef CONFIG_PA20
+-#define L1_CACHE_BYTES 64
+ #define L1_CACHE_SHIFT 6
+ #else
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
+ #endif
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
++
+ #ifndef __ASSEMBLY__
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
+index ad2b503..bdf1651 100644
+--- a/arch/parisc/include/asm/elf.h
++++ b/arch/parisc/include/asm/elf.h
+@@ -342,6 +342,13 @@ struct pt_regs;   /* forward declaration... */
+ #define ELF_ET_DYN_BASE         (TASK_UNMAPPED_BASE + 0x01000000)
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   0x10000UL
++
++#define PAX_DELTA_MMAP_LEN    16
++#define PAX_DELTA_STACK_LEN   16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+    instruction set this CPU supports.  This could be done in user space,
+    but it's not easy, and we've already done it here.  */
+diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
+index fc987a1..6e068ef 100644
+--- a/arch/parisc/include/asm/pgalloc.h
++++ b/arch/parisc/include/asm/pgalloc.h
+@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+                       (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
+ }
++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
++{
++      pgd_populate(mm, pgd, pmd);
++}
++
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+ {
+       pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
+@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+ #define pmd_alloc_one(mm, addr)               ({ BUG(); ((pmd_t *)2); })
+ #define pmd_free(mm, x)                       do { } while (0)
+ #define pgd_populate(mm, pmd, pte)    BUG()
++#define pgd_populate_kernel(mm, pmd, pte)     BUG()
+ #endif
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
+index 1e40d7f..a3eb445 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
+ #define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+ #define PAGE_COPY       PAGE_EXECREAD
+ #define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
++# define PAGE_COPY_NOEXEC     __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++#else
++# define PAGE_SHARED_NOEXEC   PAGE_SHARED
++# define PAGE_COPY_NOEXEC     PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL   __pgprot(_PAGE_KERNEL)
+ #define PAGE_KERNEL_EXEC      __pgprot(_PAGE_KERNEL_EXEC)
+ #define PAGE_KERNEL_RWX       __pgprot(_PAGE_KERNEL_RWX)
+diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
+index e0a8235..ce2f1e1 100644
+--- a/arch/parisc/include/asm/uaccess.h
++++ b/arch/parisc/include/asm/uaccess.h
+@@ -245,10 +245,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
+                                           const void __user *from,
+                                           unsigned long n)
+ {
+-        int sz = __compiletime_object_size(to);
++        size_t sz = __compiletime_object_size(to);
+         int ret = -EFAULT;
+-        if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
++        if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
+                 ret = __copy_from_user(to, from, n);
+         else
+                 copy_from_user_overflow();
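Editor's note: the uaccess.h change widens sz from int to size_t so the "size unknown" sentinel is compared as (size_t)-1 instead of relying on a signed comparison. __compiletime_object_size() is a wrapper around GCC's __builtin_object_size(); a tiny stand-alone illustration of that builtin (exact results depend on the optimization level):

#include <stdio.h>

__attribute__((noinline))
static void through_pointer(const void *p)
{
    /* size of the object behind an arbitrary pointer: unknown, i.e. (size_t)-1 */
    printf("through pointer: %zu\n", __builtin_object_size(p, 0));
}

int main(void)
{
    char buf[16];
    /* 16 when the compiler can prove the object size (e.g. with -O2) */
    printf("local array:     %zu\n", __builtin_object_size(buf, 0));
    through_pointer(buf);
    return 0;
}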
+diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
+index 2a625fb..9908930 100644
+--- a/arch/parisc/kernel/module.c
++++ b/arch/parisc/kernel/module.c
+@@ -98,16 +98,38 @@
+ /* three functions to determine where in the module core
+  * or init pieces the location is */
++static inline int in_init_rx(struct module *me, void *loc)
++{
++      return (loc >= me->module_init_rx &&
++              loc < (me->module_init_rx + me->init_size_rx));
++}
++
++static inline int in_init_rw(struct module *me, void *loc)
++{
++      return (loc >= me->module_init_rw &&
++              loc < (me->module_init_rw + me->init_size_rw));
++}
++
+ static inline int in_init(struct module *me, void *loc)
+ {
+-      return (loc >= me->module_init &&
+-              loc <= (me->module_init + me->init_size));
++      return in_init_rx(me, loc) || in_init_rw(me, loc);
++}
++
++static inline int in_core_rx(struct module *me, void *loc)
++{
++      return (loc >= me->module_core_rx &&
++              loc < (me->module_core_rx + me->core_size_rx));
++}
++
++static inline int in_core_rw(struct module *me, void *loc)
++{
++      return (loc >= me->module_core_rw &&
++              loc < (me->module_core_rw + me->core_size_rw));
+ }
+ static inline int in_core(struct module *me, void *loc)
+ {
+-      return (loc >= me->module_core &&
+-              loc <= (me->module_core + me->core_size));
++      return in_core_rx(me, loc) || in_core_rw(me, loc);
+ }
+ static inline int in_local(struct module *me, void *loc)
+@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
+       }
+       /* align things a bit */
+-      me->core_size = ALIGN(me->core_size, 16);
+-      me->arch.got_offset = me->core_size;
+-      me->core_size += gots * sizeof(struct got_entry);
++      me->core_size_rw = ALIGN(me->core_size_rw, 16);
++      me->arch.got_offset = me->core_size_rw;
++      me->core_size_rw += gots * sizeof(struct got_entry);
+-      me->core_size = ALIGN(me->core_size, 16);
+-      me->arch.fdesc_offset = me->core_size;
+-      me->core_size += fdescs * sizeof(Elf_Fdesc);
++      me->core_size_rw = ALIGN(me->core_size_rw, 16);
++      me->arch.fdesc_offset = me->core_size_rw;
++      me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
+       me->arch.got_max = gots;
+       me->arch.fdesc_max = fdescs;
+@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+       BUG_ON(value == 0);
+-      got = me->module_core + me->arch.got_offset;
++      got = me->module_core_rw + me->arch.got_offset;
+       for (i = 0; got[i].addr; i++)
+               if (got[i].addr == value)
+                       goto out;
+@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+ #ifdef CONFIG_64BIT
+ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+ {
+-      Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
++      Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
+       if (!value) {
+               printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
+@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+       /* Create new one */
+       fdesc->addr = value;
+-      fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
++      fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
+       return (Elf_Addr)fdesc;
+ }
+ #endif /* CONFIG_64BIT */
+@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
+       table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
+       end = table + sechdrs[me->arch.unwind_section].sh_size;
+-      gp = (Elf_Addr)me->module_core + me->arch.got_offset;
++      gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
+       DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
+              me->arch.unwind_section, table, end, gp);
+diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
+index 5dfd248..64914ac 100644
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -33,9 +33,11 @@
+ #include <linux/utsname.h>
+ #include <linux/personality.h>
+-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
++static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
++                                      unsigned long flags)
+ {
+       struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+       info.flags = 0;
+       info.length = len;
+@@ -43,6 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
+       info.high_limit = TASK_SIZE;
+       info.align_mask = 0;
+       info.align_offset = 0;
++      info.threadstack_offset = offset;
+       return vm_unmapped_area(&info);
+ }
+@@ -61,10 +64,11 @@ static int get_offset(struct address_space *mapping)
+       return (unsigned long) mapping >> 8;
+ }
+-static unsigned long get_shared_area(struct address_space *mapping,
+-              unsigned long addr, unsigned long len, unsigned long pgoff)
++static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
++              unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+ {
+       struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+       info.flags = 0;
+       info.length = len;
+@@ -72,6 +76,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
+       info.high_limit = TASK_SIZE;
+       info.align_mask = PAGE_MASK & (SHMLBA - 1);
+       info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT;
++      info.threadstack_offset = offset;
+       return vm_unmapped_area(&info);
+ }
+@@ -86,15 +91,22 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+                       return -EINVAL;
+               return addr;
+       }
+-      if (!addr)
++      if (!addr) {
+               addr = TASK_UNMAPPED_BASE;
++#ifdef CONFIG_PAX_RANDMMAP
++              if (current->mm->pax_flags & MF_PAX_RANDMMAP)
++                      addr += current->mm->delta_mmap;
++#endif
++
++      }
++
+       if (filp) {
+-              addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
++              addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
+       } else if(flags & MAP_SHARED) {
+-              addr = get_shared_area(NULL, addr, len, pgoff);
++              addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
+       } else {
+-              addr = get_unshared_area(addr, len);
++              addr = get_unshared_area(filp, addr, len, flags);
+       }
+       return addr;
+ }
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index 04e47c6..7a8faf6 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -727,9 +727,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
+                       down_read(&current->mm->mmap_sem);
+                       vma = find_vma(current->mm,regs->iaoq[0]);
+-                      if (vma && (regs->iaoq[0] >= vma->vm_start)
+-                              && (vma->vm_flags & VM_EXEC)) {
+-
++                      if (vma && (regs->iaoq[0] >= vma->vm_start)) {
+                               fault_address = regs->iaoq[0];
+                               fault_space = regs->iasq[0];
+diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
+index f247a34..dc0f219 100644
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -15,6 +15,7 @@
+ #include <linux/sched.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
++#include <linux/unistd.h>
+ #include <asm/uaccess.h>
+ #include <asm/traps.h>
+@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
+ static unsigned long
+ parisc_acctyp(unsigned long code, unsigned int inst)
+ {
+-      if (code == 6 || code == 16)
++      if (code == 6 || code == 7 || code == 16)
+           return VM_EXEC;
+       switch (inst & 0xf0000000) {
+@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
+                       }
+ #endif
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
++ *
++ * returns 1 when task should be killed
++ *         2 when rt_sigreturn trampoline was detected
++ *         3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++      int err;
++
++      do { /* PaX: unpatched PLT emulation */
++              unsigned int bl, depwi;
++
++              err = get_user(bl, (unsigned int *)instruction_pointer(regs));
++              err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
++
++              if (err)
++                      break;
++
++              if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
++                      unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
++
++                      err = get_user(ldw, (unsigned int *)addr);
++                      err |= get_user(bv, (unsigned int *)(addr+4));
++                      err |= get_user(ldw2, (unsigned int *)(addr+8));
++
++                      if (err)
++                              break;
++
++                      if (ldw == 0x0E801096U &&
++                          bv == 0xEAC0C000U &&
++                          ldw2 == 0x0E881095U)
++                      {
++                              unsigned int resolver, map;
++
++                              err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
++                              err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
++                              if (err)
++                                      break;
++
++                              regs->gr[20] = instruction_pointer(regs)+8;
++                              regs->gr[21] = map;
++                              regs->gr[22] = resolver;
++                              regs->iaoq[0] = resolver | 3UL;
++                              regs->iaoq[1] = regs->iaoq[0] + 4;
++                              return 3;
++                      }
++              }
++      } while (0);
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++
++#ifndef CONFIG_PAX_EMUSIGRT
++      if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
++              return 1;
++#endif
++
++      do { /* PaX: rt_sigreturn emulation */
++              unsigned int ldi1, ldi2, bel, nop;
++
++              err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
++              err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
++              err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
++              err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
++
++              if (err)
++                      break;
++
++              if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
++                  ldi2 == 0x3414015AU &&
++                  bel == 0xE4008200U &&
++                  nop == 0x08000240U)
++              {
++                      regs->gr[25] = (ldi1 & 2) >> 1;
++                      regs->gr[20] = __NR_rt_sigreturn;
++                      regs->gr[31] = regs->iaoq[1] + 16;
++                      regs->sr[0] = regs->iasq[1];
++                      regs->iaoq[0] = 0x100UL;
++                      regs->iaoq[1] = regs->iaoq[0] + 4;
++                      regs->iasq[0] = regs->sr[2];
++                      regs->iasq[1] = regs->sr[2];
++                      return 2;
++              }
++      } while (0);
++#endif
++
++      return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++      unsigned long i;
++
++      printk(KERN_ERR "PAX: bytes at PC: ");
++      for (i = 0; i < 5; i++) {
++              unsigned int c;
++              if (get_user(c, (unsigned int *)pc+i))
++                      printk(KERN_CONT "???????? ");
++              else
++                      printk(KERN_CONT "%08x ", c);
++      }
++      printk("\n");
++}
++#endif
++
+ int fixup_exception(struct pt_regs *regs)
+ {
+       const struct exception_table_entry *fix;
+@@ -194,8 +305,33 @@ good_area:
+       acc_type = parisc_acctyp(code,regs->iir);
+-      if ((vma->vm_flags & acc_type) != acc_type)
++      if ((vma->vm_flags & acc_type) != acc_type) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++              if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
++                  (address & ~3UL) == instruction_pointer(regs))
++              {
++                      up_read(&mm->mmap_sem);
++                      switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++                      case 3:
++                              return;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++                      case 2:
++                              return;
++#endif
++
++                      }
++                      pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
++                      do_group_exit(SIGKILL);
++              }
++#endif
++
+               goto bad_area;
++      }
+       /*
+        * If for any reason at all we couldn't handle the fault, make
+diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
+index e3b1d41..8e81edf 100644
+--- a/arch/powerpc/include/asm/atomic.h
++++ b/arch/powerpc/include/asm/atomic.h
+@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
+       return t1;
+ }
++#define atomic64_read_unchecked(v)            atomic64_read(v)
++#define atomic64_set_unchecked(v, i)          atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v)          atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v)   atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v)          atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v)             atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v)      atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v)             atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n)   atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* __powerpc64__ */
+ #endif /* __KERNEL__ */
+diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
+index 9e495c9..b6878e5 100644
+--- a/arch/powerpc/include/asm/cache.h
++++ b/arch/powerpc/include/asm/cache.h
+@@ -3,6 +3,7 @@
+ #ifdef __KERNEL__
++#include <linux/const.h>
+ /* bytes per L1 cache line */
+ #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
+@@ -22,7 +23,7 @@
+ #define L1_CACHE_SHIFT                7
+ #endif
+-#define       L1_CACHE_BYTES          (1 << L1_CACHE_SHIFT)
++#define       L1_CACHE_BYTES          (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define       SMP_CACHE_BYTES         L1_CACHE_BYTES
+diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
+index cc0655a..13eac2e 100644
+--- a/arch/powerpc/include/asm/elf.h
++++ b/arch/powerpc/include/asm/elf.h
+@@ -28,8 +28,19 @@
+    the loader.  We need to make sure that it is out of the way of the program
+    that it will "exec", and that there is sufficient room for the brk.  */
+-extern unsigned long randomize_et_dyn(unsigned long base);
+-#define ELF_ET_DYN_BASE               (randomize_et_dyn(0x20000000))
++#define ELF_ET_DYN_BASE               (0x20000000)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   (0x10000000UL)
++
++#ifdef __powerpc64__
++#define PAX_DELTA_MMAP_LEN    (is_32bit_task() ? 16 : 28)
++#define PAX_DELTA_STACK_LEN   (is_32bit_task() ? 16 : 28)
++#else
++#define PAX_DELTA_MMAP_LEN    15
++#define PAX_DELTA_STACK_LEN   15
++#endif
++#endif
+ /*
+  * Our registers are always unsigned longs, whether we're a 32 bit
+@@ -123,10 +134,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+       (0x7ff >> (PAGE_SHIFT - 12)) : \
+       (0x3ffff >> (PAGE_SHIFT - 12)))
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+-
+ #ifdef CONFIG_SPU_BASE
+ /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
+ #define NT_SPU                1
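Editor's note: PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits PaX applies to the mmap and stack bases; assuming the delta counts pages, as the mm->delta_mmap shifts elsewhere in this patch suggest, the randomization window in bytes is 2^(bits + PAGE_SHIFT). A short calculation putting the PowerPC values above into perspective, with 4 KiB pages assumed:

#include <stdio.h>

int main(void)
{
    const unsigned int page_shift = 12;                 /* 4 KiB pages assumed */
    const unsigned int delta_bits[] = { 15, 16, 28 };   /* values from this hunk */

    for (int i = 0; i < 3; i++) {
        unsigned long long range = 1ULL << (delta_bits[i] + page_shift);
        printf("%2u delta bits -> %llu MiB of randomization\n",
               delta_bits[i], range >> 20);
    }
    return 0;
}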
+diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
+index 8196e9c..d83a9f3 100644
+--- a/arch/powerpc/include/asm/exec.h
++++ b/arch/powerpc/include/asm/exec.h
+@@ -4,6 +4,6 @@
+ #ifndef _ASM_POWERPC_EXEC_H
+ #define _ASM_POWERPC_EXEC_H
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+ #endif /* _ASM_POWERPC_EXEC_H */
+diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
+index 5acabbd..7ea14fa 100644
+--- a/arch/powerpc/include/asm/kmap_types.h
++++ b/arch/powerpc/include/asm/kmap_types.h
+@@ -10,7 +10,7 @@
+  * 2 of the License, or (at your option) any later version.
+  */
+-#define KM_TYPE_NR 16
++#define KM_TYPE_NR 17
+ #endif        /* __KERNEL__ */
+ #endif        /* _ASM_POWERPC_KMAP_TYPES_H */
+diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
+index 8565c25..2865190 100644
+--- a/arch/powerpc/include/asm/mman.h
++++ b/arch/powerpc/include/asm/mman.h
+@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
+ }
+ #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
+-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
++static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
+ {
+       return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
+ }
+diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
+index 988c812..63c7d70 100644
+--- a/arch/powerpc/include/asm/page.h
++++ b/arch/powerpc/include/asm/page.h
+@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
+  * and needs to be executable.  This means the whole heap ends
+  * up being executable.
+  */
+-#define VM_DATA_DEFAULT_FLAGS32       (VM_READ | VM_WRITE | VM_EXEC | \
+-                               VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_DATA_DEFAULT_FLAGS32 \
++      (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++       VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+ #define VM_DATA_DEFAULT_FLAGS64       (VM_READ | VM_WRITE | \
+                                VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
+ #define is_kernel_addr(x)     ((x) >= PAGE_OFFSET)
+ #endif
++#define ktla_ktva(addr)               (addr)
++#define ktva_ktla(addr)               (addr)
++
+ #ifndef CONFIG_PPC_BOOK3S_64
+ /*
+  * Use the top bit of the higher-level page table entries to indicate whether
+diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
+index 88693ce..ac6f9ab 100644
+--- a/arch/powerpc/include/asm/page_64.h
++++ b/arch/powerpc/include/asm/page_64.h
+@@ -153,15 +153,18 @@ do {                                             \
+  * stack by default, so in the absence of a PT_GNU_STACK program header
+  * we turn execute permission off.
+  */
+-#define VM_STACK_DEFAULT_FLAGS32      (VM_READ | VM_WRITE | VM_EXEC | \
+-                                       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_STACK_DEFAULT_FLAGS32 \
++      (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++       VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+ #define VM_STACK_DEFAULT_FLAGS64      (VM_READ | VM_WRITE | \
+                                        VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#ifndef CONFIG_PAX_PAGEEXEC
+ #define VM_STACK_DEFAULT_FLAGS \
+       (is_32bit_task() ? \
+        VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
++#endif
+ #include <asm-generic/getorder.h>
+diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
+index b66ae72..4a378cd 100644
+--- a/arch/powerpc/include/asm/pgalloc-64.h
++++ b/arch/powerpc/include/asm/pgalloc-64.h
+@@ -53,6 +53,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+ #ifndef CONFIG_PPC_64K_PAGES
+ #define pgd_populate(MM, PGD, PUD)    pgd_set(PGD, PUD)
++#define pgd_populate_kernel(MM, PGD, PUD)     pgd_populate((MM), (PGD), (PUD))
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+@@ -70,6 +71,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+       pud_set(pud, (unsigned long)pmd);
+ }
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++      pud_populate(mm, pud, pmd);
++}
++
+ #define pmd_populate(mm, pmd, pte_page) \
+       pmd_populate_kernel(mm, pmd, page_address(pte_page))
+ #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
+@@ -171,6 +177,7 @@ extern void __tlb_remove_table(void *_table);
+ #endif
+ #define pud_populate(mm, pud, pmd)    pud_set(pud, (unsigned long)pmd)
++#define pud_populate_kernel(mm, pud, pmd)     pud_populate((mm), (pud), (pmd))
+ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+                                      pte_t *pte)
+diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
+index 7aeb955..19f748e 100644
+--- a/arch/powerpc/include/asm/pgtable.h
++++ b/arch/powerpc/include/asm/pgtable.h
+@@ -2,6 +2,7 @@
+ #define _ASM_POWERPC_PGTABLE_H
+ #ifdef __KERNEL__
++#include <linux/const.h>
+ #ifndef __ASSEMBLY__
+ #include <asm/processor.h>            /* For TASK_SIZE */
+ #include <asm/mmu.h>
+diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
+index 4aad413..85d86bf 100644
+--- a/arch/powerpc/include/asm/pte-hash32.h
++++ b/arch/powerpc/include/asm/pte-hash32.h
+@@ -21,6 +21,7 @@
+ #define _PAGE_FILE    0x004   /* when !present: nonlinear file mapping */
+ #define _PAGE_USER    0x004   /* usermode access allowed */
+ #define _PAGE_GUARDED 0x008   /* G: prohibit speculative access */
++#define _PAGE_EXEC    _PAGE_GUARDED
+ #define _PAGE_COHERENT        0x010   /* M: enforce memory coherence (SMP systems) */
+ #define _PAGE_NO_CACHE        0x020   /* I: cache inhibit */
+ #define _PAGE_WRITETHRU       0x040   /* W: cache write-through */
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index e1fb161..2290d1d 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -234,6 +234,7 @@
+ #define SPRN_DBCR     0x136   /* e300 Data Breakpoint Control Reg */
+ #define SPRN_DSISR    0x012   /* Data Storage Interrupt Status Register */
+ #define   DSISR_NOHPTE                0x40000000      /* no translation found */
++#define   DSISR_GUARDED               0x10000000      /* fetch from guarded storage */
+ #define   DSISR_PROTFAULT     0x08000000      /* protection fault */
+ #define   DSISR_ISSTORE               0x02000000      /* access was a store */
+ #define   DSISR_DABRMATCH     0x00400000      /* hit data breakpoint */
+diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
+index 48cfc85..891382f 100644
+--- a/arch/powerpc/include/asm/smp.h
++++ b/arch/powerpc/include/asm/smp.h
+@@ -50,7 +50,7 @@ struct smp_ops_t {
+       int   (*cpu_disable)(void);
+       void  (*cpu_die)(unsigned int nr);
+       int   (*cpu_bootable)(unsigned int nr);
+-};
++} __no_const;
+ extern void smp_send_debugger_break(void);
+ extern void start_secondary_resume(void);
+diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
+index ba7b197..d292e26 100644
+--- a/arch/powerpc/include/asm/thread_info.h
++++ b/arch/powerpc/include/asm/thread_info.h
+@@ -93,7 +93,6 @@ static inline struct thread_info *current_thread_info(void)
+ #define TIF_POLLING_NRFLAG    3       /* true if poll_idle() is polling
+                                          TIF_NEED_RESCHED */
+ #define TIF_32BIT             4       /* 32 bit binary */
+-#define TIF_PERFMON_WORK      5       /* work for pfm_handle_work() */
+ #define TIF_PERFMON_CTXSW     6       /* perfmon needs ctxsw calls */
+ #define TIF_SYSCALL_AUDIT     7       /* syscall auditing active */
+ #define TIF_SINGLESTEP                8       /* singlestepping active */
+@@ -107,6 +106,9 @@ static inline struct thread_info *current_thread_info(void)
+ #define TIF_EMULATE_STACK_STORE       16      /* Is an instruction emulation
+                                               for stack store? */
+ #define TIF_MEMDIE            17      /* is terminating due to OOM killer */
++#define TIF_PERFMON_WORK      18      /* work for pfm_handle_work() */
++/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
++#define TIF_GRSEC_SETXID      5       /* update credentials on syscall entry/exit */
+ /* as above, but as bit values */
+ #define _TIF_SYSCALL_TRACE    (1<<TIF_SYSCALL_TRACE)
+@@ -126,9 +128,10 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_SYSCALL_TRACEPOINT       (1<<TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_EMULATE_STACK_STORE      (1<<TIF_EMULATE_STACK_STORE)
+ #define _TIF_NOHZ             (1<<TIF_NOHZ)
++#define _TIF_GRSEC_SETXID     (1<<TIF_GRSEC_SETXID)
+ #define _TIF_SYSCALL_T_OR_A   (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+                                _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+-                               _TIF_NOHZ)
++                               _TIF_NOHZ | _TIF_GRSEC_SETXID)
+ #define _TIF_USER_WORK_MASK   (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+                                _TIF_NOTIFY_RESUME | _TIF_UPROBE)
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index 4db4959..aba5c41 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -318,52 +318,6 @@ do {                                                              \
+ extern unsigned long __copy_tofrom_user(void __user *to,
+               const void __user *from, unsigned long size);
+-#ifndef __powerpc64__
+-
+-static inline unsigned long copy_from_user(void *to,
+-              const void __user *from, unsigned long n)
+-{
+-      unsigned long over;
+-
+-      if (access_ok(VERIFY_READ, from, n))
+-              return __copy_tofrom_user((__force void __user *)to, from, n);
+-      if ((unsigned long)from < TASK_SIZE) {
+-              over = (unsigned long)from + n - TASK_SIZE;
+-              return __copy_tofrom_user((__force void __user *)to, from,
+-                              n - over) + over;
+-      }
+-      return n;
+-}
+-
+-static inline unsigned long copy_to_user(void __user *to,
+-              const void *from, unsigned long n)
+-{
+-      unsigned long over;
+-
+-      if (access_ok(VERIFY_WRITE, to, n))
+-              return __copy_tofrom_user(to, (__force void __user *)from, n);
+-      if ((unsigned long)to < TASK_SIZE) {
+-              over = (unsigned long)to + n - TASK_SIZE;
+-              return __copy_tofrom_user(to, (__force void __user *)from,
+-                              n - over) + over;
+-      }
+-      return n;
+-}
+-
+-#else /* __powerpc64__ */
+-
+-#define __copy_in_user(to, from, size) \
+-      __copy_tofrom_user((to), (from), (size))
+-
+-extern unsigned long copy_from_user(void *to, const void __user *from,
+-                                  unsigned long n);
+-extern unsigned long copy_to_user(void __user *to, const void *from,
+-                                unsigned long n);
+-extern unsigned long copy_in_user(void __user *to, const void __user *from,
+-                                unsigned long n);
+-
+-#endif /* __powerpc64__ */
+-
+ static inline unsigned long __copy_from_user_inatomic(void *to,
+               const void __user *from, unsigned long n)
+ {
+@@ -387,6 +341,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
+               if (ret == 0)
+                       return 0;
+       }
++
++      if (!__builtin_constant_p(n))
++              check_object_size(to, n, false);
++
+       return __copy_tofrom_user((__force void __user *)to, from, n);
+ }
+@@ -413,6 +371,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
+               if (ret == 0)
+                       return 0;
+       }
++
++      if (!__builtin_constant_p(n))
++              check_object_size(from, n, true);
++
+       return __copy_tofrom_user(to, (__force const void __user *)from, n);
+ }
+@@ -430,6 +392,92 @@ static inline unsigned long __copy_to_user(void __user *to,
+       return __copy_to_user_inatomic(to, from, size);
+ }
++#ifndef __powerpc64__
++
++static inline unsigned long __must_check copy_from_user(void *to,
++              const void __user *from, unsigned long n)
++{
++      unsigned long over;
++
++      if ((long)n < 0)
++              return n;
++
++      if (access_ok(VERIFY_READ, from, n)) {
++              if (!__builtin_constant_p(n))
++                      check_object_size(to, n, false);
++              return __copy_tofrom_user((__force void __user *)to, from, n);
++      }
++      if ((unsigned long)from < TASK_SIZE) {
++              over = (unsigned long)from + n - TASK_SIZE;
++              if (!__builtin_constant_p(n - over))
++                      check_object_size(to, n - over, false);
++              return __copy_tofrom_user((__force void __user *)to, from,
++                              n - over) + over;
++      }
++      return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to,
++              const void *from, unsigned long n)
++{
++      unsigned long over;
++
++      if ((long)n < 0)
++              return n;
++
++      if (access_ok(VERIFY_WRITE, to, n)) {
++              if (!__builtin_constant_p(n))
++                      check_object_size(from, n, true);
++              return __copy_tofrom_user(to, (__force void __user *)from, n);
++      }
++      if ((unsigned long)to < TASK_SIZE) {
++              over = (unsigned long)to + n - TASK_SIZE;
++              if (!__builtin_constant_p(n))
++                      check_object_size(from, n - over, true);
++              return __copy_tofrom_user(to, (__force void __user *)from,
++                              n - over) + over;
++      }
++      return n;
++}
++
++#else /* __powerpc64__ */
++
++#define __copy_in_user(to, from, size) \
++      __copy_tofrom_user((to), (from), (size))
++
++static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++      if ((long)n < 0 || n > INT_MAX)
++              return n;
++
++      if (!__builtin_constant_p(n))
++              check_object_size(to, n, false);
++
++      if (likely(access_ok(VERIFY_READ, from, n)))
++              n = __copy_from_user(to, from, n);
++      else
++              memset(to, 0, n);
++      return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++      if ((long)n < 0 || n > INT_MAX)
++              return n;
++
++      if (likely(access_ok(VERIFY_WRITE, to, n))) {
++              if (!__builtin_constant_p(n))
++                      check_object_size(from, n, true);
++              n = __copy_to_user(to, from, n);
++      }
++      return n;
++}
++
++extern unsigned long copy_in_user(void __user *to, const void __user *from,
++                                unsigned long n);
++
++#endif /* __powerpc64__ */
++
+ extern unsigned long __clear_user(void __user *addr, unsigned long size);
+ static inline unsigned long clear_user(void __user *addr, unsigned long size)
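Editor's note: the rewritten copy_from_user()/copy_to_user() above add two checks before the actual copy — lengths with the sign bit set are rejected outright, and for non-constant lengths check_object_size() verifies the kernel buffer really is that large. A user-space sketch of the same "validate the length before copying" shape; the object-size helper here is only a stand-in and does not match the kernel helper's signature:

#include <stdio.h>
#include <string.h>
#include <limits.h>

/* crude stand-in for the kernel's check_object_size() */
static int object_large_enough(size_t objsz, size_t n)
{
    return n <= objsz;                     /* the kernel would report/abort instead */
}

static size_t copy_from_user_sketch(void *to, size_t tosz,
                                    const void *from, size_t n)
{
    if ((long)n < 0 || n > INT_MAX)        /* sign bit set: refuse, nothing copied */
        return n;
    if (!object_large_enough(tosz, n))     /* destination too small: refuse */
        return n;
    memcpy(to, from, n);                   /* the real code uses __copy_tofrom_user */
    return 0;                              /* bytes left uncopied */
}

int main(void)
{
    char src[8] = "grsec", dst[8];
    printf("good copy left %zu uncopied\n",
           copy_from_user_sketch(dst, sizeof(dst), src, 6));
    printf("oversized copy left %zu uncopied\n",
           copy_from_user_sketch(dst, sizeof(dst), src, sizeof(dst) + 1));
    return 0;
}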
+diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
+index 645170a..6cf0271 100644
+--- a/arch/powerpc/kernel/exceptions-64e.S
++++ b/arch/powerpc/kernel/exceptions-64e.S
+@@ -757,6 +757,7 @@ storage_fault_common:
+       std     r14,_DAR(r1)
+       std     r15,_DSISR(r1)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
++      bl      .save_nvgprs
+       mr      r4,r14
+       mr      r5,r15
+       ld      r14,PACA_EXGEN+EX_R14(r13)
+@@ -765,8 +766,7 @@ storage_fault_common:
+       cmpdi   r3,0
+       bne-    1f
+       b       .ret_from_except_lite
+-1:    bl      .save_nvgprs
+-      mr      r5,r3
++1:    mr      r5,r3
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       ld      r4,_DAR(r1)
+       bl      .bad_page_fault
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 902ca3c..e942155 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -1357,10 +1357,10 @@ handle_page_fault:
+ 11:   ld      r4,_DAR(r1)
+       ld      r5,_DSISR(r1)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
++      bl      .save_nvgprs
+       bl      .do_page_fault
+       cmpdi   r3,0
+       beq+    12f
+-      bl      .save_nvgprs
+       mr      r5,r3
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       lwz     r4,_DAR(r1)
+diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
+index 2e3200c..72095ce 100644
+--- a/arch/powerpc/kernel/module_32.c
++++ b/arch/powerpc/kernel/module_32.c
+@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
+                       me->arch.core_plt_section = i;
+       }
+       if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
+-              printk("Module doesn't contain .plt or .init.plt sections.\n");
++              printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
+               return -ENOEXEC;
+       }
+@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
+       DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
+       /* Init, or core PLT? */
+-      if (location >= mod->module_core
+-          && location < mod->module_core + mod->core_size)
++      if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
++          (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
+               entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
+-      else
++      else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
++               (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
+               entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
++      else {
++              printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
++              return ~0UL;
++      }
+       /* Find this entry, or if that fails, the next avail. entry */
+       while (entry->jump[0]) {
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 7baa27b..f6b394a 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -884,8 +884,8 @@ void show_regs(struct pt_regs * regs)
+        * Lookup NIP late so we have the best change of getting the
+        * above info out without failing
+        */
+-      printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
+-      printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
++      printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
++      printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
+ #endif
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
+@@ -1345,10 +1345,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+               newsp = stack[0];
+               ip = stack[STACK_FRAME_LR_SAVE];
+               if (!firstframe || ip != lr) {
+-                      printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
++                      printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+                       if ((ip == rth || ip == mrth) && curr_frame >= 0) {
+-                              printk(" (%pS)",
++                              printk(" (%pA)",
+                                      (void *)current->ret_stack[curr_frame].ret);
+                               curr_frame--;
+                       }
+@@ -1368,7 +1368,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+                       struct pt_regs *regs = (struct pt_regs *)
+                               (sp + STACK_FRAME_OVERHEAD);
+                       lr = regs->link;
+-                      printk("--- Exception: %lx at %pS\n    LR = %pS\n",
++                      printk("--- Exception: %lx at %pA\n    LR = %pA\n",
+                              regs->trap, (void *)regs->nip, (void *)lr);
+                       firstframe = 1;
+               }
+@@ -1404,58 +1404,3 @@ void notrace __ppc64_runlatch_off(void)
+       mtspr(SPRN_CTRLT, ctrl);
+ }
+ #endif /* CONFIG_PPC64 */
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+-      if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+-              sp -= get_random_int() & ~PAGE_MASK;
+-      return sp & ~0xf;
+-}
+-
+-static inline unsigned long brk_rnd(void)
+-{
+-        unsigned long rnd = 0;
+-
+-      /* 8MB for 32bit, 1GB for 64bit */
+-      if (is_32bit_task())
+-              rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
+-      else
+-              rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
+-
+-      return rnd << PAGE_SHIFT;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+-      unsigned long base = mm->brk;
+-      unsigned long ret;
+-
+-#ifdef CONFIG_PPC_STD_MMU_64
+-      /*
+-       * If we are using 1TB segments and we are allowed to randomise
+-       * the heap, we can put it above 1TB so it is backed by a 1TB
+-       * segment. Otherwise the heap will be in the bottom 1TB
+-       * which always uses 256MB segments and this may result in a
+-       * performance penalty.
+-       */
+-      if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
+-              base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
+-#endif
+-
+-      ret = PAGE_ALIGN(base + brk_rnd());
+-
+-      if (ret < mm->brk)
+-              return mm->brk;
+-
+-      return ret;
+-}
+-
+-unsigned long randomize_et_dyn(unsigned long base)
+-{
+-      unsigned long ret = PAGE_ALIGN(base + brk_rnd());
+-
+-      if (ret < base)
+-              return base;
+-
+-      return ret;
+-}
+diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
+index 64f7bd5..8dd550f 100644
+--- a/arch/powerpc/kernel/ptrace.c
++++ b/arch/powerpc/kernel/ptrace.c
+@@ -1783,6 +1783,10 @@ long arch_ptrace(struct task_struct *child, long request,
+       return ret;
+ }
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ /*
+  * We must return the syscall number to actually look up in the table.
+  * This can be -1L to skip running any syscall at all.
+@@ -1795,6 +1799,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
+       secure_computing_strict(regs->gpr[0]);
++#ifdef CONFIG_GRKERNSEC_SETXID
++      if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++              gr_delayed_cred_worker();
++#endif
++
+       if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+           tracehook_report_syscall_entry(regs))
+               /*
+@@ -1829,6 +1838,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
+ {
+       int step;
++#ifdef CONFIG_GRKERNSEC_SETXID
++      if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++              gr_delayed_cred_worker();
++#endif
++
+       audit_syscall_exit(regs);
+       if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index 0f83122..c0aca6a 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -987,7 +987,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
+       /* Save user registers on the stack */
+       frame = &rt_sf->uc.uc_mcontext;
+       addr = frame;
+-      if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
++      if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+               sigret = 0;
+               tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
+       } else {
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index 887e99d..310bc11 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -751,7 +751,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
+ #endif
+       /* Set up to return from userspace. */
+-      if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
++      if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+               regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
+       } else {
+               err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
+diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
+index e68a845..8b140e6 100644
+--- a/arch/powerpc/kernel/sysfs.c
++++ b/arch/powerpc/kernel/sysfs.c
+@@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
++static struct notifier_block sysfs_cpu_nb = {
+       .notifier_call  = sysfs_cpu_notify,
+ };
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 88929b1..bece8f8 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -141,6 +141,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
+       return flags;
+ }
++extern void gr_handle_kernel_exploit(void);
++
+ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
+                              int signr)
+ {
+@@ -190,6 +192,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
+               panic("Fatal exception in interrupt");
+       if (panic_on_oops)
+               panic("Fatal exception");
++
++      gr_handle_kernel_exploit();
++
+       do_exit(signr);
+ }
+diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
+index d4f463a..8fb7431 100644
+--- a/arch/powerpc/kernel/vdso.c
++++ b/arch/powerpc/kernel/vdso.c
+@@ -34,6 +34,7 @@
+ #include <asm/firmware.h>
+ #include <asm/vdso.h>
+ #include <asm/vdso_datapage.h>
++#include <asm/mman.h>
+ #include "setup.h"
+@@ -222,7 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+       vdso_base = VDSO32_MBASE;
+ #endif
+-      current->mm->context.vdso_base = 0;
++      current->mm->context.vdso_base = ~0UL;
+       /* vDSO has a problem and was disabled, just don't "enable" it for the
+        * process
+@@ -242,7 +243,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+       vdso_base = get_unmapped_area(NULL, vdso_base,
+                                     (vdso_pages << PAGE_SHIFT) +
+                                     ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
+-                                    0, 0);
++                                    0, MAP_PRIVATE | MAP_EXECUTABLE);
+       if (IS_ERR_VALUE(vdso_base)) {
+               rc = vdso_base;
+               goto fail_mmapsem;
+diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
+index 5eea6f3..5d10396 100644
+--- a/arch/powerpc/lib/usercopy_64.c
++++ b/arch/powerpc/lib/usercopy_64.c
+@@ -9,22 +9,6 @@
+ #include <linux/module.h>
+ #include <asm/uaccess.h>
+-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+-{
+-      if (likely(access_ok(VERIFY_READ, from, n)))
+-              n = __copy_from_user(to, from, n);
+-      else
+-              memset(to, 0, n);
+-      return n;
+-}
+-
+-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+-{
+-      if (likely(access_ok(VERIFY_WRITE, to, n)))
+-              n = __copy_to_user(to, from, n);
+-      return n;
+-}
+-
+ unsigned long copy_in_user(void __user *to, const void __user *from,
+                          unsigned long n)
+ {
+@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
+       return n;
+ }
+-EXPORT_SYMBOL(copy_from_user);
+-EXPORT_SYMBOL(copy_to_user);
+ EXPORT_SYMBOL(copy_in_user);
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index 8726779..a33c512 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -33,6 +33,10 @@
+ #include <linux/magic.h>
+ #include <linux/ratelimit.h>
+ #include <linux/context_tracking.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
++#include <linux/unistd.h>
+ #include <asm/firmware.h>
+ #include <asm/page.h>
+@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
+ }
+ #endif
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->nip = fault address)
++ *
++ * returns 1 when task should be killed
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++      return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++      unsigned long i;
++
++      printk(KERN_ERR "PAX: bytes at PC: ");
++      for (i = 0; i < 5; i++) {
++              unsigned int c;
++              if (get_user(c, (unsigned int __user *)pc+i))
++                      printk(KERN_CONT "???????? ");
++              else
++                      printk(KERN_CONT "%08x ", c);
++      }
++      printk("\n");
++}
++#endif
++
+ /*
+  * Check whether the instruction at regs->nip is a store using
+  * an update addressing form which will update r1.
+@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
+        * indicate errors in DSISR but can validly be set in SRR1.
+        */
+       if (trap == 0x400)
+-              error_code &= 0x48200000;
++              error_code &= 0x58200000;
+       else
+               is_write = error_code & DSISR_ISSTORE;
+ #else
+@@ -371,7 +402,7 @@ good_area:
+          * "undefined".  Of those that can be set, this is the only
+          * one which seems bad.
+          */
+-      if (error_code & 0x10000000)
++      if (error_code & DSISR_GUARDED)
+                 /* Guarded storage error. */
+               goto bad_area;
+ #endif /* CONFIG_8xx */
+@@ -386,7 +417,7 @@ good_area:
+                * processors use the same I/D cache coherency mechanism
+                * as embedded.
+                */
+-              if (error_code & DSISR_PROTFAULT)
++              if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
+                       goto bad_area;
+ #endif /* CONFIG_PPC_STD_MMU */
+@@ -471,6 +502,23 @@ bad_area:
+ bad_area_nosemaphore:
+       /* User mode accesses cause a SIGSEGV */
+       if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++              if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++#ifdef CONFIG_PPC_STD_MMU
++                      if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
++#else
++                      if (is_exec && regs->nip == address) {
++#endif
++                              switch (pax_handle_fetch_fault(regs)) {
++                              }
++
++                              pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
++                              do_group_exit(SIGKILL);
++                      }
++              }
++#endif
++
+               _exception(SIGSEGV, regs, code, address);
+               goto bail;
+       }
+diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
+index 67a42ed..cd463e0 100644
+--- a/arch/powerpc/mm/mmap_64.c
++++ b/arch/powerpc/mm/mmap_64.c
+@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
+ {
+       unsigned long rnd = 0;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (current->flags & PF_RANDOMIZE) {
+               /* 8MB for 32bit, 1GB for 64bit */
+               if (is_32bit_task())
+@@ -91,10 +95,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+        */
+       if (mmap_is_legacy()) {
+               mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base += mm->delta_mmap;
++#endif
++
+               mm->get_unmapped_area = arch_get_unmapped_area;
+               mm->unmap_area = arch_unmap_area;
+       } else {
+               mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+               mm->unmap_area = arch_unmap_area_topdown;
+       }
+diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
+index e779642..e5bb889 100644
+--- a/arch/powerpc/mm/mmu_context_nohash.c
++++ b/arch/powerpc/mm/mmu_context_nohash.c
+@@ -363,7 +363,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
++static struct notifier_block mmu_context_cpu_nb = {
+       .notifier_call  = mmu_context_cpu_notify,
+ };
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index cafad40..9cbc0fc 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -920,7 +920,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
+       return ret;
+ }
+-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
++static struct notifier_block ppc64_numa_nb = {
+       .notifier_call = cpu_numa_callback,
+       .priority = 1 /* Must run before sched domains notifier. */
+ };
+diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
+index 3e99c14..f00953c 100644
+--- a/arch/powerpc/mm/slice.c
++++ b/arch/powerpc/mm/slice.c
+@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
+       if ((mm->task_size - len) < addr)
+               return 0;
+       vma = find_vma(mm, addr);
+-      return (!vma || (addr + len) <= vma->vm_start);
++      return check_heap_stack_gap(vma, addr, len, 0);
+ }
+ static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
+@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
+       info.align_offset = 0;
+       addr = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (mm->pax_flags & MF_PAX_RANDMMAP)
++              addr += mm->delta_mmap;
++#endif
++
+       while (addr < TASK_SIZE) {
+               info.low_limit = addr;
+               if (!slice_scan_available(addr, available, 1, &addr))
+@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
+       if (fixed && addr > (mm->task_size - len))
+               return -EINVAL;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
++              addr = 0;
++#endif
++
+       /* If hint, make sure it matches our alignment restrictions */
+       if (!fixed && addr) {
+               addr = _ALIGN_UP(addr, 1ul << pshift);
+diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
+index 9098692..3d54cd1 100644
+--- a/arch/powerpc/platforms/cell/spufs/file.c
++++ b/arch/powerpc/platforms/cell/spufs/file.c
+@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+       return VM_FAULT_NOPAGE;
+ }
+-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
++static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
+                               unsigned long address,
+-                              void *buf, int len, int write)
++                              void *buf, size_t len, int write)
+ {
+       struct spu_context *ctx = vma->vm_file->private_data;
+       unsigned long offset = address - vma->vm_start;
+diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
+index bdb738a..49c9f95 100644
+--- a/arch/powerpc/platforms/powermac/smp.c
++++ b/arch/powerpc/platforms/powermac/smp.c
+@@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
++static struct notifier_block smp_core99_cpu_nb = {
+       .notifier_call  = smp_core99_cpu_notify,
+ };
+ #endif /* CONFIG_HOTPLUG_CPU */
+diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
+index c797832..ce575c8 100644
+--- a/arch/s390/include/asm/atomic.h
++++ b/arch/s390/include/asm/atomic.h
+@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
+ #define atomic64_dec_and_test(_v)     (atomic64_sub_return(1, _v) == 0)
+ #define atomic64_inc_not_zero(v)      atomic64_add_unless((v), 1, 0)
++#define atomic64_read_unchecked(v)            atomic64_read(v)
++#define atomic64_set_unchecked(v, i)          atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v)          atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v)   atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v)          atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v)             atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v)      atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v)             atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n)   atomic64_cmpxchg((v), (o), (n))
++
+ #define smp_mb__before_atomic_dec()   smp_mb()
+ #define smp_mb__after_atomic_dec()    smp_mb()
+ #define smp_mb__before_atomic_inc()   smp_mb()
+diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
+index 4d7ccac..d03d0ad 100644
+--- a/arch/s390/include/asm/cache.h
++++ b/arch/s390/include/asm/cache.h
+@@ -9,8 +9,10 @@
+ #ifndef __ARCH_S390_CACHE_H
+ #define __ARCH_S390_CACHE_H
+-#define L1_CACHE_BYTES     256
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT     8
++#define L1_CACHE_BYTES     (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define NET_SKB_PAD      32
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
+index 78f4f87..598ce39 100644
+--- a/arch/s390/include/asm/elf.h
++++ b/arch/s390/include/asm/elf.h
+@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
+    the loader.  We need to make sure that it is out of the way of the program
+    that it will "exec", and that there is sufficient room for the brk.  */
+-extern unsigned long randomize_et_dyn(unsigned long base);
+-#define ELF_ET_DYN_BASE               (randomize_et_dyn(STACK_TOP / 3 * 2))
++#define ELF_ET_DYN_BASE               (STACK_TOP / 3 * 2)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
++
++#define PAX_DELTA_MMAP_LEN    (test_thread_flag(TIF_31BIT) ? 15 : 26)
++#define PAX_DELTA_STACK_LEN   (test_thread_flag(TIF_31BIT) ? 15 : 26)
++#endif
+ /* This yields a mask that user programs can use to figure out what
+    instruction set this CPU supports. */
+@@ -222,9 +228,6 @@ struct linux_binprm;
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+ int arch_setup_additional_pages(struct linux_binprm *, int);
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
+ #endif
+diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
+index c4a93d6..4d2a9b4 100644
+--- a/arch/s390/include/asm/exec.h
++++ b/arch/s390/include/asm/exec.h
+@@ -7,6 +7,6 @@
+ #ifndef __ASM_EXEC_H
+ #define __ASM_EXEC_H
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+ #endif /* __ASM_EXEC_H */
+diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
+index 9c33ed4..e40cbef 100644
+--- a/arch/s390/include/asm/uaccess.h
++++ b/arch/s390/include/asm/uaccess.h
+@@ -252,6 +252,10 @@ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+       might_fault();
++
++      if ((long)n < 0)
++              return n;
++
+       return __copy_to_user(to, from, n);
+ }
+@@ -275,6 +279,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
+ static inline unsigned long __must_check
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
+       if (__builtin_constant_p(n) && (n <= 256))
+               return uaccess.copy_from_user_small(n, from, to);
+       else
+@@ -306,10 +313,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+-      unsigned int sz = __compiletime_object_size(to);
++      size_t sz = __compiletime_object_size(to);
+       might_fault();
+-      if (unlikely(sz != -1 && sz < n)) {
++
++      if ((long)n < 0)
++              return n;
++
++      if (unlikely(sz != (size_t)-1 && sz < n)) {
+               copy_from_user_overflow();
+               return n;
+       }
+diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
+index 7845e15..59c4353 100644
+--- a/arch/s390/kernel/module.c
++++ b/arch/s390/kernel/module.c
+@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+       /* Increase core size by size of got & plt and set start
+          offsets for got and plt. */
+-      me->core_size = ALIGN(me->core_size, 4);
+-      me->arch.got_offset = me->core_size;
+-      me->core_size += me->arch.got_size;
+-      me->arch.plt_offset = me->core_size;
+-      me->core_size += me->arch.plt_size;
++      me->core_size_rw = ALIGN(me->core_size_rw, 4);
++      me->arch.got_offset = me->core_size_rw;
++      me->core_size_rw += me->arch.got_size;
++      me->arch.plt_offset = me->core_size_rx;
++      me->core_size_rx += me->arch.plt_size;
+       return 0;
+ }
+@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+               if (info->got_initialized == 0) {
+                       Elf_Addr *gotent;
+-                      gotent = me->module_core + me->arch.got_offset +
++                      gotent = me->module_core_rw + me->arch.got_offset +
+                               info->got_offset;
+                       *gotent = val;
+                       info->got_initialized = 1;
+@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+                       rc = apply_rela_bits(loc, val, 0, 64, 0);
+               else if (r_type == R_390_GOTENT ||
+                        r_type == R_390_GOTPLTENT) {
+-                      val += (Elf_Addr) me->module_core - loc;
++                      val += (Elf_Addr) me->module_core_rw - loc;
+                       rc = apply_rela_bits(loc, val, 1, 32, 1);
+               }
+               break;
+@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+       case R_390_PLTOFF64:    /* 16 bit offset from GOT to PLT. */
+               if (info->plt_initialized == 0) {
+                       unsigned int *ip;
+-                      ip = me->module_core + me->arch.plt_offset +
++                      ip = me->module_core_rx + me->arch.plt_offset +
+                               info->plt_offset;
+ #ifndef CONFIG_64BIT
+                       ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
+@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+                              val - loc + 0xffffUL < 0x1ffffeUL) ||
+                             (r_type == R_390_PLT32DBL &&
+                              val - loc + 0xffffffffULL < 0x1fffffffeULL)))
+-                              val = (Elf_Addr) me->module_core +
++                              val = (Elf_Addr) me->module_core_rx +
+                                       me->arch.plt_offset +
+                                       info->plt_offset;
+                       val += rela->r_addend - loc;
+@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+       case R_390_GOTOFF32:    /* 32 bit offset to GOT.  */
+       case R_390_GOTOFF64:    /* 64 bit offset to GOT. */
+               val = val + rela->r_addend -
+-                      ((Elf_Addr) me->module_core + me->arch.got_offset);
++                      ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
+               if (r_type == R_390_GOTOFF16)
+                       rc = apply_rela_bits(loc, val, 0, 16, 0);
+               else if (r_type == R_390_GOTOFF32)
+@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+               break;
+       case R_390_GOTPC:       /* 32 bit PC relative offset to GOT. */
+       case R_390_GOTPCDBL:    /* 32 bit PC rel. off. to GOT shifted by 1. */
+-              val = (Elf_Addr) me->module_core + me->arch.got_offset +
++              val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
+                       rela->r_addend - loc;
+               if (r_type == R_390_GOTPC)
+                       rc = apply_rela_bits(loc, val, 1, 32, 0);
+diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
+index 2bc3edd..ab9d598 100644
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -236,39 +236,3 @@ unsigned long get_wchan(struct task_struct *p)
+       }
+       return 0;
+ }
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+-      if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+-              sp -= get_random_int() & ~PAGE_MASK;
+-      return sp & ~0xf;
+-}
+-
+-static inline unsigned long brk_rnd(void)
+-{
+-      /* 8MB for 32bit, 1GB for 64bit */
+-      if (is_32bit_task())
+-              return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+-      else
+-              return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+-      unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+-
+-      if (ret < mm->brk)
+-              return mm->brk;
+-      return ret;
+-}
+-
+-unsigned long randomize_et_dyn(unsigned long base)
+-{
+-      unsigned long ret = PAGE_ALIGN(base + brk_rnd());
+-
+-      if (!(current->flags & PF_RANDOMIZE))
+-              return base;
+-      if (ret < base)
+-              return base;
+-      return ret;
+-}
+diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
+index 06bafec..2bca531 100644
+--- a/arch/s390/mm/mmap.c
++++ b/arch/s390/mm/mmap.c
+@@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+        */
+       if (mmap_is_legacy()) {
+               mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base += mm->delta_mmap;
++#endif
++
+               mm->get_unmapped_area = arch_get_unmapped_area;
+               mm->unmap_area = arch_unmap_area;
+       } else {
+               mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+               mm->unmap_area = arch_unmap_area_topdown;
+       }
+@@ -175,10 +187,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+        */
+       if (mmap_is_legacy()) {
+               mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base += mm->delta_mmap;
++#endif
++
+               mm->get_unmapped_area = s390_get_unmapped_area;
+               mm->unmap_area = arch_unmap_area;
+       } else {
+               mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+               mm->get_unmapped_area = s390_get_unmapped_area_topdown;
+               mm->unmap_area = arch_unmap_area_topdown;
+       }
+diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
+index ae3d59f..f65f075 100644
+--- a/arch/score/include/asm/cache.h
++++ b/arch/score/include/asm/cache.h
+@@ -1,7 +1,9 @@
+ #ifndef _ASM_SCORE_CACHE_H
+ #define _ASM_SCORE_CACHE_H
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT                4
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ #endif /* _ASM_SCORE_CACHE_H */
+diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
+index f9f3cd5..58ff438 100644
+--- a/arch/score/include/asm/exec.h
++++ b/arch/score/include/asm/exec.h
+@@ -1,6 +1,6 @@
+ #ifndef _ASM_SCORE_EXEC_H
+ #define _ASM_SCORE_EXEC_H
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) (x)
+ #endif /* _ASM_SCORE_EXEC_H */
+diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
+index f4c6d02..e9355c3 100644
+--- a/arch/score/kernel/process.c
++++ b/arch/score/kernel/process.c
+@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
+       return task_pt_regs(task)->cp0_epc;
+ }
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+-      return sp;
+-}
+diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
+index ef9e555..331bd29 100644
+--- a/arch/sh/include/asm/cache.h
++++ b/arch/sh/include/asm/cache.h
+@@ -9,10 +9,11 @@
+ #define __ASM_SH_CACHE_H
+ #ifdef __KERNEL__
++#include <linux/const.h>
+ #include <linux/init.h>
+ #include <cpu/cache.h>
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+index 03f2b55..b0270327 100644
+--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
++++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
++static struct notifier_block shx3_cpu_notifier = {
+       .notifier_call          = shx3_cpu_callback,
+ };
+diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
+index 6777177..cb5e44f 100644
+--- a/arch/sh/mm/mmap.c
++++ b/arch/sh/mm/mmap.c
+@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       int do_colour_align;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       struct vm_unmapped_area_info info;
+       if (flags & MAP_FIXED) {
+@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       if (filp || (flags & MAP_SHARED))
+               do_colour_align = 1;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+               if (do_colour_align)
+                       addr = COLOUR_ALIGN(addr, pgoff);
+@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+                       addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (TASK_SIZE - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+       info.flags = 0;
+       info.length = len;
+-      info.low_limit = TASK_UNMAPPED_BASE;
++      info.low_limit = mm->mmap_base;
+       info.high_limit = TASK_SIZE;
+       info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+       info.align_offset = pgoff << PAGE_SHIFT;
+@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       struct mm_struct *mm = current->mm;
+       unsigned long addr = addr0;
+       int do_colour_align;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       struct vm_unmapped_area_info info;
+       if (flags & MAP_FIXED) {
+@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       if (filp || (flags & MAP_SHARED))
+               do_colour_align = 1;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       /* requesting a specific address */
+       if (addr) {
+               if (do_colour_align)
+@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+                       addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (TASK_SIZE - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+               VM_BUG_ON(addr != -ENOMEM);
+               info.flags = 0;
+               info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      info.low_limit += mm->delta_mmap;
++#endif
++
+               info.high_limit = TASK_SIZE;
+               addr = vm_unmapped_area(&info);
+       }
+diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
+index be56a24..443328f 100644
+--- a/arch/sparc/include/asm/atomic_64.h
++++ b/arch/sparc/include/asm/atomic_64.h
+@@ -14,18 +14,40 @@
+ #define ATOMIC64_INIT(i)      { (i) }
+ #define atomic_read(v)                (*(volatile int *)&(v)->counter)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++      return v->counter;
++}
+ #define atomic64_read(v)      (*(volatile long *)&(v)->counter)
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++      return v->counter;
++}
+ #define atomic_set(v, i)      (((v)->counter) = i)
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++      v->counter = i;
++}
+ #define atomic64_set(v, i)    (((v)->counter) = i)
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++      v->counter = i;
++}
+ extern void atomic_add(int, atomic_t *);
++extern void atomic_add_unchecked(int, atomic_unchecked_t *);
+ extern void atomic64_add(long, atomic64_t *);
++extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
+ extern void atomic_sub(int, atomic_t *);
++extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
+ extern void atomic64_sub(long, atomic64_t *);
++extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
+ extern int atomic_add_ret(int, atomic_t *);
++extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
+ extern long atomic64_add_ret(long, atomic64_t *);
++extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
+ extern int atomic_sub_ret(int, atomic_t *);
+ extern long atomic64_sub_ret(long, atomic64_t *);
+@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
+ #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
+ #define atomic_inc_return(v) atomic_add_ret(1, v)
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++      return atomic_add_ret_unchecked(1, v);
++}
+ #define atomic64_inc_return(v) atomic64_add_ret(1, v)
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++      return atomic64_add_ret_unchecked(1, v);
++}
+ #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
+ #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
+ #define atomic_add_return(i, v) atomic_add_ret(i, v)
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++      return atomic_add_ret_unchecked(i, v);
++}
+ #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
++{
++      return atomic64_add_ret_unchecked(i, v);
++}
+ /*
+  * atomic_inc_and_test - increment and test
+@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
+  * other cases.
+  */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++      return atomic_inc_return_unchecked(v) == 0;
++}
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+ #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
+@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
+ #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
+ #define atomic_inc(v) atomic_add(1, v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++      atomic_add_unchecked(1, v);
++}
+ #define atomic64_inc(v) atomic64_add(1, v)
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
++      atomic64_add_unchecked(1, v);
++}
+ #define atomic_dec(v) atomic_sub(1, v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++      atomic_sub_unchecked(1, v);
++}
+ #define atomic64_dec(v) atomic64_sub(1, v)
++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++{
++      atomic64_sub_unchecked(1, v);
++}
+ #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
+ #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
+ #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++      return cmpxchg(&v->counter, old, new);
++}
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++      return xchg(&v->counter, new);
++}
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+-      int c, old;
++      int c, old, new;
+       c = atomic_read(v);
+       for (;;) {
+-              if (unlikely(c == (u)))
++              if (unlikely(c == u))
+                       break;
+-              old = atomic_cmpxchg((v), c, c + (a));
++
++              asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                           "tvs %%icc, 6\n"
++#endif
++
++                           : "=r" (new)
++                           : "0" (c), "ir" (a)
++                           : "cc");
++
++              old = atomic_cmpxchg(v, c, new);
+               if (likely(old == c))
+                       break;
+               c = old;
+@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ #define atomic64_cmpxchg(v, o, n) \
+       ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+ #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++      return xchg(&v->counter, new);
++}
+ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+-      long c, old;
++      long c, old, new;
+       c = atomic64_read(v);
+       for (;;) {
+-              if (unlikely(c == (u)))
++              if (unlikely(c == u))
+                       break;
+-              old = atomic64_cmpxchg((v), c, c + (a));
++
++              asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                           "tvs %%xcc, 6\n"
++#endif
++
++                           : "=r" (new)
++                           : "0" (c), "ir" (a)
++                           : "cc");
++
++              old = atomic64_cmpxchg(v, c, new);
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+-      return c != (u);
++      return c != u;
+ }
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
+index 5bb6991..5c2132e 100644
+--- a/arch/sparc/include/asm/cache.h
++++ b/arch/sparc/include/asm/cache.h
+@@ -7,10 +7,12 @@
+ #ifndef _SPARC_CACHE_H
+ #define _SPARC_CACHE_H
++#include <linux/const.h>
++
+ #define ARCH_SLAB_MINALIGN    __alignof__(unsigned long long)
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES 32
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #ifdef CONFIG_SPARC32
+ #define SMP_CACHE_BYTES_SHIFT 5
+diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
+index a24e41f..47677ff 100644
+--- a/arch/sparc/include/asm/elf_32.h
++++ b/arch/sparc/include/asm/elf_32.h
+@@ -114,6 +114,13 @@ typedef struct {
+ #define ELF_ET_DYN_BASE         (TASK_UNMAPPED_BASE)
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   0x10000UL
++
++#define PAX_DELTA_MMAP_LEN    16
++#define PAX_DELTA_STACK_LEN   16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+    instruction set this cpu supports.  This can NOT be done in userspace
+    on Sparc.  */
+diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
+index 370ca1e..d4f4a98 100644
+--- a/arch/sparc/include/asm/elf_64.h
++++ b/arch/sparc/include/asm/elf_64.h
+@@ -189,6 +189,13 @@ typedef struct {
+ #define ELF_ET_DYN_BASE               0x0000010000000000UL
+ #define COMPAT_ELF_ET_DYN_BASE        0x0000000070000000UL
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
++
++#define PAX_DELTA_MMAP_LEN    (test_thread_flag(TIF_32BIT) ? 14 : 28)
++#define PAX_DELTA_STACK_LEN   (test_thread_flag(TIF_32BIT) ? 15 : 29)
++#endif
++
+ extern unsigned long sparc64_elf_hwcap;
+ #define ELF_HWCAP     sparc64_elf_hwcap
+diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
+index 9b1c36d..209298b 100644
+--- a/arch/sparc/include/asm/pgalloc_32.h
++++ b/arch/sparc/include/asm/pgalloc_32.h
+@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
+ }
+ #define pgd_populate(MM, PGD, PMD)      pgd_set(PGD, PMD)
++#define pgd_populate_kernel(MM, PGD, PMD)      pgd_populate((MM), (PGD), (PMD))
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
+                                  unsigned long address)
+diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
+index bcfe063..b333142 100644
+--- a/arch/sparc/include/asm/pgalloc_64.h
++++ b/arch/sparc/include/asm/pgalloc_64.h
+@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+ }
+ #define pud_populate(MM, PUD, PMD)    pud_set(PUD, PMD)
++#define pud_populate_kernel(MM, PUD, PMD)     pud_populate((MM), (PUD), (PMD))
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
+index 6fc1348..390c50a 100644
+--- a/arch/sparc/include/asm/pgtable_32.h
++++ b/arch/sparc/include/asm/pgtable_32.h
+@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
+ #define PAGE_SHARED   SRMMU_PAGE_SHARED
+ #define PAGE_COPY     SRMMU_PAGE_COPY
+ #define PAGE_READONLY SRMMU_PAGE_RDONLY
++#define PAGE_SHARED_NOEXEC    SRMMU_PAGE_SHARED_NOEXEC
++#define PAGE_COPY_NOEXEC      SRMMU_PAGE_COPY_NOEXEC
++#define PAGE_READONLY_NOEXEC  SRMMU_PAGE_RDONLY_NOEXEC
+ #define PAGE_KERNEL   SRMMU_PAGE_KERNEL
+ /* Top-level page directory - dummy used by init-mm.
+@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
+ /*         xwr */
+ #define __P000  PAGE_NONE
+-#define __P001  PAGE_READONLY
+-#define __P010  PAGE_COPY
+-#define __P011  PAGE_COPY
++#define __P001  PAGE_READONLY_NOEXEC
++#define __P010  PAGE_COPY_NOEXEC
++#define __P011  PAGE_COPY_NOEXEC
+ #define __P100  PAGE_READONLY
+ #define __P101  PAGE_READONLY
+ #define __P110  PAGE_COPY
+ #define __P111  PAGE_COPY
+ #define __S000        PAGE_NONE
+-#define __S001        PAGE_READONLY
+-#define __S010        PAGE_SHARED
+-#define __S011        PAGE_SHARED
++#define __S001        PAGE_READONLY_NOEXEC
++#define __S010        PAGE_SHARED_NOEXEC
++#define __S011        PAGE_SHARED_NOEXEC
+ #define __S100        PAGE_READONLY
+ #define __S101        PAGE_READONLY
+ #define __S110        PAGE_SHARED
+diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
+index 79da178..c2eede8 100644
+--- a/arch/sparc/include/asm/pgtsrmmu.h
++++ b/arch/sparc/include/asm/pgtsrmmu.h
+@@ -115,6 +115,11 @@
+                                   SRMMU_EXEC | SRMMU_REF)
+ #define SRMMU_PAGE_RDONLY  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
+                                   SRMMU_EXEC | SRMMU_REF)
++
++#define SRMMU_PAGE_SHARED_NOEXEC      __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
++#define SRMMU_PAGE_COPY_NOEXEC                __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#define SRMMU_PAGE_RDONLY_NOEXEC      __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++
+ #define SRMMU_PAGE_KERNEL  __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
+                                   SRMMU_DIRTY | SRMMU_REF)
+diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
+index 9689176..63c18ea 100644
+--- a/arch/sparc/include/asm/spinlock_64.h
++++ b/arch/sparc/include/asm/spinlock_64.h
+@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
+ /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
+-static void inline arch_read_lock(arch_rwlock_t *lock)
++static inline void arch_read_lock(arch_rwlock_t *lock)
+ {
+       unsigned long tmp1, tmp2;
+       __asm__ __volatile__ (
+ "1:   ldsw            [%2], %0\n"
+ "     brlz,pn         %0, 2f\n"
+-"4:    add            %0, 1, %1\n"
++"4:    addcc          %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"     tvs             %%icc, 6\n"
++#endif
++
+ "     cas             [%2], %0, %1\n"
+ "     cmp             %0, %1\n"
+ "     bne,pn          %%icc, 1b\n"
+@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
+ "     .previous"
+       : "=&r" (tmp1), "=&r" (tmp2)
+       : "r" (lock)
+-      : "memory");
++      : "memory", "cc");
+ }
+-static int inline arch_read_trylock(arch_rwlock_t *lock)
++static inline int arch_read_trylock(arch_rwlock_t *lock)
+ {
+       int tmp1, tmp2;
+@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
+ "1:   ldsw            [%2], %0\n"
+ "     brlz,a,pn       %0, 2f\n"
+ "      mov            0, %0\n"
+-"     add             %0, 1, %1\n"
++"     addcc           %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"     tvs             %%icc, 6\n"
++#endif
++
+ "     cas             [%2], %0, %1\n"
+ "     cmp             %0, %1\n"
+ "     bne,pn          %%icc, 1b\n"
+@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
+       return tmp1;
+ }
+-static void inline arch_read_unlock(arch_rwlock_t *lock)
++static inline void arch_read_unlock(arch_rwlock_t *lock)
+ {
+       unsigned long tmp1, tmp2;
+       __asm__ __volatile__(
+ "1:   lduw    [%2], %0\n"
+-"     sub     %0, 1, %1\n"
++"     subcc   %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"     tvs     %%icc, 6\n"
++#endif
++
+ "     cas     [%2], %0, %1\n"
+ "     cmp     %0, %1\n"
+ "     bne,pn  %%xcc, 1b\n"
+@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
+       : "memory");
+ }
+-static void inline arch_write_lock(arch_rwlock_t *lock)
++static inline void arch_write_lock(arch_rwlock_t *lock)
+ {
+       unsigned long mask, tmp1, tmp2;
+@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
+       : "memory");
+ }
+-static void inline arch_write_unlock(arch_rwlock_t *lock)
++static inline void arch_write_unlock(arch_rwlock_t *lock)
+ {
+       __asm__ __volatile__(
+ "     stw             %%g0, [%0]"
+@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
+       : "memory");
+ }
+-static int inline arch_write_trylock(arch_rwlock_t *lock)
++static inline int arch_write_trylock(arch_rwlock_t *lock)
+ {
+       unsigned long mask, tmp1, tmp2, result;
+diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
+index dd38075..e7cac83 100644
+--- a/arch/sparc/include/asm/thread_info_32.h
++++ b/arch/sparc/include/asm/thread_info_32.h
+@@ -49,6 +49,8 @@ struct thread_info {
+       unsigned long           w_saved;
+       struct restart_block    restart_block;
++
++      unsigned long           lowest_stack;
+ };
+ /*
+diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
+index d5e5042..9bfee76 100644
+--- a/arch/sparc/include/asm/thread_info_64.h
++++ b/arch/sparc/include/asm/thread_info_64.h
+@@ -63,6 +63,8 @@ struct thread_info {
+       struct pt_regs          *kern_una_regs;
+       unsigned int            kern_una_insn;
++      unsigned long           lowest_stack;
++
+       unsigned long           fpregs[0] __attribute__ ((aligned(64)));
+ };
+@@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
+ #define TIF_UNALIGNED         5       /* allowed to do unaligned accesses */
+ /* flag bit 6 is available */
+ #define TIF_32BIT             7       /* 32-bit binary */
+-/* flag bit 8 is available */
++#define TIF_GRSEC_SETXID      8       /* update credentials on syscall entry/exit */
+ #define TIF_SECCOMP           9       /* secure computing */
+ #define TIF_SYSCALL_AUDIT     10      /* syscall auditing active */
+ #define TIF_SYSCALL_TRACEPOINT        11      /* syscall tracepoint instrumentation */
++
+ /* NOTE: Thread flags >= 12 should be ones we have no interest
+  *       in using in assembly, else we can't use the mask as
+  *       an immediate value in instructions such as andcc.
+@@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
+ #define _TIF_SYSCALL_AUDIT    (1<<TIF_SYSCALL_AUDIT)
+ #define _TIF_SYSCALL_TRACEPOINT       (1<<TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_POLLING_NRFLAG   (1<<TIF_POLLING_NRFLAG)
++#define _TIF_GRSEC_SETXID     (1<<TIF_GRSEC_SETXID)
+ #define _TIF_USER_WORK_MASK   ((0xff << TI_FLAG_WSAVED_SHIFT) | \
+                                _TIF_DO_NOTIFY_RESUME_MASK | \
+                                _TIF_NEED_RESCHED)
+ #define _TIF_DO_NOTIFY_RESUME_MASK    (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
++#define _TIF_WORK_SYSCALL             \
++      (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
++       _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
++
++
+ /*
+  * Thread-synchronous status.
+  *
+diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
+index 0167d26..767bb0c 100644
+--- a/arch/sparc/include/asm/uaccess.h
++++ b/arch/sparc/include/asm/uaccess.h
+@@ -1,5 +1,6 @@
+ #ifndef ___ASM_SPARC_UACCESS_H
+ #define ___ASM_SPARC_UACCESS_H
++
+ #if defined(__sparc__) && defined(__arch64__)
+ #include <asm/uaccess_64.h>
+ #else
+diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
+index 53a28dd..50c38c3 100644
+--- a/arch/sparc/include/asm/uaccess_32.h
++++ b/arch/sparc/include/asm/uaccess_32.h
+@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
+ static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+-      if (n && __access_ok((unsigned long) to, n))
++      if ((long)n < 0)
++              return n;
++
++      if (n && __access_ok((unsigned long) to, n)) {
++              if (!__builtin_constant_p(n))
++                      check_object_size(from, n, true);
+               return __copy_user(to, (__force void __user *) from, n);
+-      else
++      } else
+               return n;
+ }
+ static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
++      if (!__builtin_constant_p(n))
++              check_object_size(from, n, true);
++
+       return __copy_user(to, (__force void __user *) from, n);
+ }
+ static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+-      if (n && __access_ok((unsigned long) from, n))
++      if ((long)n < 0)
++              return n;
++
++      if (n && __access_ok((unsigned long) from, n)) {
++              if (!__builtin_constant_p(n))
++                      check_object_size(to, n, false);
+               return __copy_user((__force void __user *) to, from, n);
+-      else
++      } else
+               return n;
+ }
+ static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
+       return __copy_user((__force void __user *) to, from, n);
+ }
+diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
+index e562d3c..191f176 100644
+--- a/arch/sparc/include/asm/uaccess_64.h
++++ b/arch/sparc/include/asm/uaccess_64.h
+@@ -10,6 +10,7 @@
+ #include <linux/compiler.h>
+ #include <linux/string.h>
+ #include <linux/thread_info.h>
++#include <linux/kernel.h>
+ #include <asm/asi.h>
+ #include <asm/spitfire.h>
+ #include <asm-generic/uaccess-unaligned.h>
+@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long size)
+ {
+-      unsigned long ret = ___copy_from_user(to, from, size);
++      unsigned long ret;
++      if ((long)size < 0 || size > INT_MAX)
++              return size;
++
++      if (!__builtin_constant_p(size))
++              check_object_size(to, size, false);
++
++      ret = ___copy_from_user(to, from, size);
+       if (unlikely(ret))
+               ret = copy_from_user_fixup(to, from, size);
+@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
+ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long size)
+ {
+-      unsigned long ret = ___copy_to_user(to, from, size);
++      unsigned long ret;
++      if ((long)size < 0 || size > INT_MAX)
++              return size;
++
++      if (!__builtin_constant_p(size))
++              check_object_size(from, size, true);
++
++      ret = ___copy_to_user(to, from, size);
+       if (unlikely(ret))
+               ret = copy_to_user_fixup(to, from, size);
+       return ret;
+diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
+index d432fb2..6056af1 100644
+--- a/arch/sparc/kernel/Makefile
++++ b/arch/sparc/kernel/Makefile
+@@ -3,7 +3,7 @@
+ #
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+ extra-y     := head_$(BITS).o
+diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
+index 5ef48da..11d460f 100644
+--- a/arch/sparc/kernel/ds.c
++++ b/arch/sparc/kernel/ds.c
+@@ -783,6 +783,16 @@ void ldom_set_var(const char *var, const char *value)
+               char  *base, *p;
+               int msg_len, loops;
++              if (strlen(var) + strlen(value) + 2 >
++                  sizeof(pkt) - sizeof(pkt.header)) {
++                      printk(KERN_ERR PFX
++                              "contents length: %zu, which more than max: %lu,"
++                              "so could not set (%s) variable to (%s).\n",
++                              strlen(var) + strlen(value) + 2,
++                              sizeof(pkt) - sizeof(pkt.header), var, value);
++                      return;
++              }
++
+               memset(&pkt, 0, sizeof(pkt));
+               pkt.header.data.tag.type = DS_DATA;
+               pkt.header.data.handle = cp->handle;
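The ldom_set_var() hunk above adds a bounds check so that an oversized variable name/value pair is reported and dropped instead of being written past the end of the fixed-size packet; the +2 presumably covers the two NUL terminators of the back-to-back strings. A hedged sketch of the same check against a hypothetical fixed-size structure:

    #include <string.h>

    struct pkt_sketch {                    /* stand-in for the DS packet */
            char header[16];
            char payload[64];
    };

    static int set_var_checked(struct pkt_sketch *p, const char *var,
                               const char *value)
    {
            /* refuse anything that would not fit, rather than overflow */
            if (strlen(var) + strlen(value) + 2 > sizeof(p->payload))
                    return -1;

            strcpy(p->payload, var);
            strcpy(p->payload + strlen(var) + 1, value);
            return 0;
    }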
+diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
+index fdd819d..5af08c8 100644
+--- a/arch/sparc/kernel/process_32.c
++++ b/arch/sparc/kernel/process_32.c
+@@ -116,14 +116,14 @@ void show_regs(struct pt_regs *r)
+         printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx    %s\n",
+              r->psr, r->pc, r->npc, r->y, print_tainted());
+-      printk("PC: <%pS>\n", (void *) r->pc);
++      printk("PC: <%pA>\n", (void *) r->pc);
+       printk("%%G: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
+              r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
+              r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
+       printk("%%O: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
+              r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
+              r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
+-      printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
++      printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
+       printk("%%L: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
+              rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
+@@ -160,7 +160,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+               rw = (struct reg_window32 *) fp;
+               pc = rw->ins[7];
+               printk("[%08lx : ", pc);
+-              printk("%pS ] ", (void *) pc);
++              printk("%pA ] ", (void *) pc);
+               fp = rw->ins[6];
+       } while (++count < 16);
+       printk("\n");
+diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
+index baebab2..9cd13b1 100644
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -158,7 +158,7 @@ static void show_regwindow(struct pt_regs *regs)
+       printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
+              rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
+       if (regs->tstate & TSTATE_PRIV)
+-              printk("I7: <%pS>\n", (void *) rwk->ins[7]);
++              printk("I7: <%pA>\n", (void *) rwk->ins[7]);
+ }
+ void show_regs(struct pt_regs *regs)
+@@ -167,7 +167,7 @@ void show_regs(struct pt_regs *regs)
+       printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x    %s\n", regs->tstate,
+              regs->tpc, regs->tnpc, regs->y, print_tainted());
+-      printk("TPC: <%pS>\n", (void *) regs->tpc);
++      printk("TPC: <%pA>\n", (void *) regs->tpc);
+       printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
+              regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
+              regs->u_regs[3]);
+@@ -180,7 +180,7 @@ void show_regs(struct pt_regs *regs)
+       printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
+              regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
+              regs->u_regs[15]);
+-      printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
++      printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
+       show_regwindow(regs);
+       show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
+ }
+@@ -269,7 +269,7 @@ void arch_trigger_all_cpu_backtrace(void)
+                      ((tp && tp->task) ? tp->task->pid : -1));
+               if (gp->tstate & TSTATE_PRIV) {
+-                      printk("             TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
++                      printk("             TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
+                              (void *) gp->tpc,
+                              (void *) gp->o7,
+                              (void *) gp->i7,
+diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
+index 79cc0d1..ec62734 100644
+--- a/arch/sparc/kernel/prom_common.c
++++ b/arch/sparc/kernel/prom_common.c
+@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
+ unsigned int prom_early_allocated __initdata;
+-static struct of_pdt_ops prom_sparc_ops __initdata = {
++static struct of_pdt_ops prom_sparc_ops __initconst = {
+       .nextprop = prom_common_nextprop,
+       .getproplen = prom_getproplen,
+       .getproperty = prom_getproperty,
+diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
+index 7ff45e4..a58f271 100644
+--- a/arch/sparc/kernel/ptrace_64.c
++++ b/arch/sparc/kernel/ptrace_64.c
+@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
+       return ret;
+ }
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+ {
+       int ret = 0;
+@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+       /* do the secure computing check first */
+       secure_computing_strict(regs->u_regs[UREG_G1]);
++#ifdef CONFIG_GRKERNSEC_SETXID
++      if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++              gr_delayed_cred_worker();
++#endif
++
+       if (test_thread_flag(TIF_SYSCALL_TRACE))
+               ret = tracehook_report_syscall_entry(regs);
+@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
+ {
++#ifdef CONFIG_GRKERNSEC_SETXID
++      if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++              gr_delayed_cred_worker();
++#endif
++
+       audit_syscall_exit(regs);
+       if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
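The ptrace hunks above run gr_delayed_cred_worker() at syscall entry and exit whenever the TIF_GRSEC_SETXID thread flag is set, i.e. credential changes requested elsewhere are deferred to a point where the task is in a well-defined state. A small sketch of that test-and-clear-then-run pattern, using hypothetical names in place of the kernel's thread-flag helpers:

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic bool pending_cred_update;    /* set by the requesting path */

    static void deferred_cred_worker(void)      /* stand-in for gr_delayed_cred_worker() */
    {
            /* apply the queued credential change here */
    }

    static void syscall_boundary_hook(void)
    {
            /* atomic test-and-clear, like test_and_clear_thread_flag() */
            if (atomic_exchange(&pending_cred_update, false))
                    deferred_cred_worker();
    }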
+diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
+index 3a8d184..49498a8 100644
+--- a/arch/sparc/kernel/sys_sparc_32.c
++++ b/arch/sparc/kernel/sys_sparc_32.c
+@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+       if (len > TASK_SIZE - PAGE_SIZE)
+               return -ENOMEM;
+       if (!addr)
+-              addr = TASK_UNMAPPED_BASE;
++              addr = current->mm->mmap_base;
+       info.flags = 0;
+       info.length = len;
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index 2daaaa6..4fb84dc 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+       struct vm_area_struct * vma;
+       unsigned long task_size = TASK_SIZE;
+       int do_color_align;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       struct vm_unmapped_area_info info;
+       if (flags & MAP_FIXED) {
+               /* We do not accept a shared mapping if it would violate
+                * cache aliasing constraints.
+                */
+-              if ((flags & MAP_SHARED) &&
++              if ((filp || (flags & MAP_SHARED)) &&
+                   ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+                       return -EINVAL;
+               return addr;
+@@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+       if (filp || (flags & MAP_SHARED))
+               do_color_align = 1;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+               if (do_color_align)
+                       addr = COLOR_ALIGN(addr, pgoff);
+@@ -118,22 +123,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+                       addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (task_size - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+       info.flags = 0;
+       info.length = len;
+-      info.low_limit = TASK_UNMAPPED_BASE;
++      info.low_limit = mm->mmap_base;
+       info.high_limit = min(task_size, VA_EXCLUDE_START);
+       info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+       info.align_offset = pgoff << PAGE_SHIFT;
++      info.threadstack_offset = offset;
+       addr = vm_unmapped_area(&info);
+       if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+               VM_BUG_ON(addr != -ENOMEM);
+               info.low_limit = VA_EXCLUDE_END;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      info.low_limit += mm->delta_mmap;
++#endif
++
+               info.high_limit = task_size;
+               addr = vm_unmapped_area(&info);
+       }
+@@ -151,6 +162,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       unsigned long task_size = STACK_TOP32;
+       unsigned long addr = addr0;
+       int do_color_align;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       struct vm_unmapped_area_info info;
+       /* This should only ever run for 32-bit processes.  */
+@@ -160,7 +172,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+               /* We do not accept a shared mapping if it would violate
+                * cache aliasing constraints.
+                */
+-              if ((flags & MAP_SHARED) &&
++              if ((filp || (flags & MAP_SHARED)) &&
+                   ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+                       return -EINVAL;
+               return addr;
+@@ -173,6 +185,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       if (filp || (flags & MAP_SHARED))
+               do_color_align = 1;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       /* requesting a specific address */
+       if (addr) {
+               if (do_color_align)
+@@ -181,8 +197,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+                       addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (task_size - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+@@ -192,6 +207,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       info.high_limit = mm->mmap_base;
+       info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+       info.align_offset = pgoff << PAGE_SHIFT;
++      info.threadstack_offset = offset;
+       addr = vm_unmapped_area(&info);
+       /*
+@@ -204,6 +220,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+               VM_BUG_ON(addr != -ENOMEM);
+               info.flags = 0;
+               info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      info.low_limit += mm->delta_mmap;
++#endif
++
+               info.high_limit = STACK_TOP32;
+               addr = vm_unmapped_area(&info);
+       }
+@@ -260,10 +282,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
+ EXPORT_SYMBOL(get_fb_unmapped_area);
+ /* Essentially the same as PowerPC.  */
+-static unsigned long mmap_rnd(void)
++static unsigned long mmap_rnd(struct mm_struct *mm)
+ {
+       unsigned long rnd = 0UL;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (current->flags & PF_RANDOMIZE) {
+               unsigned long val = get_random_int();
+               if (test_thread_flag(TIF_32BIT))
+@@ -276,7 +302,7 @@ static unsigned long mmap_rnd(void)
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+-      unsigned long random_factor = mmap_rnd();
++      unsigned long random_factor = mmap_rnd(mm);
+       unsigned long gap;
+       /*
+@@ -289,6 +315,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+           gap == RLIM_INFINITY ||
+           sysctl_legacy_va_layout) {
+               mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base += mm->delta_mmap;
++#endif
++
+               mm->get_unmapped_area = arch_get_unmapped_area;
+               mm->unmap_area = arch_unmap_area;
+       } else {
+@@ -301,6 +333,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+                       gap = (task_size / 6 * 5);
+               mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+               mm->unmap_area = arch_unmap_area_topdown;
+       }
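In arch_pick_mmap_layout() above, an mm flagged with MF_PAX_RANDMMAP gets its mmap base shifted by an extra per-mm delta: upward by delta_mmap in the legacy bottom-up layout, downward by delta_mmap + delta_stack in the top-down layout. A rough user-space illustration of the arithmetic (page alignment omitted, all values made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned long task_unmapped_base = 0x0000700000000000UL;
            unsigned long task_size          = 0x0000800000000000UL;
            unsigned long gap                = 0x0000000001000000UL;
            unsigned long random_factor      = 0x3fUL << 12;
            unsigned long delta_mmap         = 0x12UL << 12;
            unsigned long delta_stack        = 0x07UL << 12;

            /* bottom-up layout: base pushed further up */
            unsigned long legacy_base  = task_unmapped_base + random_factor
                                         + delta_mmap;
            /* top-down layout: base pulled further down */
            unsigned long topdown_base = (task_size - gap - random_factor)
                                         - (delta_mmap + delta_stack);

            printf("bottom-up base: %#lx\n", legacy_base);
            printf("top-down base:  %#lx\n", topdown_base);
            return 0;
    }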
+diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
+index 22a1098..6255eb9 100644
+--- a/arch/sparc/kernel/syscalls.S
++++ b/arch/sparc/kernel/syscalls.S
+@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
+ #endif
+       .align  32
+ 1:    ldx     [%g6 + TI_FLAGS], %l5
+-      andcc   %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
++      andcc   %l5, _TIF_WORK_SYSCALL, %g0
+       be,pt   %icc, rtrap
+        nop
+       call    syscall_trace_leave
+@@ -184,7 +184,7 @@ linux_sparc_syscall32:
+       srl     %i5, 0, %o5                             ! IEU1
+       srl     %i2, 0, %o2                             ! IEU0  Group
+-      andcc   %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
++      andcc   %l0, _TIF_WORK_SYSCALL, %g0
+       bne,pn  %icc, linux_syscall_trace32             ! CTI
+        mov    %i0, %l5                                ! IEU1
+       call    %l7                                     ! CTI   Group brk forced
+@@ -207,7 +207,7 @@ linux_sparc_syscall:
+       mov     %i3, %o3                                ! IEU1
+       mov     %i4, %o4                                ! IEU0  Group
+-      andcc   %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
++      andcc   %l0, _TIF_WORK_SYSCALL, %g0
+       bne,pn  %icc, linux_syscall_trace               ! CTI   Group
+        mov    %i0, %l5                                ! IEU0
+ 2:    call    %l7                                     ! CTI   Group brk forced
+@@ -223,7 +223,7 @@ ret_sys_call:
+       cmp     %o0, -ERESTART_RESTARTBLOCK
+       bgeu,pn %xcc, 1f
+-       andcc  %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
++       andcc  %l0, _TIF_WORK_SYSCALL, %g0
+       ldx     [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
+ 2:
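The syscalls.S hunks above replace the inline OR of four thread-info flags with a single _TIF_WORK_SYSCALL mask, presumably so that additional work flags (such as the TIF_GRSEC_SETXID flag used by the ptrace hunks) only need to be added to the mask definition rather than to every andcc in the entry/exit paths. Conceptually, with purely illustrative bit values:

    /* illustrative values only -- not the kernel's real bit numbers */
    #define _TIF_SYSCALL_TRACE      (1u << 3)
    #define _TIF_SECCOMP            (1u << 4)
    #define _TIF_SYSCALL_AUDIT      (1u << 5)
    #define _TIF_SYSCALL_TRACEPOINT (1u << 6)
    #define _TIF_GRSEC_SETXID       (1u << 7)

    #define _TIF_WORK_SYSCALL (_TIF_SYSCALL_TRACE | _TIF_SECCOMP |            \
                               _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | \
                               _TIF_GRSEC_SETXID)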
+diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
+index 654e8aa..45f431b 100644
+--- a/arch/sparc/kernel/sysfs.c
++++ b/arch/sparc/kernel/sysfs.c
+@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
++static struct notifier_block sysfs_cpu_nb = {
+       .notifier_call  = sysfs_cpu_notify,
+ };
+diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
+index 6629829..036032d 100644
+--- a/arch/sparc/kernel/traps_32.c
++++ b/arch/sparc/kernel/traps_32.c
+@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
+ #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
+ #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
++extern void gr_handle_kernel_exploit(void);
++
+ void die_if_kernel(char *str, struct pt_regs *regs)
+ {
+       static int die_counter;
+@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
+                     count++ < 30                              &&
+                       (((unsigned long) rw) >= PAGE_OFFSET)   &&
+                     !(((unsigned long) rw) & 0x7)) {
+-                      printk("Caller[%08lx]: %pS\n", rw->ins[7],
++                      printk("Caller[%08lx]: %pA\n", rw->ins[7],
+                              (void *) rw->ins[7]);
+                       rw = (struct reg_window32 *)rw->ins[6];
+               }
+       }
+       printk("Instruction DUMP:");
+       instruction_dump ((unsigned long *) regs->pc);
+-      if(regs->psr & PSR_PS)
++      if(regs->psr & PSR_PS) {
++              gr_handle_kernel_exploit();
+               do_exit(SIGKILL);
++      }
+       do_exit(SIGSEGV);
+ }
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index b3f833a..ac74b2d 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
+                      i + 1,
+                      p->trapstack[i].tstate, p->trapstack[i].tpc,
+                      p->trapstack[i].tnpc, p->trapstack[i].tt);
+-              printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
++              printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
+       }
+ }
+@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
+       lvl -= 0x100;
+       if (regs->tstate & TSTATE_PRIV) {
++
++#ifdef CONFIG_PAX_REFCOUNT
++              if (lvl == 6)
++                      pax_report_refcount_overflow(regs);
++#endif
++
+               sprintf(buffer, "Kernel bad sw trap %lx", lvl);
+               die_if_kernel(buffer, regs);
+       }
+@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
+ void bad_trap_tl1(struct pt_regs *regs, long lvl)
+ {
+       char buffer[32];
+-      
++
+       if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
+                      0, lvl, SIGTRAP) == NOTIFY_STOP)
+               return;
++#ifdef CONFIG_PAX_REFCOUNT
++      if (lvl == 6)
++              pax_report_refcount_overflow(regs);
++#endif
++
+       dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+       sprintf (buffer, "Bad trap %lx at tl>0", lvl);
+@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
+              regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
+       printk("%s" "ERROR(%d): ",
+              (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
+-      printk("TPC<%pS>\n", (void *) regs->tpc);
++      printk("TPC<%pA>\n", (void *) regs->tpc);
+       printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
+              (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+              (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
+@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
+                      smp_processor_id(),
+                      (type & 0x1) ? 'I' : 'D',
+                      regs->tpc);
+-              printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
++              printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
+               panic("Irrecoverable Cheetah+ parity error.");
+       }
+@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
+              smp_processor_id(),
+              (type & 0x1) ? 'I' : 'D',
+              regs->tpc);
+-      printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
++      printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
+ }
+ struct sun4v_error_entry {
+@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
+       printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
+              regs->tpc, tl);
+-      printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
++      printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
+       printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+-      printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
++      printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
+              (void *) regs->u_regs[UREG_I7]);
+       printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
+              "pte[%lx] error[%lx]\n",
+@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
+       printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
+              regs->tpc, tl);
+-      printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
++      printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
+       printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+-      printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
++      printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
+              (void *) regs->u_regs[UREG_I7]);
+       printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
+              "pte[%lx] error[%lx]\n",
+@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+                       fp = (unsigned long)sf->fp + STACK_BIAS;
+               }
+-              printk(" [%016lx] %pS\n", pc, (void *) pc);
++              printk(" [%016lx] %pA\n", pc, (void *) pc);
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+               if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+                       int index = tsk->curr_ret_stack;
+                       if (tsk->ret_stack && index >= graph) {
+                               pc = tsk->ret_stack[index - graph].ret;
+-                              printk(" [%016lx] %pS\n", pc, (void *) pc);
++                              printk(" [%016lx] %pA\n", pc, (void *) pc);
+                               graph++;
+                       }
+               }
+@@ -2360,6 +2371,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
+       return (struct reg_window *) (fp + STACK_BIAS);
+ }
++extern void gr_handle_kernel_exploit(void);
++
+ void die_if_kernel(char *str, struct pt_regs *regs)
+ {
+       static int die_counter;
+@@ -2388,7 +2401,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
+               while (rw &&
+                      count++ < 30 &&
+                      kstack_valid(tp, (unsigned long) rw)) {
+-                      printk("Caller[%016lx]: %pS\n", rw->ins[7],
++                      printk("Caller[%016lx]: %pA\n", rw->ins[7],
+                              (void *) rw->ins[7]);
+                       rw = kernel_stack_up(rw);
+@@ -2401,8 +2414,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
+               }
+               user_instruction_dump ((unsigned int __user *) regs->tpc);
+       }
+-      if (regs->tstate & TSTATE_PRIV)
++      if (regs->tstate & TSTATE_PRIV) {
++              gr_handle_kernel_exploit();
+               do_exit(SIGKILL);
++      }
+       do_exit(SIGSEGV);
+ }
+ EXPORT_SYMBOL(die_if_kernel);
+diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
+index 8201c25e..072a2a7 100644
+--- a/arch/sparc/kernel/unaligned_64.c
++++ b/arch/sparc/kernel/unaligned_64.c
+@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
+       static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
+       if (__ratelimit(&ratelimit)) {
+-              printk("Kernel unaligned access at TPC[%lx] %pS\n",
++              printk("Kernel unaligned access at TPC[%lx] %pA\n",
+                      regs->tpc, (void *) regs->tpc);
+       }
+ }
+diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
+index dbe119b..089c7c1 100644
+--- a/arch/sparc/lib/Makefile
++++ b/arch/sparc/lib/Makefile
+@@ -2,7 +2,7 @@
+ #
+ asflags-y := -ansi -DST_DIV0=0x02
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+ lib-$(CONFIG_SPARC32) += ashrdi3.o
+ lib-$(CONFIG_SPARC32) += memcpy.o memset.o
+diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
+index 85c233d..68500e0 100644
+--- a/arch/sparc/lib/atomic_64.S
++++ b/arch/sparc/lib/atomic_64.S
+@@ -17,7 +17,12 @@
+ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
+       BACKOFF_SETUP(%o2)
+ 1:    lduw    [%o1], %g1
+-      add     %g1, %o0, %g7
++      addcc   %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++      tvs     %icc, 6
++#endif
++
+       cas     [%o1], %g1, %g7
+       cmp     %g1, %g7
+       bne,pn  %icc, BACKOFF_LABEL(2f, 1b)
+@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
+ 2:    BACKOFF_SPIN(%o2, %o3, 1b)
+ ENDPROC(atomic_add)
++ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
++      BACKOFF_SETUP(%o2)
++1:    lduw    [%o1], %g1
++      add     %g1, %o0, %g7
++      cas     [%o1], %g1, %g7
++      cmp     %g1, %g7
++      bne,pn  %icc, 2f
++       nop
++      retl
++       nop
++2:    BACKOFF_SPIN(%o2, %o3, 1b)
++ENDPROC(atomic_add_unchecked)
++
+ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
+       BACKOFF_SETUP(%o2)
+ 1:    lduw    [%o1], %g1
+-      sub     %g1, %o0, %g7
++      subcc   %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++      tvs     %icc, 6
++#endif
++
+       cas     [%o1], %g1, %g7
+       cmp     %g1, %g7
+       bne,pn  %icc, BACKOFF_LABEL(2f, 1b)
+@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
+ 2:    BACKOFF_SPIN(%o2, %o3, 1b)
+ ENDPROC(atomic_sub)
++ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
++      BACKOFF_SETUP(%o2)
++1:    lduw    [%o1], %g1
++      sub     %g1, %o0, %g7
++      cas     [%o1], %g1, %g7
++      cmp     %g1, %g7
++      bne,pn  %icc, 2f
++       nop
++      retl
++       nop
++2:    BACKOFF_SPIN(%o2, %o3, 1b)
++ENDPROC(atomic_sub_unchecked)
++
+ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
+       BACKOFF_SETUP(%o2)
+ 1:    lduw    [%o1], %g1
+-      add     %g1, %o0, %g7
++      addcc   %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++      tvs     %icc, 6
++#endif
++
+       cas     [%o1], %g1, %g7
+       cmp     %g1, %g7
+       bne,pn  %icc, BACKOFF_LABEL(2f, 1b)
+@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
+ 2:    BACKOFF_SPIN(%o2, %o3, 1b)
+ ENDPROC(atomic_add_ret)
++ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
++      BACKOFF_SETUP(%o2)
++1:    lduw    [%o1], %g1
++      addcc   %g1, %o0, %g7
++      cas     [%o1], %g1, %g7
++      cmp     %g1, %g7
++      bne,pn  %icc, 2f
++       add    %g7, %o0, %g7
++      sra     %g7, 0, %o0
++      retl
++       nop
++2:    BACKOFF_SPIN(%o2, %o3, 1b)
++ENDPROC(atomic_add_ret_unchecked)
++
+ ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
+       BACKOFF_SETUP(%o2)
+ 1:    lduw    [%o1], %g1
+-      sub     %g1, %o0, %g7
++      subcc   %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++      tvs     %icc, 6
++#endif
++
+       cas     [%o1], %g1, %g7
+       cmp     %g1, %g7
+       bne,pn  %icc, BACKOFF_LABEL(2f, 1b)
+@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
+ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
+       BACKOFF_SETUP(%o2)
+ 1:    ldx     [%o1], %g1
+-      add     %g1, %o0, %g7
++      addcc   %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++      tvs     %xcc, 6
++#endif
++
+       casx    [%o1], %g1, %g7
+       cmp     %g1, %g7
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
+@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
+ 2:    BACKOFF_SPIN(%o2, %o3, 1b)
+ ENDPROC(atomic64_add)
++ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
++      BACKOFF_SETUP(%o2)
++1:    ldx     [%o1], %g1
++      addcc   %g1, %o0, %g7
++      casx    [%o1], %g1, %g7
++      cmp     %g1, %g7
++      bne,pn  %xcc, 2f
++       nop
++      retl
++       nop
++2:    BACKOFF_SPIN(%o2, %o3, 1b)
++ENDPROC(atomic64_add_unchecked)
++
+ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
+       BACKOFF_SETUP(%o2)
+ 1:    ldx     [%o1], %g1
+-      sub     %g1, %o0, %g7
++      subcc   %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++      tvs     %xcc, 6
++#endif
++
+       casx    [%o1], %g1, %g7
+       cmp     %g1, %g7
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
+@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
+ 2:    BACKOFF_SPIN(%o2, %o3, 1b)
+ ENDPROC(atomic64_sub)
++ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
++      BACKOFF_SETUP(%o2)
++1:    ldx     [%o1], %g1
++      subcc   %g1, %o0, %g7
++      casx    [%o1], %g1, %g7
++      cmp     %g1, %g7
++      bne,pn  %xcc, 2f
++       nop
++      retl
++       nop
++2:    BACKOFF_SPIN(%o2, %o3, 1b)
++ENDPROC(atomic64_sub_unchecked)
++
+ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
+       BACKOFF_SETUP(%o2)
+ 1:    ldx     [%o1], %g1
+-      add     %g1, %o0, %g7
++      addcc   %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++      tvs     %xcc, 6
++#endif
++
+       casx    [%o1], %g1, %g7
+       cmp     %g1, %g7
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
+@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
+ 2:    BACKOFF_SPIN(%o2, %o3, 1b)
+ ENDPROC(atomic64_add_ret)
++ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
++      BACKOFF_SETUP(%o2)
++1:    ldx     [%o1], %g1
++      addcc   %g1, %o0, %g7
++      casx    [%o1], %g1, %g7
++      cmp     %g1, %g7
++      bne,pn  %xcc, 2f
++       add    %g7, %o0, %g7
++      mov     %g7, %o0
++      retl
++       nop
++2:    BACKOFF_SPIN(%o2, %o3, 1b)
++ENDPROC(atomic64_add_ret_unchecked)
++
+ ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
+       BACKOFF_SETUP(%o2)
+ 1:    ldx     [%o1], %g1
+-      sub     %g1, %o0, %g7
++      subcc   %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++      tvs     %xcc, 6
++#endif
++
+       casx    [%o1], %g1, %g7
+       cmp     %g1, %g7
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
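In atomic_64.S above, the checked variants switch add/sub to addcc/subcc and follow them with tvs %icc, 6 (or %xcc for the 64-bit ops), so a signed overflow of the counter traps into the PAX_REFCOUNT handler instead of silently wrapping; the *_unchecked entry points keep the original wrapping semantics for counters where that is intended. The same idea in portable C, using a compiler builtin as a stand-in for the trap:

    #include <stdio.h>
    #include <stdlib.h>

    /* Sketch only: detect signed overflow on increment instead of wrapping. */
    static int checked_add(int *counter, int inc)
    {
            int result;

            if (__builtin_add_overflow(*counter, inc, &result)) {
                    /* the assembly raises trap 6 here (PAX_REFCOUNT) */
                    fprintf(stderr, "refcount overflow detected\n");
                    abort();
            }
            *counter = result;
            return result;
    }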
+diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
+index 0c4e35e..745d3e4 100644
+--- a/arch/sparc/lib/ksyms.c
++++ b/arch/sparc/lib/ksyms.c
+@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
+ /* Atomic counter implementation. */
+ EXPORT_SYMBOL(atomic_add);
++EXPORT_SYMBOL(atomic_add_unchecked);
+ EXPORT_SYMBOL(atomic_add_ret);
++EXPORT_SYMBOL(atomic_add_ret_unchecked);
+ EXPORT_SYMBOL(atomic_sub);
++EXPORT_SYMBOL(atomic_sub_unchecked);
+ EXPORT_SYMBOL(atomic_sub_ret);
+ EXPORT_SYMBOL(atomic64_add);
++EXPORT_SYMBOL(atomic64_add_unchecked);
+ EXPORT_SYMBOL(atomic64_add_ret);
++EXPORT_SYMBOL(atomic64_add_ret_unchecked);
+ EXPORT_SYMBOL(atomic64_sub);
++EXPORT_SYMBOL(atomic64_sub_unchecked);
+ EXPORT_SYMBOL(atomic64_sub_ret);
+ EXPORT_SYMBOL(atomic64_dec_if_positive);
+diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
+index 30c3ecc..736f015 100644
+--- a/arch/sparc/mm/Makefile
++++ b/arch/sparc/mm/Makefile
+@@ -2,7 +2,7 @@
+ #
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+ obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o gup.o
+ obj-y                   += fault_$(BITS).o
+diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
+index e98bfda..ea8d221 100644
+--- a/arch/sparc/mm/fault_32.c
++++ b/arch/sparc/mm/fault_32.c
+@@ -21,6 +21,9 @@
+ #include <linux/perf_event.h>
+ #include <linux/interrupt.h>
+ #include <linux/kdebug.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
+       return safe_compute_effective_address(regs, insn);
+ }
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++      vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++      unsigned int *kaddr;
++
++      vmf->page = alloc_page(GFP_HIGHUSER);
++      if (!vmf->page)
++              return VM_FAULT_OOM;
++
++      kaddr = kmap(vmf->page);
++      memset(kaddr, 0, PAGE_SIZE);
++      kaddr[0] = 0x9DE3BFA8U; /* save */
++      flush_dcache_page(vmf->page);
++      kunmap(vmf->page);
++      return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++      .close = pax_emuplt_close,
++      .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++      int ret;
++
++      INIT_LIST_HEAD(&vma->anon_vma_chain);
++      vma->vm_mm = current->mm;
++      vma->vm_start = addr;
++      vma->vm_end = addr + PAGE_SIZE;
++      vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++      vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++      vma->vm_ops = &pax_vm_ops;
++
++      ret = insert_vm_struct(current->mm, vma);
++      if (ret)
++              return ret;
++
++      ++current->mm->total_vm;
++      return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ *         2 when patched PLT trampoline was detected
++ *         3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++      int err;
++
++      do { /* PaX: patched PLT emulation #1 */
++              unsigned int sethi1, sethi2, jmpl;
++
++              err = get_user(sethi1, (unsigned int *)regs->pc);
++              err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
++              err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
++
++              if (err)
++                      break;
++
++              if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++                  (sethi2 & 0xFFC00000U) == 0x03000000U &&
++                  (jmpl & 0xFFFFE000U) == 0x81C06000U)
++              {
++                      unsigned int addr;
++
++                      regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++                      addr = regs->u_regs[UREG_G1];
++                      addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++                      regs->pc = addr;
++                      regs->npc = addr+4;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: patched PLT emulation #2 */
++              unsigned int ba;
++
++              err = get_user(ba, (unsigned int *)regs->pc);
++
++              if (err)
++                      break;
++
++              if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
++                      unsigned int addr;
++
++                      if ((ba & 0xFFC00000U) == 0x30800000U)
++                              addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++                      else
++                              addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++                      regs->pc = addr;
++                      regs->npc = addr+4;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: patched PLT emulation #3 */
++              unsigned int sethi, bajmpl, nop;
++
++              err = get_user(sethi, (unsigned int *)regs->pc);
++              err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
++              err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++              if (err)
++                      break;
++
++              if ((sethi & 0xFFC00000U) == 0x03000000U &&
++                  ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
++                  nop == 0x01000000U)
++              {
++                      unsigned int addr;
++
++                      addr = (sethi & 0x003FFFFFU) << 10;
++                      regs->u_regs[UREG_G1] = addr;
++                      if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
++                              addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++                      else
++                              addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++                      regs->pc = addr;
++                      regs->npc = addr+4;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: unpatched PLT emulation step 1 */
++              unsigned int sethi, ba, nop;
++
++              err = get_user(sethi, (unsigned int *)regs->pc);
++              err |= get_user(ba, (unsigned int *)(regs->pc+4));
++              err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++              if (err)
++                      break;
++
++              if ((sethi & 0xFFC00000U) == 0x03000000U &&
++                  ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++                  nop == 0x01000000U)
++              {
++                      unsigned int addr, save, call;
++
++                      if ((ba & 0xFFC00000U) == 0x30800000U)
++                              addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++                      else
++                              addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++
++                      err = get_user(save, (unsigned int *)addr);
++                      err |= get_user(call, (unsigned int *)(addr+4));
++                      err |= get_user(nop, (unsigned int *)(addr+8));
++                      if (err)
++                              break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++                      if (save == 0x9DE3BFA8U &&
++                          (call & 0xC0000000U) == 0x40000000U &&
++                          nop == 0x01000000U)
++                      {
++                              struct vm_area_struct *vma;
++                              unsigned long call_dl_resolve;
++
++                              down_read(&current->mm->mmap_sem);
++                              call_dl_resolve = current->mm->call_dl_resolve;
++                              up_read(&current->mm->mmap_sem);
++                              if (likely(call_dl_resolve))
++                                      goto emulate;
++
++                              vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++                              down_write(&current->mm->mmap_sem);
++                              if (current->mm->call_dl_resolve) {
++                                      call_dl_resolve = current->mm->call_dl_resolve;
++                                      up_write(&current->mm->mmap_sem);
++                                      if (vma)
++                                              kmem_cache_free(vm_area_cachep, vma);
++                                      goto emulate;
++                              }
++
++                              call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++                              if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++                                      up_write(&current->mm->mmap_sem);
++                                      if (vma)
++                                              kmem_cache_free(vm_area_cachep, vma);
++                                      return 1;
++                              }
++
++                              if (pax_insert_vma(vma, call_dl_resolve)) {
++                                      up_write(&current->mm->mmap_sem);
++                                      kmem_cache_free(vm_area_cachep, vma);
++                                      return 1;
++                              }
++
++                              current->mm->call_dl_resolve = call_dl_resolve;
++                              up_write(&current->mm->mmap_sem);
++
++emulate:
++                              regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++                              regs->pc = call_dl_resolve;
++                              regs->npc = addr+4;
++                              return 3;
++                      }
++#endif
++
++                      /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++                      if ((save & 0xFFC00000U) == 0x05000000U &&
++                          (call & 0xFFFFE000U) == 0x85C0A000U &&
++                          nop == 0x01000000U)
++                      {
++                              regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++                              regs->u_regs[UREG_G2] = addr + 4;
++                              addr = (save & 0x003FFFFFU) << 10;
++                              addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++                              regs->pc = addr;
++                              regs->npc = addr+4;
++                              return 3;
++                      }
++              }
++      } while (0);
++
++      do { /* PaX: unpatched PLT emulation step 2 */
++              unsigned int save, call, nop;
++
++              err = get_user(save, (unsigned int *)(regs->pc-4));
++              err |= get_user(call, (unsigned int *)regs->pc);
++              err |= get_user(nop, (unsigned int *)(regs->pc+4));
++              if (err)
++                      break;
++
++              if (save == 0x9DE3BFA8U &&
++                  (call & 0xC0000000U) == 0x40000000U &&
++                  nop == 0x01000000U)
++              {
++                      unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
++
++                      regs->u_regs[UREG_RETPC] = regs->pc;
++                      regs->pc = dl_resolve;
++                      regs->npc = dl_resolve+4;
++                      return 3;
++              }
++      } while (0);
++#endif
++
++      return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++      unsigned long i;
++
++      printk(KERN_ERR "PAX: bytes at PC: ");
++      for (i = 0; i < 8; i++) {
++              unsigned int c;
++              if (get_user(c, (unsigned int *)pc+i))
++                      printk(KERN_CONT "???????? ");
++              else
++                      printk(KERN_CONT "%08x ", c);
++      }
++      printk("\n");
++}
++#endif
++
+ static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
+                                     int text_fault)
+ {
+@@ -230,6 +504,24 @@ good_area:
+               if (!(vma->vm_flags & VM_WRITE))
+                       goto bad_area;
+       } else {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++              if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
++                      up_read(&mm->mmap_sem);
++                      switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++                      case 2:
++                      case 3:
++                              return;
++#endif
++
++                      }
++                      pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
++                      do_group_exit(SIGKILL);
++              }
++#endif
++
+               /* Allow reads even for write-only mappings */
+               if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+                       goto bad_area;
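The fault_32.c additions above hook the page-fault path: on a PAGEEXEC violation at an instruction fetch, pax_handle_fetch_fault() reads the words at the faulting PC and, if they match a known PLT trampoline sequence, emulates the jump (returning 2 or 3) instead of letting the fault kill the task. A compact sketch of that recognition step, where fetch_insn() is a hypothetical stand-in for get_user() on an instruction pointer:

    #include <stdint.h>
    #include <stdbool.h>

    int fetch_insn(uint32_t *insn, unsigned long pc);    /* hypothetical helper */

    static bool looks_like_patched_plt(unsigned long pc)
    {
            uint32_t sethi, jmpl;

            if (fetch_insn(&sethi, pc) || fetch_insn(&jmpl, pc + 8))
                    return false;

            /* sethi %hi(addr), %g1  followed by  jmpl %g1 + %lo(addr), %g0,
             * the same masks the patch itself tests for */
            return (sethi & 0xFFC00000U) == 0x03000000U &&
                   (jmpl  & 0xFFFFE000U) == 0x81C06000U;
    }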
+diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
+index 5062ff3..e0b75f3 100644
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -21,6 +21,9 @@
+ #include <linux/kprobes.h>
+ #include <linux/kdebug.h>
+ #include <linux/percpu.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
+       printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
+              regs->tpc);
+       printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
+-      printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
++      printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
+       printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
+       dump_stack();
+       unhandled_fault(regs->tpc, current, regs);
+@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
+       show_regs(regs);
+ }
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++      vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++      unsigned int *kaddr;
++
++      vmf->page = alloc_page(GFP_HIGHUSER);
++      if (!vmf->page)
++              return VM_FAULT_OOM;
++
++      kaddr = kmap(vmf->page);
++      memset(kaddr, 0, PAGE_SIZE);
++      kaddr[0] = 0x9DE3BFA8U; /* save */
++      flush_dcache_page(vmf->page);
++      kunmap(vmf->page);
++      return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++      .close = pax_emuplt_close,
++      .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++      int ret;
++
++      INIT_LIST_HEAD(&vma->anon_vma_chain);
++      vma->vm_mm = current->mm;
++      vma->vm_start = addr;
++      vma->vm_end = addr + PAGE_SIZE;
++      vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++      vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++      vma->vm_ops = &pax_vm_ops;
++
++      ret = insert_vm_struct(current->mm, vma);
++      if (ret)
++              return ret;
++
++      ++current->mm->total_vm;
++      return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->tpc = fault address)
++ *
++ * returns 1 when task should be killed
++ *         2 when patched PLT trampoline was detected
++ *         3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++      int err;
++
++      do { /* PaX: patched PLT emulation #1 */
++              unsigned int sethi1, sethi2, jmpl;
++
++              err = get_user(sethi1, (unsigned int *)regs->tpc);
++              err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
++              err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
++
++              if (err)
++                      break;
++
++              if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++                  (sethi2 & 0xFFC00000U) == 0x03000000U &&
++                  (jmpl & 0xFFFFE000U) == 0x81C06000U)
++              {
++                      unsigned long addr;
++
++                      regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++                      addr = regs->u_regs[UREG_G1];
++                      addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++                      if (test_thread_flag(TIF_32BIT))
++                              addr &= 0xFFFFFFFFUL;
++
++                      regs->tpc = addr;
++                      regs->tnpc = addr+4;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: patched PLT emulation #2 */
++              unsigned int ba;
++
++              err = get_user(ba, (unsigned int *)regs->tpc);
++
++              if (err)
++                      break;
++
++              if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
++                      unsigned long addr;
++
++                      if ((ba & 0xFFC00000U) == 0x30800000U)
++                              addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++                      else
++                              addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++                      if (test_thread_flag(TIF_32BIT))
++                              addr &= 0xFFFFFFFFUL;
++
++                      regs->tpc = addr;
++                      regs->tnpc = addr+4;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: patched PLT emulation #3 */
++              unsigned int sethi, bajmpl, nop;
++
++              err = get_user(sethi, (unsigned int *)regs->tpc);
++              err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
++              err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++              if (err)
++                      break;
++
++              if ((sethi & 0xFFC00000U) == 0x03000000U &&
++                  ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
++                  nop == 0x01000000U)
++              {
++                      unsigned long addr;
++
++                      addr = (sethi & 0x003FFFFFU) << 10;
++                      regs->u_regs[UREG_G1] = addr;
++                      if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
++                              addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++                      else
++                              addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++                      if (test_thread_flag(TIF_32BIT))
++                              addr &= 0xFFFFFFFFUL;
++
++                      regs->tpc = addr;
++                      regs->tnpc = addr+4;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: patched PLT emulation #4 */
++              unsigned int sethi, mov1, call, mov2;
++
++              err = get_user(sethi, (unsigned int *)regs->tpc);
++              err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
++              err |= get_user(call, (unsigned int *)(regs->tpc+8));
++              err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
++
++              if (err)
++                      break;
++
++              if ((sethi & 0xFFC00000U) == 0x03000000U &&
++                  mov1 == 0x8210000FU &&
++                  (call & 0xC0000000U) == 0x40000000U &&
++                  mov2 == 0x9E100001U)
++              {
++                      unsigned long addr;
++
++                      regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
++                      addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++                      if (test_thread_flag(TIF_32BIT))
++                              addr &= 0xFFFFFFFFUL;
++
++                      regs->tpc = addr;
++                      regs->tnpc = addr+4;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: patched PLT emulation #5 */
++              unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
++
++              err = get_user(sethi, (unsigned int *)regs->tpc);
++              err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++              err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++              err |= get_user(or1, (unsigned int *)(regs->tpc+12));
++              err |= get_user(or2, (unsigned int *)(regs->tpc+16));
++              err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
++              err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
++              err |= get_user(nop, (unsigned int *)(regs->tpc+28));
++
++              if (err)
++                      break;
++
++              if ((sethi & 0xFFC00000U) == 0x03000000U &&
++                  (sethi1 & 0xFFC00000U) == 0x03000000U &&
++                  (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++                  (or1 & 0xFFFFE000U) == 0x82106000U &&
++                  (or2 & 0xFFFFE000U) == 0x8A116000U &&
++                  sllx == 0x83287020U &&
++                  jmpl == 0x81C04005U &&
++                  nop == 0x01000000U)
++              {
++                      unsigned long addr;
++
++                      regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++                      regs->u_regs[UREG_G1] <<= 32;
++                      regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++                      addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++                      regs->tpc = addr;
++                      regs->tnpc = addr+4;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: patched PLT emulation #6 */
++              unsigned int sethi, sethi1, sethi2, sllx, or,  jmpl, nop;
++
++              err = get_user(sethi, (unsigned int *)regs->tpc);
++              err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++              err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++              err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
++              err |= get_user(or, (unsigned int *)(regs->tpc+16));
++              err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
++              err |= get_user(nop, (unsigned int *)(regs->tpc+24));
++
++              if (err)
++                      break;
++
++              if ((sethi & 0xFFC00000U) == 0x03000000U &&
++                  (sethi1 & 0xFFC00000U) == 0x03000000U &&
++                  (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++                  sllx == 0x83287020U &&
++                  (or & 0xFFFFE000U) == 0x8A116000U &&
++                  jmpl == 0x81C04005U &&
++                  nop == 0x01000000U)
++              {
++                      unsigned long addr;
++
++                      regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
++                      regs->u_regs[UREG_G1] <<= 32;
++                      regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
++                      addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++                      regs->tpc = addr;
++                      regs->tnpc = addr+4;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: unpatched PLT emulation step 1 */
++              unsigned int sethi, ba, nop;
++
++              err = get_user(sethi, (unsigned int *)regs->tpc);
++              err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++              err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++              if (err)
++                      break;
++
++              if ((sethi & 0xFFC00000U) == 0x03000000U &&
++                  ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++                  nop == 0x01000000U)
++              {
++                      unsigned long addr;
++                      unsigned int save, call;
++                      unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
++
++                      if ((ba & 0xFFC00000U) == 0x30800000U)
++                              addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++                      else
++                              addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++                      if (test_thread_flag(TIF_32BIT))
++                              addr &= 0xFFFFFFFFUL;
++
++                      err = get_user(save, (unsigned int *)addr);
++                      err |= get_user(call, (unsigned int *)(addr+4));
++                      err |= get_user(nop, (unsigned int *)(addr+8));
++                      if (err)
++                              break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++                      if (save == 0x9DE3BFA8U &&
++                          (call & 0xC0000000U) == 0x40000000U &&
++                          nop == 0x01000000U)
++                      {
++                              struct vm_area_struct *vma;
++                              unsigned long call_dl_resolve;
++
++                              down_read(&current->mm->mmap_sem);
++                              call_dl_resolve = current->mm->call_dl_resolve;
++                              up_read(&current->mm->mmap_sem);
++                              if (likely(call_dl_resolve))
++                                      goto emulate;
++
++                              vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++                              down_write(&current->mm->mmap_sem);
++                              if (current->mm->call_dl_resolve) {
++                                      call_dl_resolve = current->mm->call_dl_resolve;
++                                      up_write(&current->mm->mmap_sem);
++                                      if (vma)
++                                              kmem_cache_free(vm_area_cachep, vma);
++                                      goto emulate;
++                              }
++
++                              call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++                              if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++                                      up_write(&current->mm->mmap_sem);
++                                      if (vma)
++                                              kmem_cache_free(vm_area_cachep, vma);
++                                      return 1;
++                              }
++
++                              if (pax_insert_vma(vma, call_dl_resolve)) {
++                                      up_write(&current->mm->mmap_sem);
++                                      kmem_cache_free(vm_area_cachep, vma);
++                                      return 1;
++                              }
++
++                              current->mm->call_dl_resolve = call_dl_resolve;
++                              up_write(&current->mm->mmap_sem);
++
++emulate:
++                              regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++                              regs->tpc = call_dl_resolve;
++                              regs->tnpc = addr+4;
++                              return 3;
++                      }
++#endif
++
++                      /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++                      if ((save & 0xFFC00000U) == 0x05000000U &&
++                          (call & 0xFFFFE000U) == 0x85C0A000U &&
++                          nop == 0x01000000U)
++                      {
++                              regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++                              regs->u_regs[UREG_G2] = addr + 4;
++                              addr = (save & 0x003FFFFFU) << 10;
++                              addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++                              if (test_thread_flag(TIF_32BIT))
++                                      addr &= 0xFFFFFFFFUL;
++
++                              regs->tpc = addr;
++                              regs->tnpc = addr+4;
++                              return 3;
++                      }
++
++                      /* PaX: 64-bit PLT stub */
++                      err = get_user(sethi1, (unsigned int *)addr);
++                      err |= get_user(sethi2, (unsigned int *)(addr+4));
++                      err |= get_user(or1, (unsigned int *)(addr+8));
++                      err |= get_user(or2, (unsigned int *)(addr+12));
++                      err |= get_user(sllx, (unsigned int *)(addr+16));
++                      err |= get_user(add, (unsigned int *)(addr+20));
++                      err |= get_user(jmpl, (unsigned int *)(addr+24));
++                      err |= get_user(nop, (unsigned int *)(addr+28));
++                      if (err)
++                              break;
++
++                      if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
++                          (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++                          (or1 & 0xFFFFE000U) == 0x88112000U &&
++                          (or2 & 0xFFFFE000U) == 0x8A116000U &&
++                          sllx == 0x89293020U &&
++                          add == 0x8A010005U &&
++                          jmpl == 0x89C14000U &&
++                          nop == 0x01000000U)
++                      {
++                              regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++                              regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++                              regs->u_regs[UREG_G4] <<= 32;
++                              regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++                              regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
++                              regs->u_regs[UREG_G4] = addr + 24;
++                              addr = regs->u_regs[UREG_G5];
++                              regs->tpc = addr;
++                              regs->tnpc = addr+4;
++                              return 3;
++                      }
++              }
++      } while (0);
++
++#ifdef CONFIG_PAX_DLRESOLVE
++      do { /* PaX: unpatched PLT emulation step 2 */
++              unsigned int save, call, nop;
++
++              err = get_user(save, (unsigned int *)(regs->tpc-4));
++              err |= get_user(call, (unsigned int *)regs->tpc);
++              err |= get_user(nop, (unsigned int *)(regs->tpc+4));
++              if (err)
++                      break;
++
++              if (save == 0x9DE3BFA8U &&
++                  (call & 0xC0000000U) == 0x40000000U &&
++                  nop == 0x01000000U)
++              {
++                      unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++                      if (test_thread_flag(TIF_32BIT))
++                              dl_resolve &= 0xFFFFFFFFUL;
++
++                      regs->u_regs[UREG_RETPC] = regs->tpc;
++                      regs->tpc = dl_resolve;
++                      regs->tnpc = dl_resolve+4;
++                      return 3;
++              }
++      } while (0);
++#endif
++
++      do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
++              unsigned int sethi, ba, nop;
++
++              err = get_user(sethi, (unsigned int *)regs->tpc);
++              err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++              err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++              if (err)
++                      break;
++
++              if ((sethi & 0xFFC00000U) == 0x03000000U &&
++                  (ba & 0xFFF00000U) == 0x30600000U &&
++                  nop == 0x01000000U)
++              {
++                      unsigned long addr;
++
++                      addr = (sethi & 0x003FFFFFU) << 10;
++                      regs->u_regs[UREG_G1] = addr;
++                      addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++                      if (test_thread_flag(TIF_32BIT))
++                              addr &= 0xFFFFFFFFUL;
++
++                      regs->tpc = addr;
++                      regs->tnpc = addr+4;
++                      return 2;
++              }
++      } while (0);
++
++#endif
++
++      return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++      unsigned long i;
++
++      printk(KERN_ERR "PAX: bytes at PC: ");
++      for (i = 0; i < 8; i++) {
++              unsigned int c;
++              if (get_user(c, (unsigned int *)pc+i))
++                      printk(KERN_CONT "???????? ");
++              else
++                      printk(KERN_CONT "%08x ", c);
++      }
++      printk("\n");
++}
++#endif
++
+ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ {
+       struct mm_struct *mm = current->mm;
+@@ -341,6 +804,29 @@ retry:
+       if (!vma)
+               goto bad_area;
++#ifdef CONFIG_PAX_PAGEEXEC
++      /* PaX: detect ITLB misses on non-exec pages */
++      if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
++          !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
++      {
++              if (address != regs->tpc)
++                      goto good_area;
++
++              up_read(&mm->mmap_sem);
++              switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++              case 2:
++              case 3:
++                      return;
++#endif
++
++              }
++              pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
++              do_group_exit(SIGKILL);
++      }
++#endif
++
+       /* Pure DTLB misses do not tell us whether the fault causing
+        * load/store/atomic was a write or not, it only says that there
+        * was no match.  So in such a case we (carefully) read the
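Every PaX emulation branch in the hunk above follows the same recipe: fetch the instruction words around the faulting tpc with get_user(), mask off the operand fields, and compare the remainder against the fixed opcode bits of a known PLT stub; only when every word matches are the registers and tpc rewritten and 2 or 3 returned, so that do_sparc64_fault() can resume the task instead of reporting the fault. A minimal sketch of that matching step, with hypothetical helper names that are not part of the patch:

static int insn_is_sethi_g1(unsigned int insn)
{
	/* "sethi %hi(imm22), %g1": the top ten bits (op, rd, op2) are fixed,
	 * which is exactly what the 0xFFC00000/0x03000000 tests above check. */
	return (insn & 0xFFC00000U) == 0x03000000U;
}

static unsigned long sethi_g1_value(unsigned int insn)
{
	/* sethi places imm22 into bits 31:10, matching the "<< 10" used above. */
	return (unsigned long)(insn & 0x003FFFFFU) << 10;
}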
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index d2b5944..d878f3c 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -28,7 +28,8 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+                                                       unsigned long addr,
+                                                       unsigned long len,
+                                                       unsigned long pgoff,
+-                                                      unsigned long flags)
++                                                      unsigned long flags,
++                                                      unsigned long offset)
+ {
+       unsigned long task_size = TASK_SIZE;
+       struct vm_unmapped_area_info info;
+@@ -38,15 +39,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+       info.flags = 0;
+       info.length = len;
+-      info.low_limit = TASK_UNMAPPED_BASE;
++      info.low_limit = mm->mmap_base;
+       info.high_limit = min(task_size, VA_EXCLUDE_START);
+       info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+       info.align_offset = 0;
++      info.threadstack_offset = offset;
+       addr = vm_unmapped_area(&info);
+       if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+               VM_BUG_ON(addr != -ENOMEM);
+               info.low_limit = VA_EXCLUDE_END;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      info.low_limit += mm->delta_mmap;
++#endif
++
+               info.high_limit = task_size;
+               addr = vm_unmapped_area(&info);
+       }
+@@ -58,7 +66,8 @@ static unsigned long
+ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+                                 const unsigned long len,
+                                 const unsigned long pgoff,
+-                                const unsigned long flags)
++                                const unsigned long flags,
++                                const unsigned long offset)
+ {
+       struct mm_struct *mm = current->mm;
+       unsigned long addr = addr0;
+@@ -73,6 +82,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       info.high_limit = mm->mmap_base;
+       info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+       info.align_offset = 0;
++      info.threadstack_offset = offset;
+       addr = vm_unmapped_area(&info);
+       /*
+@@ -85,6 +95,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+               VM_BUG_ON(addr != -ENOMEM);
+               info.flags = 0;
+               info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      info.low_limit += mm->delta_mmap;
++#endif
++
+               info.high_limit = STACK_TOP32;
+               addr = vm_unmapped_area(&info);
+       }
+@@ -99,6 +115,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       unsigned long task_size = TASK_SIZE;
++      unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
+       if (test_thread_flag(TIF_32BIT))
+               task_size = STACK_TOP32;
+@@ -114,19 +131,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+               return addr;
+       }
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+               addr = ALIGN(addr, HPAGE_SIZE);
+               vma = find_vma(mm, addr);
+-              if (task_size - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+       if (mm->get_unmapped_area == arch_get_unmapped_area)
+               return hugetlb_get_unmapped_area_bottomup(file, addr, len,
+-                              pgoff, flags);
++                              pgoff, flags, offset);
+       else
+               return hugetlb_get_unmapped_area_topdown(file, addr, len,
+-                              pgoff, flags);
++                              pgoff, flags, offset);
+ }
+ pte_t *huge_pte_alloc(struct mm_struct *mm,
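The bare `if (!(mm->pax_flags & MF_PAX_RANDMMAP))` inserted before `if (addr)` is an idiom: when CONFIG_PAX_RANDMMAP is compiled in, the whole hint block becomes its body, so an explicit address hint is only honoured when randomization is off for this mm. Spelled out with braces, and using check_heap_stack_gap() which this patch adds elsewhere, the hint path is roughly as follows (hypothetical helper, illustration only):

static unsigned long hugetlb_try_hint(struct mm_struct *mm, unsigned long addr,
				      unsigned long len, unsigned long task_size,
				      unsigned long offset)
{
	struct vm_area_struct *vma;

	if (mm->pax_flags & MF_PAX_RANDMMAP)	/* hints are ignored under RANDMMAP */
		return 0;
	if (!addr)
		return 0;

	addr = ALIGN(addr, HPAGE_SIZE);
	vma = find_vma(mm, addr);
	if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
		return addr;
	return 0;
}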
+diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
+index f4500c6..889656c 100644
+--- a/arch/tile/include/asm/atomic_64.h
++++ b/arch/tile/include/asm/atomic_64.h
+@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+ #define atomic64_inc_not_zero(v)      atomic64_add_unless((v), 1, 0)
++#define atomic64_read_unchecked(v)            atomic64_read(v)
++#define atomic64_set_unchecked(v, i)          atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v)          atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v)   atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v)          atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v)             atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v)      atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v)             atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n)   atomic64_cmpxchg((v), (o), (n))
++
+ /* Atomic dec and inc don't implement barrier, so provide them if needed. */
+ #define smp_mb__before_atomic_dec()   smp_mb()
+ #define smp_mb__after_atomic_dec()    smp_mb()
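The *_unchecked aliases mirror an API split made elsewhere in this patch: on architectures where PaX hardens the regular atomics against overflow, callers that deliberately rely on wraparound (statistics-style counters) are expected to use the _unchecked variants. On tile nothing is hardened, so the block above simply maps them to the plain operations; a caller would look like this (hypothetical counter, sketch only):

static atomic64_t frames_seen = ATOMIC64_INIT(0);

static inline void count_frame(void)
{
	/* wraparound is acceptable here, so the unchecked form is used */
	atomic64_inc_unchecked(&frames_seen);
}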
+diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
+index a9a5299..0fce79e 100644
+--- a/arch/tile/include/asm/cache.h
++++ b/arch/tile/include/asm/cache.h
+@@ -15,11 +15,12 @@
+ #ifndef _ASM_TILE_CACHE_H
+ #define _ASM_TILE_CACHE_H
++#include <linux/const.h>
+ #include <arch/chip.h>
+ /* bytes per L1 data cache line */
+ #define L1_CACHE_SHIFT                CHIP_L1D_LOG_LINE_SIZE()
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ /* bytes per L2 cache line */
+ #define L2_CACHE_SHIFT                CHIP_L2_LOG_LINE_SIZE()
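_AC() comes from <linux/const.h>, which the hunk now includes: it pastes the UL suffix onto the constant only when compiling C and drops it under __ASSEMBLY__, so L1_CACHE_BYTES becomes an unsigned long in C code without breaking any assembler source that includes the same header. Paraphrased, the macro is roughly:

/* paraphrase of <linux/const.h>, for reference only */
#ifdef __ASSEMBLY__
# define _AC(X, Y)	X		/* assembler sees the bare constant */
#else
# define __AC(X, Y)	(X##Y)
# define _AC(X, Y)	__AC(X, Y)	/* C sees the constant with its type suffix */
#endif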
+diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
+index 8a082bc..7a6bf87 100644
+--- a/arch/tile/include/asm/uaccess.h
++++ b/arch/tile/include/asm/uaccess.h
+@@ -408,9 +408,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
+                                         const void __user *from,
+                                         unsigned long n)
+ {
+-      int sz = __compiletime_object_size(to);
++      size_t sz = __compiletime_object_size(to);
+-      if (likely(sz == -1 || sz >= n))
++      if (likely(sz == (size_t)-1 || sz >= n))
+               n = _copy_from_user(to, from, n);
+       else
+               copy_from_user_overflow();
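__compiletime_object_size() resolves to __builtin_object_size(), whose "size unknown" sentinel is (size_t)-1; the hunk keeps sz in that type and writes the sentinel test explicitly so the comparison with n stays entirely unsigned instead of relying on int-to-unsigned conversions. The resulting shape of the check, with the surrounding return handling assumed (sketch, not the patch itself):

static inline unsigned long copy_from_user_checked(void *to,
						   const void __user *from,
						   unsigned long n)
{
	size_t sz = __compiletime_object_size(to);	/* (size_t)-1 if unknown */

	if (likely(sz == (size_t)-1 || sz >= n))
		n = _copy_from_user(to, from, n);
	else
		copy_from_user_overflow();
	return n;
}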
+diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
+index 650ccff..45fe2d6 100644
+--- a/arch/tile/mm/hugetlbpage.c
++++ b/arch/tile/mm/hugetlbpage.c
+@@ -239,6 +239,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
+       info.high_limit = TASK_SIZE;
+       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+       info.align_offset = 0;
++      info.threadstack_offset = 0;
+       return vm_unmapped_area(&info);
+ }
+@@ -256,6 +257,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+       info.high_limit = current->mm->mmap_base;
+       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+       info.align_offset = 0;
++      info.threadstack_offset = 0;
+       addr = vm_unmapped_area(&info);
+       /*
+diff --git a/arch/um/Makefile b/arch/um/Makefile
+index 133f7de..1d6f2f1 100644
+--- a/arch/um/Makefile
++++ b/arch/um/Makefile
+@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
+       $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
+       $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
++ifdef CONSTIFY_PLUGIN
++USER_CFLAGS   += -fplugin-arg-constify_plugin-no-constify
++endif
++
+ #This will adjust *FLAGS accordingly to the platform.
+ include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
+diff --git a/arch/um/defconfig b/arch/um/defconfig
+index 08107a7..ab22afe 100644
+--- a/arch/um/defconfig
++++ b/arch/um/defconfig
+@@ -51,7 +51,6 @@ CONFIG_X86_CMPXCHG=y
+ CONFIG_X86_L1_CACHE_SHIFT=5
+ CONFIG_X86_XADD=y
+ CONFIG_X86_PPRO_FENCE=y
+-CONFIG_X86_WP_WORKS_OK=y
+ CONFIG_X86_INVLPG=y
+ CONFIG_X86_BSWAP=y
+ CONFIG_X86_POPAD_OK=y
+diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
+index 19e1bdd..3665b77 100644
+--- a/arch/um/include/asm/cache.h
++++ b/arch/um/include/asm/cache.h
+@@ -1,6 +1,7 @@
+ #ifndef __UM_CACHE_H
+ #define __UM_CACHE_H
++#include <linux/const.h>
+ #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
+ # define L1_CACHE_SHIFT               (CONFIG_X86_L1_CACHE_SHIFT)
+@@ -12,6 +13,6 @@
+ # define L1_CACHE_SHIFT               5
+ #endif
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ #endif
+diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
+index 2e0a6b1..a64d0f5 100644
+--- a/arch/um/include/asm/kmap_types.h
++++ b/arch/um/include/asm/kmap_types.h
+@@ -8,6 +8,6 @@
+ /* No more #include "asm/arch/kmap_types.h" ! */
+-#define KM_TYPE_NR 14
++#define KM_TYPE_NR 15
+ #endif
+diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
+index 5ff53d9..5850cdf 100644
+--- a/arch/um/include/asm/page.h
++++ b/arch/um/include/asm/page.h
+@@ -14,6 +14,9 @@
+ #define PAGE_SIZE     (_AC(1, UL) << PAGE_SHIFT)
+ #define PAGE_MASK     (~(PAGE_SIZE-1))
++#define ktla_ktva(addr)                       (addr)
++#define ktva_ktla(addr)                       (addr)
++
+ #ifndef __ASSEMBLY__
+ struct page;
+diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
+index 0032f92..cd151e0 100644
+--- a/arch/um/include/asm/pgtable-3level.h
++++ b/arch/um/include/asm/pgtable-3level.h
+@@ -58,6 +58,7 @@
+ #define pud_present(x)        (pud_val(x) & _PAGE_PRESENT)
+ #define pud_populate(mm, pud, pmd) \
+       set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
++#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
+ #ifdef CONFIG_64BIT
+ #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
+diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
+index bbcef52..6a2a483 100644
+--- a/arch/um/kernel/process.c
++++ b/arch/um/kernel/process.c
+@@ -367,22 +367,6 @@ int singlestepping(void * t)
+       return 2;
+ }
+-/*
+- * Only x86 and x86_64 have an arch_align_stack().
+- * All other arches have "#define arch_align_stack(x) (x)"
+- * in their asm/system.h
+- * As this is included in UML from asm-um/system-generic.h,
+- * we can use it to behave as the subarch does.
+- */
+-#ifndef arch_align_stack
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+-      if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+-              sp -= get_random_int() % 8192;
+-      return sp & ~0xf;
+-}
+-#endif
+-
+ unsigned long get_wchan(struct task_struct *p)
+ {
+       unsigned long stack_page, sp, ip;
+diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
+index ad8f795..2c7eec6 100644
+--- a/arch/unicore32/include/asm/cache.h
++++ b/arch/unicore32/include/asm/cache.h
+@@ -12,8 +12,10 @@
+ #ifndef __UNICORE_CACHE_H__
+ #define __UNICORE_CACHE_H__
+-#define L1_CACHE_SHIFT                (5)
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#include <linux/const.h>
++
++#define L1_CACHE_SHIFT                5
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ /*
+  * Memory returned by kmalloc() may be used for DMA, so we must make
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index fe120da..24177f7 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -239,7 +239,7 @@ config X86_HT
+ config X86_32_LAZY_GS
+       def_bool y
+-      depends on X86_32 && !CC_STACKPROTECTOR
++      depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
+ config ARCH_HWEIGHT_CFLAGS
+       string
+@@ -1073,6 +1073,7 @@ config MICROCODE_EARLY
+ config X86_MSR
+       tristate "/dev/cpu/*/msr - Model-specific register support"
++      depends on !GRKERNSEC_KMEM
+       ---help---
+         This device gives privileged processes access to the x86
+         Model-Specific Registers (MSRs).  It is a character device with
+@@ -1096,7 +1097,7 @@ choice
+ config NOHIGHMEM
+       bool "off"
+-      depends on !X86_NUMAQ
++      depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+       ---help---
+         Linux can use up to 64 Gigabytes of physical memory on x86 systems.
+         However, the address space of 32-bit x86 processors is only 4
+@@ -1133,7 +1134,7 @@ config NOHIGHMEM
+ config HIGHMEM4G
+       bool "4GB"
+-      depends on !X86_NUMAQ
++      depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+       ---help---
+         Select this if you have a 32-bit processor and between 1 and 4
+         gigabytes of physical RAM.
+@@ -1186,7 +1187,7 @@ config PAGE_OFFSET
+       hex
+       default 0xB0000000 if VMSPLIT_3G_OPT
+       default 0x80000000 if VMSPLIT_2G
+-      default 0x78000000 if VMSPLIT_2G_OPT
++      default 0x70000000 if VMSPLIT_2G_OPT
+       default 0x40000000 if VMSPLIT_1G
+       default 0xC0000000
+       depends on X86_32
+@@ -1584,6 +1585,7 @@ config SECCOMP
+ config CC_STACKPROTECTOR
+       bool "Enable -fstack-protector buffer overflow detection"
++      depends on X86_64 || !PAX_MEMORY_UDEREF
+       ---help---
+         This option turns on the -fstack-protector GCC feature. This
+         feature puts, at the beginning of functions, a canary value on
+@@ -1703,6 +1705,8 @@ config X86_NEED_RELOCS
+ config PHYSICAL_ALIGN
+       hex "Alignment value to which kernel should be aligned" if X86_32
+       default "0x1000000"
++      range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
++      range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
+       range 0x2000 0x1000000
+       ---help---
+         This value puts the alignment restrictions on physical address
+@@ -1778,9 +1782,10 @@ config DEBUG_HOTPLUG_CPU0
+         If unsure, say N.
+ config COMPAT_VDSO
+-      def_bool y
++      def_bool n
+       prompt "Compat VDSO support"
+       depends on X86_32 || IA32_EMULATION
++      depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+       ---help---
+         Map the 32-bit VDSO to the predictable old-style address too.
+diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+index c026cca..14657ae 100644
+--- a/arch/x86/Kconfig.cpu
++++ b/arch/x86/Kconfig.cpu
+@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
+ config X86_F00F_BUG
+       def_bool y
+-      depends on M586MMX || M586TSC || M586 || M486
++      depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
+ config X86_INVD_BUG
+       def_bool y
+@@ -327,7 +327,7 @@ config X86_INVD_BUG
+ config X86_ALIGNMENT_16
+       def_bool y
+-      depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
++      depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
+ config X86_INTEL_USERCOPY
+       def_bool y
+@@ -373,7 +373,7 @@ config X86_CMPXCHG64
+ # generates cmov.
+ config X86_CMOV
+       def_bool y
+-      depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++      depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
+ config X86_MINIMUM_CPU_FAMILY
+       int
+diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
+index c198b7e..63eea60 100644
+--- a/arch/x86/Kconfig.debug
++++ b/arch/x86/Kconfig.debug
+@@ -84,7 +84,7 @@ config X86_PTDUMP
+ config DEBUG_RODATA
+       bool "Write protect kernel read-only data structures"
+       default y
+-      depends on DEBUG_KERNEL
++      depends on DEBUG_KERNEL && BROKEN
+       ---help---
+         Mark the kernel read-only data as write-protected in the pagetables,
+         in order to catch accidental (and incorrect) writes to such const
+@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
+ config DEBUG_SET_MODULE_RONX
+       bool "Set loadable kernel module data as NX and text as RO"
+-      depends on MODULES
++      depends on MODULES && BROKEN
+       ---help---
+         This option helps catch unintended modifications to loadable
+         kernel module's text and read-only data. It also prevents execution
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 5c47726..8c4fa67 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -54,6 +54,7 @@ else
+         UTS_MACHINE := x86_64
+         CHECKFLAGS += -D__x86_64__ -m64
++        biarch := $(call cc-option,-m64)
+         KBUILD_AFLAGS += -m64
+         KBUILD_CFLAGS += -m64
+@@ -234,3 +235,12 @@ define archhelp
+   echo  '                  FDARGS="..."  arguments for the booted kernel'
+   echo  '                  FDINITRD=file initrd for the booted kernel'
+ endef
++
++define OLD_LD
++
++*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
++*** Please upgrade your binutils to 2.18 or newer
++endef
++
++archprepare:
++      $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
+diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
+index 379814b..add62ce 100644
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -65,6 +65,9 @@ KBUILD_CFLAGS        := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
+                  $(call cc-option, -fno-stack-protector) \
+                  $(call cc-option, -mpreferred-stack-boundary=2)
+ KBUILD_CFLAGS += $(call cc-option, -m32)
++ifdef CONSTIFY_PLUGIN
++KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
++endif
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
+index 878e4b9..20537ab 100644
+--- a/arch/x86/boot/bitops.h
++++ b/arch/x86/boot/bitops.h
+@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
+       u8 v;
+       const u32 *p = (const u32 *)addr;
+-      asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
++      asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
+       return v;
+ }
+@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
+ static inline void set_bit(int nr, void *addr)
+ {
+-      asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
++      asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
+ }
+ #endif /* BOOT_BITOPS_H */
+diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
+index 5b75319..331a4ca 100644
+--- a/arch/x86/boot/boot.h
++++ b/arch/x86/boot/boot.h
+@@ -85,7 +85,7 @@ static inline void io_delay(void)
+ static inline u16 ds(void)
+ {
+       u16 seg;
+-      asm("movw %%ds,%0" : "=rm" (seg));
++      asm volatile("movw %%ds,%0" : "=rm" (seg));
+       return seg;
+ }
+@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
+ static inline int memcmp(const void *s1, const void *s2, size_t len)
+ {
+       u8 diff;
+-      asm("repe; cmpsb; setnz %0"
++      asm volatile("repe; cmpsb; setnz %0"
+           : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+       return diff;
+ }
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index 5ef205c..342191d 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
+ KBUILD_CFLAGS += $(cflags-y)
+ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
+ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
++ifdef CONSTIFY_PLUGIN
++KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
++endif
+ KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
+index d606463..b887794 100644
+--- a/arch/x86/boot/compressed/eboot.c
++++ b/arch/x86/boot/compressed/eboot.c
+@@ -150,7 +150,6 @@ again:
+               *addr = max_addr;
+       }
+-free_pool:
+       efi_call_phys1(sys_table->boottime->free_pool, map);
+ fail:
+@@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
+       if (i == map_size / desc_size)
+               status = EFI_NOT_FOUND;
+-free_pool:
+       efi_call_phys1(sys_table->boottime->free_pool, map);
+ fail:
+       return status;
+diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
+index a53440e..c3dbf1e 100644
+--- a/arch/x86/boot/compressed/efi_stub_32.S
++++ b/arch/x86/boot/compressed/efi_stub_32.S
+@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
+        * parameter 2, ..., param n. To make things easy, we save the return
+        * address of efi_call_phys in a global variable.
+        */
+-      popl    %ecx
+-      movl    %ecx, saved_return_addr(%edx)
+-      /* get the function pointer into ECX*/
+-      popl    %ecx
+-      movl    %ecx, efi_rt_function_ptr(%edx)
++      popl    saved_return_addr(%edx)
++      popl    efi_rt_function_ptr(%edx)
+       /*
+        * 3. Call the physical function.
+        */
+-      call    *%ecx
++      call    *efi_rt_function_ptr(%edx)
+       /*
+        * 4. Balance the stack. And because EAX contain the return value,
+@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
+ 1:    popl    %edx
+       subl    $1b, %edx
+-      movl    efi_rt_function_ptr(%edx), %ecx
+-      pushl   %ecx
++      pushl   efi_rt_function_ptr(%edx)
+       /*
+        * 10. Push the saved return address onto the stack and return.
+        */
+-      movl    saved_return_addr(%edx), %ecx
+-      pushl   %ecx
+-      ret
++      jmpl    *saved_return_addr(%edx)
+ ENDPROC(efi_call_phys)
+ .previous
+diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
+index 1e3184f..0d11e2e 100644
+--- a/arch/x86/boot/compressed/head_32.S
++++ b/arch/x86/boot/compressed/head_32.S
+@@ -118,7 +118,7 @@ preferred_addr:
+       notl    %eax
+       andl    %eax, %ebx
+ #else
+-      movl    $LOAD_PHYSICAL_ADDR, %ebx
++      movl    $____LOAD_PHYSICAL_ADDR, %ebx
+ #endif
+       /* Target address to relocate to for decompression */
+@@ -204,7 +204,7 @@ relocated:
+  * and where it was actually loaded.
+  */
+       movl    %ebp, %ebx
+-      subl    $LOAD_PHYSICAL_ADDR, %ebx
++      subl    $____LOAD_PHYSICAL_ADDR, %ebx
+       jz      2f      /* Nothing to be done if loaded at compiled addr. */
+ /*
+  * Process relocations.
+@@ -212,8 +212,7 @@ relocated:
+ 1:    subl    $4, %edi
+       movl    (%edi), %ecx
+-      testl   %ecx, %ecx
+-      jz      2f
++      jecxz   2f
+       addl    %ebx, -__PAGE_OFFSET(%ebx, %ecx)
+       jmp     1b
+ 2:
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index 16f24e6..47491a3 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -97,7 +97,7 @@ ENTRY(startup_32)
+       notl    %eax
+       andl    %eax, %ebx
+ #else
+-      movl    $LOAD_PHYSICAL_ADDR, %ebx
++      movl    $____LOAD_PHYSICAL_ADDR, %ebx
+ #endif
+       /* Target address to relocate to for decompression */
+@@ -272,7 +272,7 @@ preferred_addr:
+       notq    %rax
+       andq    %rax, %rbp
+ #else
+-      movq    $LOAD_PHYSICAL_ADDR, %rbp
++      movq    $____LOAD_PHYSICAL_ADDR, %rbp
+ #endif
+       /* Target address to relocate to for decompression */
+@@ -363,8 +363,8 @@ gdt:
+       .long   gdt
+       .word   0
+       .quad   0x0000000000000000      /* NULL descriptor */
+-      .quad   0x00af9a000000ffff      /* __KERNEL_CS */
+-      .quad   0x00cf92000000ffff      /* __KERNEL_DS */
++      .quad   0x00af9b000000ffff      /* __KERNEL_CS */
++      .quad   0x00cf93000000ffff      /* __KERNEL_DS */
+       .quad   0x0080890000000000      /* TS descriptor */
+       .quad   0x0000000000000000      /* TS continued */
+ gdt_end:
+diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
+index 7cb56c6..d382d84 100644
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -303,7 +303,7 @@ static void parse_elf(void *output)
+               case PT_LOAD:
+ #ifdef CONFIG_RELOCATABLE
+                       dest = output;
+-                      dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
++                      dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
+ #else
+                       dest = (void *)(phdr->p_paddr);
+ #endif
+@@ -354,7 +354,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
+               error("Destination address too large");
+ #endif
+ #ifndef CONFIG_RELOCATABLE
+-      if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
++      if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
+               error("Wrong destination address");
+ #endif
+diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
+index 4d3ff03..e4972ff 100644
+--- a/arch/x86/boot/cpucheck.c
++++ b/arch/x86/boot/cpucheck.c
+@@ -74,7 +74,7 @@ static int has_fpu(void)
+       u16 fcw = -1, fsw = -1;
+       u32 cr0;
+-      asm("movl %%cr0,%0" : "=r" (cr0));
++      asm volatile("movl %%cr0,%0" : "=r" (cr0));
+       if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
+               cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
+               asm volatile("movl %0,%%cr0" : : "r" (cr0));
+@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
+ {
+       u32 f0, f1;
+-      asm("pushfl ; "
++      asm volatile("pushfl ; "
+           "pushfl ; "
+           "popl %0 ; "
+           "movl %0,%1 ; "
+@@ -115,7 +115,7 @@ static void get_flags(void)
+               set_bit(X86_FEATURE_FPU, cpu.flags);
+       if (has_eflag(X86_EFLAGS_ID)) {
+-              asm("cpuid"
++              asm volatile("cpuid"
+                   : "=a" (max_intel_level),
+                     "=b" (cpu_vendor[0]),
+                     "=d" (cpu_vendor[1]),
+@@ -124,7 +124,7 @@ static void get_flags(void)
+               if (max_intel_level >= 0x00000001 &&
+                   max_intel_level <= 0x0000ffff) {
+-                      asm("cpuid"
++                      asm volatile("cpuid"
+                           : "=a" (tfms),
+                             "=c" (cpu.flags[4]),
+                             "=d" (cpu.flags[0])
+@@ -136,7 +136,7 @@ static void get_flags(void)
+                               cpu.model += ((tfms >> 16) & 0xf) << 4;
+               }
+-              asm("cpuid"
++              asm volatile("cpuid"
+                   : "=a" (max_amd_level)
+                   : "a" (0x80000000)
+                   : "ebx", "ecx", "edx");
+@@ -144,7 +144,7 @@ static void get_flags(void)
+               if (max_amd_level >= 0x80000001 &&
+                   max_amd_level <= 0x8000ffff) {
+                       u32 eax = 0x80000001;
+-                      asm("cpuid"
++                      asm volatile("cpuid"
+                           : "+a" (eax),
+                             "=c" (cpu.flags[6]),
+                             "=d" (cpu.flags[1])
+@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+               u32 ecx = MSR_K7_HWCR;
+               u32 eax, edx;
+-              asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++              asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+               eax &= ~(1 << 15);
+-              asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++              asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+               get_flags();    /* Make sure it really did something */
+               err = check_flags();
+@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+               u32 ecx = MSR_VIA_FCR;
+               u32 eax, edx;
+-              asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++              asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+               eax |= (1<<1)|(1<<7);
+-              asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++              asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+               set_bit(X86_FEATURE_CX8, cpu.flags);
+               err = check_flags();
+@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+               u32 eax, edx;
+               u32 level = 1;
+-              asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+-              asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
+-              asm("cpuid"
++              asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++              asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
++              asm volatile("cpuid"
+                   : "+a" (level), "=d" (cpu.flags[0])
+                   : : "ecx", "ebx");
+-              asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++              asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+               err = check_flags();
+       }
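The asm to asm volatile changes through cpucheck.c (and the other boot files above) all address the same point: GCC may CSE, reorder or even drop an asm statement whose outputs it believes it does not need, so the patch adds volatile across these boot-time cpuid/MSR helpers to make their side effects and ordering explicit. One of the patched statements, written out as a standalone helper (hypothetical name, sketch only):

static inline void rdmsr_boot(unsigned int msr, unsigned int *lo, unsigned int *hi)
{
	/* volatile: the read must happen even if *lo and *hi appear unused later */
	asm volatile("rdmsr" : "=a" (*lo), "=d" (*hi) : "c" (msr));
}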
+diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
+index 9ec06a1..2c25e79 100644
+--- a/arch/x86/boot/header.S
++++ b/arch/x86/boot/header.S
+@@ -409,10 +409,14 @@ setup_data:              .quad 0                 # 64-bit physical pointer to
+                                               # single linked list of
+                                               # struct setup_data
+-pref_address:         .quad LOAD_PHYSICAL_ADDR        # preferred load addr
++pref_address:         .quad ____LOAD_PHYSICAL_ADDR    # preferred load addr
+ #define ZO_INIT_SIZE  (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++#define VO_INIT_SIZE  (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
++#else
+ #define VO_INIT_SIZE  (VO__end - VO__text)
++#endif
+ #if ZO_INIT_SIZE > VO_INIT_SIZE
+ #define INIT_SIZE ZO_INIT_SIZE
+ #else
+diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
+index db75d07..8e6d0af 100644
+--- a/arch/x86/boot/memory.c
++++ b/arch/x86/boot/memory.c
+@@ -19,7 +19,7 @@
+ static int detect_memory_e820(void)
+ {
+-      int count = 0;
++      unsigned int count = 0;
+       struct biosregs ireg, oreg;
+       struct e820entry *desc = boot_params.e820_map;
+       static struct e820entry buf; /* static so it is zeroed */
+diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
+index 11e8c6e..fdbb1ed 100644
+--- a/arch/x86/boot/video-vesa.c
++++ b/arch/x86/boot/video-vesa.c
+@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
+       boot_params.screen_info.vesapm_seg = oreg.es;
+       boot_params.screen_info.vesapm_off = oreg.di;
++      boot_params.screen_info.vesapm_size = oreg.cx;
+ }
+ /*
+diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
+index 43eda28..5ab5fdb 100644
+--- a/arch/x86/boot/video.c
++++ b/arch/x86/boot/video.c
+@@ -96,7 +96,7 @@ static void store_mode_params(void)
+ static unsigned int get_entry(void)
+ {
+       char entry_buf[4];
+-      int i, len = 0;
++      unsigned int i, len = 0;
+       int key;
+       unsigned int v;
+diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
+index 9105655..5e37f27 100644
+--- a/arch/x86/crypto/aes-x86_64-asm_64.S
++++ b/arch/x86/crypto/aes-x86_64-asm_64.S
+@@ -8,6 +8,8 @@
+  * including this sentence is retained in full.
+  */
++#include <asm/alternative-asm.h>
++
+ .extern crypto_ft_tab
+ .extern crypto_it_tab
+ .extern crypto_fl_tab
+@@ -70,6 +72,8 @@
+       je      B192;                   \
+       leaq    32(r9),r9;
++#define ret   pax_force_retaddr 0, 1; ret
++
+ #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
+       movq    r1,r2;                  \
+       movq    r3,r4;                  \
+diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
+index 477e9d7..3ab339f 100644
+--- a/arch/x86/crypto/aesni-intel_asm.S
++++ b/arch/x86/crypto/aesni-intel_asm.S
+@@ -31,6 +31,7 @@
+ #include <linux/linkage.h>
+ #include <asm/inst.h>
++#include <asm/alternative-asm.h>
+ #ifdef __x86_64__
+ .data
+@@ -1441,6 +1442,7 @@ _return_T_done_decrypt:
+       pop     %r14
+       pop     %r13
+       pop     %r12
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(aesni_gcm_dec)
+@@ -1705,6 +1707,7 @@ _return_T_done_encrypt:
+       pop     %r14
+       pop     %r13
+       pop     %r12
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(aesni_gcm_enc)
+@@ -1722,6 +1725,7 @@ _key_expansion_256a:
+       pxor %xmm1, %xmm0
+       movaps %xmm0, (TKEYP)
+       add $0x10, TKEYP
++      pax_force_retaddr_bts
+       ret
+ ENDPROC(_key_expansion_128)
+ ENDPROC(_key_expansion_256a)
+@@ -1748,6 +1752,7 @@ _key_expansion_192a:
+       shufps $0b01001110, %xmm2, %xmm1
+       movaps %xmm1, 0x10(TKEYP)
+       add $0x20, TKEYP
++      pax_force_retaddr_bts
+       ret
+ ENDPROC(_key_expansion_192a)
+@@ -1768,6 +1773,7 @@ _key_expansion_192b:
+       movaps %xmm0, (TKEYP)
+       add $0x10, TKEYP
++      pax_force_retaddr_bts
+       ret
+ ENDPROC(_key_expansion_192b)
+@@ -1781,6 +1787,7 @@ _key_expansion_256b:
+       pxor %xmm1, %xmm2
+       movaps %xmm2, (TKEYP)
+       add $0x10, TKEYP
++      pax_force_retaddr_bts
+       ret
+ ENDPROC(_key_expansion_256b)
+@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
+ #ifndef __x86_64__
+       popl KEYP
+ #endif
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(aesni_set_key)
+@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
+       popl KLEN
+       popl KEYP
+ #endif
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(aesni_enc)
+@@ -1974,6 +1983,7 @@ _aesni_enc1:
+       AESENC KEY STATE
+       movaps 0x70(TKEYP), KEY
+       AESENCLAST KEY STATE
++      pax_force_retaddr_bts
+       ret
+ ENDPROC(_aesni_enc1)
+@@ -2083,6 +2093,7 @@ _aesni_enc4:
+       AESENCLAST KEY STATE2
+       AESENCLAST KEY STATE3
+       AESENCLAST KEY STATE4
++      pax_force_retaddr_bts
+       ret
+ ENDPROC(_aesni_enc4)
+@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
+       popl KLEN
+       popl KEYP
+ #endif
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(aesni_dec)
+@@ -2164,6 +2176,7 @@ _aesni_dec1:
+       AESDEC KEY STATE
+       movaps 0x70(TKEYP), KEY
+       AESDECLAST KEY STATE
++      pax_force_retaddr_bts
+       ret
+ ENDPROC(_aesni_dec1)
+@@ -2273,6 +2286,7 @@ _aesni_dec4:
+       AESDECLAST KEY STATE2
+       AESDECLAST KEY STATE3
+       AESDECLAST KEY STATE4
++      pax_force_retaddr_bts
+       ret
+ ENDPROC(_aesni_dec4)
+@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
+       popl KEYP
+       popl LEN
+ #endif
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(aesni_ecb_enc)
+@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
+       popl KEYP
+       popl LEN
+ #endif
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(aesni_ecb_dec)
+@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
+       popl LEN
+       popl IVP
+ #endif
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(aesni_cbc_enc)
+@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
+       popl LEN
+       popl IVP
+ #endif
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(aesni_cbc_dec)
+@@ -2550,6 +2568,7 @@ _aesni_inc_init:
+       mov $1, TCTR_LOW
+       MOVQ_R64_XMM TCTR_LOW INC
+       MOVQ_R64_XMM CTR TCTR_LOW
++      pax_force_retaddr_bts
+       ret
+ ENDPROC(_aesni_inc_init)
+@@ -2579,6 +2598,7 @@ _aesni_inc:
+ .Linc_low:
+       movaps CTR, IV
+       PSHUFB_XMM BSWAP_MASK IV
++      pax_force_retaddr_bts
+       ret
+ ENDPROC(_aesni_inc)
+@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
+ .Lctr_enc_ret:
+       movups IV, (IVP)
+ .Lctr_enc_just_ret:
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(aesni_ctr_enc)
+@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
+       pxor INC, STATE4
+       movdqu STATE4, 0x70(OUTP)
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(aesni_xts_crypt8)
+diff --git a/arch/x86/crypto/blowfish-avx2-asm_64.S b/arch/x86/crypto/blowfish-avx2-asm_64.S
+index 784452e..46982c7 100644
+--- a/arch/x86/crypto/blowfish-avx2-asm_64.S
++++ b/arch/x86/crypto/blowfish-avx2-asm_64.S
+@@ -221,6 +221,7 @@ __blowfish_enc_blk32:
+       write_block(RXl, RXr);
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(__blowfish_enc_blk32)
+@@ -250,6 +251,7 @@ __blowfish_dec_blk32:
+       write_block(RXl, RXr);
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(__blowfish_dec_blk32)
+@@ -284,6 +286,7 @@ ENTRY(blowfish_ecb_enc_32way)
+       vzeroupper;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(blowfish_ecb_enc_32way)
+@@ -318,6 +321,7 @@ ENTRY(blowfish_ecb_dec_32way)
+       vzeroupper;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(blowfish_ecb_dec_32way)
+@@ -365,6 +369,7 @@ ENTRY(blowfish_cbc_dec_32way)
+       vzeroupper;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(blowfish_cbc_dec_32way)
+@@ -445,5 +450,6 @@ ENTRY(blowfish_ctr_32way)
+       vzeroupper;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(blowfish_ctr_32way)
+diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
+index 246c670..4d1ed00 100644
+--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
++++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
+@@ -21,6 +21,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ .file "blowfish-x86_64-asm.S"
+ .text
+@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
+       jnz .L__enc_xor;
+       write_block();
++      pax_force_retaddr 0, 1
+       ret;
+ .L__enc_xor:
+       xor_block();
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(__blowfish_enc_blk)
+@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
+       movq %r11, %rbp;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(blowfish_dec_blk)
+@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
+       popq %rbx;
+       popq %rbp;
++      pax_force_retaddr 0, 1
+       ret;
+ .L__enc_xor4:
+@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
+       popq %rbx;
+       popq %rbp;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(__blowfish_enc_blk_4way)
+@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
+       popq %rbx;
+       popq %rbp;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(blowfish_dec_blk_4way)
+diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+index ce71f92..2dd5b1e 100644
+--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
++++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+@@ -16,6 +16,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ #define CAMELLIA_TABLE_BYTE_LEN 272
+@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
+       roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+                 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
+                 %rcx, (%r9));
++      pax_force_retaddr_bts
+       ret;
+ ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
+@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
+       roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
+                 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
+                 %rax, (%r9));
++      pax_force_retaddr_bts
+       ret;
+ ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
+@@ -780,6 +783,7 @@ __camellia_enc_blk16:
+                   %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+                   %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
++      pax_force_retaddr_bts
+       ret;
+ .align 8
+@@ -865,6 +869,7 @@ __camellia_dec_blk16:
+                   %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+                   %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
++      pax_force_retaddr_bts
+       ret;
+ .align 8
+@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
+                    %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
+                    %xmm8, %rsi);
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(camellia_ecb_enc_16way)
+@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
+                    %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
+                    %xmm8, %rsi);
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(camellia_ecb_dec_16way)
+@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
+                    %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
+                    %xmm8, %rsi);
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(camellia_cbc_dec_16way)
+@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
+                    %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
+                    %xmm8, %rsi);
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(camellia_ctr_16way)
+@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
+                    %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
+                    %xmm8, %rsi);
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(camellia_xts_crypt_16way)
+diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+index 91a1878..bcf340a 100644
+--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
++++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+@@ -11,6 +11,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ #define CAMELLIA_TABLE_BYTE_LEN 272
+@@ -212,6 +213,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
+       roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
+                 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
+                 %rcx, (%r9));
++      pax_force_retaddr_bts
+       ret;
+ ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
+@@ -220,6 +222,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
+       roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
+                 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
+                 %rax, (%r9));
++      pax_force_retaddr_bts
+       ret;
+ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
+@@ -802,6 +805,7 @@ __camellia_enc_blk32:
+                   %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
+                   %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
++      pax_force_retaddr_bts
+       ret;
+ .align 8
+@@ -887,6 +891,7 @@ __camellia_dec_blk32:
+                   %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
+                   %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
++      pax_force_retaddr_bts
+       ret;
+ .align 8
+@@ -930,6 +935,7 @@ ENTRY(camellia_ecb_enc_32way)
+       vzeroupper;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(camellia_ecb_enc_32way)
+@@ -962,6 +968,7 @@ ENTRY(camellia_ecb_dec_32way)
+       vzeroupper;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(camellia_ecb_dec_32way)
+@@ -1028,6 +1035,7 @@ ENTRY(camellia_cbc_dec_32way)
+       vzeroupper;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(camellia_cbc_dec_32way)
+@@ -1166,6 +1174,7 @@ ENTRY(camellia_ctr_32way)
+       vzeroupper;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(camellia_ctr_32way)
+@@ -1331,6 +1340,7 @@ camellia_xts_crypt_32way:
+       vzeroupper;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(camellia_xts_crypt_32way)
+diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
+index 310319c..ce174a4 100644
+--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
++++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
+@@ -21,6 +21,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ .file "camellia-x86_64-asm_64.S"
+ .text
+@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
+       enc_outunpack(mov, RT1);
+       movq RRBP, %rbp;
++      pax_force_retaddr 0, 1
+       ret;
+ .L__enc_xor:
+       enc_outunpack(xor, RT1);
+       movq RRBP, %rbp;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(__camellia_enc_blk)
+@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
+       dec_outunpack();
+       movq RRBP, %rbp;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(camellia_dec_blk)
+@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
+       movq RRBP, %rbp;
+       popq %rbx;
++      pax_force_retaddr 0, 1
+       ret;
+ .L__enc2_xor:
+@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
+       movq RRBP, %rbp;
+       popq %rbx;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(__camellia_enc_blk_2way)
+@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
+       movq RRBP, %rbp;
+       movq RXOR, %rbx;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(camellia_dec_blk_2way)
+diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
+index c35fd5d..c1ee236 100644
+--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
++++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
+@@ -24,6 +24,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ .file "cast5-avx-x86_64-asm_64.S"
+@@ -281,6 +282,7 @@ __cast5_enc_blk16:
+       outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
+       outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(__cast5_enc_blk16)
+@@ -352,6 +354,7 @@ __cast5_dec_blk16:
+       outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
+       outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
++      pax_force_retaddr 0, 1
+       ret;
+ .L__skip_dec:
+@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
+       vmovdqu RR4, (6*4*4)(%r11);
+       vmovdqu RL4, (7*4*4)(%r11);
++      pax_force_retaddr
+       ret;
+ ENDPROC(cast5_ecb_enc_16way)
+@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
+       vmovdqu RR4, (6*4*4)(%r11);
+       vmovdqu RL4, (7*4*4)(%r11);
++      pax_force_retaddr
+       ret;
+ ENDPROC(cast5_ecb_dec_16way)
+@@ -469,6 +474,7 @@ ENTRY(cast5_cbc_dec_16way)
+       popq %r12;
++      pax_force_retaddr
+       ret;
+ ENDPROC(cast5_cbc_dec_16way)
+@@ -542,5 +548,6 @@ ENTRY(cast5_ctr_16way)
+       popq %r12;
++      pax_force_retaddr
+       ret;
+ ENDPROC(cast5_ctr_16way)
+diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
+index e3531f8..18ded3a 100644
+--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
++++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
+@@ -24,6 +24,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ #include "glue_helper-asm-avx.S"
+ .file "cast6-avx-x86_64-asm_64.S"
+@@ -295,6 +296,7 @@ __cast6_enc_blk8:
+       outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
+       outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(__cast6_enc_blk8)
+@@ -340,6 +342,7 @@ __cast6_dec_blk8:
+       outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
+       outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(__cast6_dec_blk8)
+@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
+       store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(cast6_ecb_enc_8way)
+@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
+       store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(cast6_ecb_dec_8way)
+@@ -399,6 +404,7 @@ ENTRY(cast6_cbc_dec_8way)
+       popq %r12;
++      pax_force_retaddr
+       ret;
+ ENDPROC(cast6_cbc_dec_8way)
+@@ -424,6 +430,7 @@ ENTRY(cast6_ctr_8way)
+       popq %r12;
++      pax_force_retaddr
+       ret;
+ ENDPROC(cast6_ctr_8way)
+@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
+       /* dst <= regs xor IVs(in dst) */
+       store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(cast6_xts_enc_8way)
+@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
+       /* dst <= regs xor IVs(in dst) */
+       store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(cast6_xts_dec_8way)
+diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+index dbc4339..3d868c5 100644
+--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
++++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+@@ -45,6 +45,7 @@
+ #include <asm/inst.h>
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
+@@ -312,6 +313,7 @@ do_return:
+       popq    %rsi
+       popq    %rdi
+       popq    %rbx
++      pax_force_retaddr 0, 1
+         ret
+         ################################################################
+diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
+index 586f41a..d02851e 100644
+--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
++++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
+@@ -18,6 +18,7 @@
+ #include <linux/linkage.h>
+ #include <asm/inst.h>
++#include <asm/alternative-asm.h>
+ .data
+@@ -93,6 +94,7 @@ __clmul_gf128mul_ble:
+       psrlq $1, T2
+       pxor T2, T1
+       pxor T1, DATA
++      pax_force_retaddr
+       ret
+ ENDPROC(__clmul_gf128mul_ble)
+@@ -105,6 +107,7 @@ ENTRY(clmul_ghash_mul)
+       call __clmul_gf128mul_ble
+       PSHUFB_XMM BSWAP DATA
+       movups DATA, (%rdi)
++      pax_force_retaddr
+       ret
+ ENDPROC(clmul_ghash_mul)
+@@ -132,6 +135,7 @@ ENTRY(clmul_ghash_update)
+       PSHUFB_XMM BSWAP DATA
+       movups DATA, (%rdi)
+ .Lupdate_just_ret:
++      pax_force_retaddr
+       ret
+ ENDPROC(clmul_ghash_update)
+@@ -157,5 +161,6 @@ ENTRY(clmul_ghash_setkey)
+       pand .Lpoly, %xmm1
+       pxor %xmm1, %xmm0
+       movups %xmm0, (%rdi)
++      pax_force_retaddr
+       ret
+ ENDPROC(clmul_ghash_setkey)
+diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
+index 9279e0b..9270820 100644
+--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
++++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
+@@ -1,4 +1,5 @@
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ # enter salsa20_encrypt_bytes
+ ENTRY(salsa20_encrypt_bytes)
+@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
+       add     %r11,%rsp
+       mov     %rdi,%rax
+       mov     %rsi,%rdx
++      pax_force_retaddr 0, 1
+       ret
+ #   bytesatleast65:
+ ._bytesatleast65:
+@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
+       add     %r11,%rsp
+       mov     %rdi,%rax
+       mov     %rsi,%rdx
++      pax_force_retaddr
+       ret
+ ENDPROC(salsa20_keysetup)
+@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
+       add     %r11,%rsp
+       mov     %rdi,%rax
+       mov     %rsi,%rdx
++      pax_force_retaddr
+       ret
+ ENDPROC(salsa20_ivsetup)
+diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
+index 2f202f4..d9164d6 100644
+--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
++++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
+@@ -24,6 +24,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ #include "glue_helper-asm-avx.S"
+ .file "serpent-avx-x86_64-asm_64.S"
+@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
+       write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+       write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(__serpent_enc_blk8_avx)
+@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
+       write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
+       write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(__serpent_dec_blk8_avx)
+@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
+       store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_ecb_enc_8way_avx)
+@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
+       store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_ecb_dec_8way_avx)
+@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
+       store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_cbc_dec_8way_avx)
+@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
+       store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_ctr_8way_avx)
+@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
+       /* dst <= regs xor IVs(in dst) */
+       store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_xts_enc_8way_avx)
+@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
+       /* dst <= regs xor IVs(in dst) */
+       store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_xts_dec_8way_avx)
+diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
+index b222085..abd483c 100644
+--- a/arch/x86/crypto/serpent-avx2-asm_64.S
++++ b/arch/x86/crypto/serpent-avx2-asm_64.S
+@@ -15,6 +15,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ #include "glue_helper-asm-avx2.S"
+ .file "serpent-avx2-asm_64.S"
+@@ -610,6 +611,7 @@ __serpent_enc_blk16:
+       write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+       write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(__serpent_enc_blk16)
+@@ -664,6 +666,7 @@ __serpent_dec_blk16:
+       write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
+       write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(__serpent_dec_blk16)
+@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
+       vzeroupper;
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_ecb_enc_16way)
+@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
+       vzeroupper;
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_ecb_dec_16way)
+@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
+       vzeroupper;
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_cbc_dec_16way)
+@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
+       vzeroupper;
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_ctr_16way)
+@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
+       vzeroupper;
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_xts_enc_16way)
+@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
+       vzeroupper;
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_xts_dec_16way)
+diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
+index acc066c..1559cc4 100644
+--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
++++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
+@@ -25,6 +25,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ .file "serpent-sse2-x86_64-asm_64.S"
+ .text
+@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
+       write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+       write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
++      pax_force_retaddr
+       ret;
+ .L__enc_xor8:
+       xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+       xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(__serpent_enc_blk_8way)
+@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
+       write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
+       write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_dec_blk_8way)
+diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
+index a410950..3356d42 100644
+--- a/arch/x86/crypto/sha1_ssse3_asm.S
++++ b/arch/x86/crypto/sha1_ssse3_asm.S
+@@ -29,6 +29,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ #define CTX   %rdi    // arg1
+ #define BUF   %rsi    // arg2
+@@ -104,6 +105,7 @@
+       pop     %r12
+       pop     %rbp
+       pop     %rbx
++      pax_force_retaddr 0, 1
+       ret
+       ENDPROC(\name)
+diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
+index 642f156..4ab07b9 100644
+--- a/arch/x86/crypto/sha256-avx-asm.S
++++ b/arch/x86/crypto/sha256-avx-asm.S
+@@ -49,6 +49,7 @@
+ #ifdef CONFIG_AS_AVX
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ ## assume buffers not aligned
+ #define    VMOVDQ vmovdqu
+@@ -460,6 +461,7 @@ done_hash:
+       popq    %r13
+       popq    %rbp
+       popq    %rbx
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(sha256_transform_avx)
+diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
+index 9e86944..2e7f95a 100644
+--- a/arch/x86/crypto/sha256-avx2-asm.S
++++ b/arch/x86/crypto/sha256-avx2-asm.S
+@@ -50,6 +50,7 @@
+ #ifdef CONFIG_AS_AVX2
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ ## assume buffers not aligned
+ #define       VMOVDQ vmovdqu
+@@ -720,6 +721,7 @@ done_hash:
+       popq    %r12
+       popq    %rbp
+       popq    %rbx
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(sha256_transform_rorx)
+diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
+index f833b74..c36ed14 100644
+--- a/arch/x86/crypto/sha256-ssse3-asm.S
++++ b/arch/x86/crypto/sha256-ssse3-asm.S
+@@ -47,6 +47,7 @@
+ ########################################################################
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ ## assume buffers not aligned
+ #define    MOVDQ movdqu
+@@ -471,6 +472,7 @@ done_hash:
+       popq    %rbp
+       popq    %rbx
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(sha256_transform_ssse3)
+diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
+index 974dde9..4533d34 100644
+--- a/arch/x86/crypto/sha512-avx-asm.S
++++ b/arch/x86/crypto/sha512-avx-asm.S
+@@ -49,6 +49,7 @@
+ #ifdef CONFIG_AS_AVX
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ .text
+@@ -364,6 +365,7 @@ updateblock:
+       mov     frame_RSPSAVE(%rsp), %rsp
+ nowork:
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(sha512_transform_avx)
+diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
+index 568b961..061ef1d 100644
+--- a/arch/x86/crypto/sha512-avx2-asm.S
++++ b/arch/x86/crypto/sha512-avx2-asm.S
+@@ -51,6 +51,7 @@
+ #ifdef CONFIG_AS_AVX2
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ .text
+@@ -678,6 +679,7 @@ done_hash:
+       # Restore Stack Pointer
+       mov     frame_RSPSAVE(%rsp), %rsp
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(sha512_transform_rorx)
+diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
+index fb56855..e23914f 100644
+--- a/arch/x86/crypto/sha512-ssse3-asm.S
++++ b/arch/x86/crypto/sha512-ssse3-asm.S
+@@ -48,6 +48,7 @@
+ ########################################################################
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ .text
+@@ -363,6 +364,7 @@ updateblock:
+       mov     frame_RSPSAVE(%rsp), %rsp
+ nowork:
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(sha512_transform_ssse3)
+diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
+index 0505813..63b1d00 100644
+--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
++++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
+@@ -24,6 +24,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ #include "glue_helper-asm-avx.S"
+ .file "twofish-avx-x86_64-asm_64.S"
+@@ -284,6 +285,7 @@ __twofish_enc_blk8:
+       outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
+       outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(__twofish_enc_blk8)
+@@ -324,6 +326,7 @@ __twofish_dec_blk8:
+       outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
+       outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(__twofish_dec_blk8)
+@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
+       store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(twofish_ecb_enc_8way)
+@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
+       store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(twofish_ecb_dec_8way)
+@@ -383,6 +388,7 @@ ENTRY(twofish_cbc_dec_8way)
+       popq %r12;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(twofish_cbc_dec_8way)
+@@ -408,6 +414,7 @@ ENTRY(twofish_ctr_8way)
+       popq %r12;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(twofish_ctr_8way)
+@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
+       /* dst <= regs xor IVs(in dst) */
+       store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(twofish_xts_enc_8way)
+@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
+       /* dst <= regs xor IVs(in dst) */
+       store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(twofish_xts_dec_8way)
+diff --git a/arch/x86/crypto/twofish-avx2-asm_64.S b/arch/x86/crypto/twofish-avx2-asm_64.S
+index e1a83b9..33006b9 100644
+--- a/arch/x86/crypto/twofish-avx2-asm_64.S
++++ b/arch/x86/crypto/twofish-avx2-asm_64.S
+@@ -11,6 +11,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ #include "glue_helper-asm-avx2.S"
+ .file "twofish-avx2-asm_64.S"
+@@ -422,6 +423,7 @@ __twofish_enc_blk16:
+       outunpack_enc16(RA, RB, RC, RD);
+       write_blocks16(RA, RB, RC, RD);
++      pax_force_retaddr_bts
+       ret;
+ ENDPROC(__twofish_enc_blk16)
+@@ -454,6 +456,7 @@ __twofish_dec_blk16:
+       outunpack_dec16(RA, RB, RC, RD);
+       write_blocks16(RA, RB, RC, RD);
++      pax_force_retaddr_bts
+       ret;
+ ENDPROC(__twofish_dec_blk16)
+@@ -476,6 +479,7 @@ ENTRY(twofish_ecb_enc_16way)
+       popq %r12;
+       vzeroupper;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(twofish_ecb_enc_16way)
+@@ -498,6 +502,7 @@ ENTRY(twofish_ecb_dec_16way)
+       popq %r12;
+       vzeroupper;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(twofish_ecb_dec_16way)
+@@ -521,6 +526,7 @@ ENTRY(twofish_cbc_dec_16way)
+       popq %r12;
+       vzeroupper;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(twofish_cbc_dec_16way)
+@@ -546,6 +552,7 @@ ENTRY(twofish_ctr_16way)
+       popq %r12;
+       vzeroupper;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(twofish_ctr_16way)
+@@ -574,6 +581,7 @@ twofish_xts_crypt_16way:
+       popq %r12;
+       vzeroupper;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(twofish_xts_crypt_16way)
+diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+index 1c3b7ce..b365c5e 100644
+--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
++++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+@@ -21,6 +21,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ .file "twofish-x86_64-asm-3way.S"
+ .text
+@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
+       popq %r13;
+       popq %r14;
+       popq %r15;
++      pax_force_retaddr 0, 1
+       ret;
+ .L__enc_xor3:
+@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
+       popq %r13;
+       popq %r14;
+       popq %r15;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(__twofish_enc_blk_3way)
+@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
+       popq %r13;
+       popq %r14;
+       popq %r15;
++      pax_force_retaddr 0, 1
+       ret;
+ ENDPROC(twofish_dec_blk_3way)
+diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
+index a039d21..29e7615 100644
+--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
++++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
+@@ -22,6 +22,7 @@
+ #include <linux/linkage.h>
+ #include <asm/asm-offsets.h>
++#include <asm/alternative-asm.h>
+ #define a_offset      0
+ #define b_offset      4
+@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
+       popq    R1
+       movq    $1,%rax
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(twofish_enc_blk)
+@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
+       popq    R1
+       movq    $1,%rax
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(twofish_dec_blk)
+diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
+index 52ff81c..98af645 100644
+--- a/arch/x86/ia32/ia32_aout.c
++++ b/arch/x86/ia32/ia32_aout.c
+@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
+       unsigned long dump_start, dump_size;
+       struct user32 dump;
++      memset(&dump, 0, sizeof(dump));
++
+       fs = get_fs();
+       set_fs(KERNEL_DS);
+       has_dumped = 1;
+diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
+index cf1a471..5ba2673 100644
+--- a/arch/x86/ia32/ia32_signal.c
++++ b/arch/x86/ia32/ia32_signal.c
+@@ -340,7 +340,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
+       sp -= frame_size;
+       /* Align the stack pointer according to the i386 ABI,
+        * i.e. so that on function entry ((sp + 4) & 15) == 0. */
+-      sp = ((sp + 4) & -16ul) - 4;
++      sp = ((sp - 12) & -16ul) - 4;
+       return (void __user *) sp;
+ }
+@@ -398,7 +398,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
+                * These are actually not used anymore, but left because some
+                * gdb versions depend on them as a marker.
+                */
+-              put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
++              put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
+       } put_user_catch(err);
+       if (err)
+@@ -440,7 +440,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
+               0xb8,
+               __NR_ia32_rt_sigreturn,
+               0x80cd,
+-              0,
++              0
+       };
+       frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
+@@ -459,20 +459,22 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
+               else
+                       put_user_ex(0, &frame->uc.uc_flags);
+               put_user_ex(0, &frame->uc.uc_link);
+-              err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
++              __compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
+               if (ksig->ka.sa.sa_flags & SA_RESTORER)
+                       restorer = ksig->ka.sa.sa_restorer;
++              else if (current->mm->context.vdso)
++                      /* Return stub is in 32bit vsyscall page */
++                      restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
+               else
+-                      restorer = VDSO32_SYMBOL(current->mm->context.vdso,
+-                                               rt_sigreturn);
++                      restorer = &frame->retcode;
+               put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
+               /*
+                * Not actually used anymore, but left because some gdb
+                * versions need it.
+                */
+-              put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
++              put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
+       } put_user_catch(err);
+       err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
+diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
+index 474dc1b..9297c58 100644
+--- a/arch/x86/ia32/ia32entry.S
++++ b/arch/x86/ia32/ia32entry.S
+@@ -15,8 +15,10 @@
+ #include <asm/irqflags.h>
+ #include <asm/asm.h>
+ #include <asm/smap.h>
++#include <asm/pgtable.h>
+ #include <linux/linkage.h>
+ #include <linux/err.h>
++#include <asm/alternative-asm.h>
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
+ #include <linux/elf-em.h>
+@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
+ ENDPROC(native_irq_enable_sysexit)
+ #endif
++      .macro pax_enter_kernel_user
++      pax_set_fptr_mask
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      call pax_enter_kernel_user
++#endif
++      .endm
++
++      .macro pax_exit_kernel_user
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      call pax_exit_kernel_user
++#endif
++#ifdef CONFIG_PAX_RANDKSTACK
++      pushq %rax
++      pushq %r11
++      call pax_randomize_kstack
++      popq %r11
++      popq %rax
++#endif
++      .endm
++
++      .macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++      call pax_erase_kstack
++#endif
++      .endm
++
+ /*
+  * 32bit SYSENTER instruction entry.
+  *
+@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
+       CFI_REGISTER    rsp,rbp
+       SWAPGS_UNSAFE_STACK
+       movq    PER_CPU_VAR(kernel_stack), %rsp
+-      addq    $(KERNEL_STACK_OFFSET),%rsp
+-      /*
+-       * No need to follow this irqs on/off section: the syscall
+-       * disabled irqs, here we enable it straight after entry:
+-       */
+-      ENABLE_INTERRUPTS(CLBR_NONE)
+       movl    %ebp,%ebp               /* zero extension */
+       pushq_cfi $__USER32_DS
+       /*CFI_REL_OFFSET ss,0*/
+@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
+       CFI_REL_OFFSET rsp,0
+       pushfq_cfi
+       /*CFI_REL_OFFSET rflags,0*/
+-      movl    TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
+-      CFI_REGISTER rip,r10
++      orl     $X86_EFLAGS_IF,(%rsp)
++      GET_THREAD_INFO(%r11)
++      movl    TI_sysenter_return(%r11), %r11d
++      CFI_REGISTER rip,r11
+       pushq_cfi $__USER32_CS
+       /*CFI_REL_OFFSET cs,0*/
+       movl    %eax, %eax
+-      pushq_cfi %r10
++      pushq_cfi %r11
+       CFI_REL_OFFSET rip,0
+       pushq_cfi %rax
+       cld
+       SAVE_ARGS 0,1,0
++      pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++      pax_erase_kstack
++#endif
++
++      /*
++       * No need to follow this irqs on/off section: the syscall
++       * disabled irqs, here we enable it straight after entry:
++       */
++      ENABLE_INTERRUPTS(CLBR_NONE)
+       /* no need to do an access_ok check here because rbp has been
+          32bit zero extended */ 
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      addq    pax_user_shadow_base,%rbp
++      ASM_PAX_OPEN_USERLAND
++#endif
++
+       ASM_STAC
+ 1:    movl    (%rbp),%ebp
+       _ASM_EXTABLE(1b,ia32_badarg)
+       ASM_CLAC
+-      orl     $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+-      testl   $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      ASM_PAX_CLOSE_USERLAND
++#endif
++
++      GET_THREAD_INFO(%r11)
++      orl    $TS_COMPAT,TI_status(%r11)
++      testl  $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
+       CFI_REMEMBER_STATE
+       jnz  sysenter_tracesys
+       cmpq    $(IA32_NR_syscalls-1),%rax
+@@ -162,12 +209,15 @@ sysenter_do_call:
+ sysenter_dispatch:
+       call    *ia32_sys_call_table(,%rax,8)
+       movq    %rax,RAX-ARGOFFSET(%rsp)
++      GET_THREAD_INFO(%r11)
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF
+-      testl   $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++      testl   $_TIF_ALLWORK_MASK,TI_flags(%r11)
+       jnz     sysexit_audit
+ sysexit_from_sys_call:
+-      andl    $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++      pax_exit_kernel_user
++      pax_erase_kstack
++      andl    $~TS_COMPAT,TI_status(%r11)
+       /* clear IF, that popfq doesn't enable interrupts early */
+       andl  $~0x200,EFLAGS-R11(%rsp) 
+       movl    RIP-R11(%rsp),%edx              /* User %eip */
+@@ -193,6 +243,9 @@ sysexit_from_sys_call:
+       movl %eax,%esi                  /* 2nd arg: syscall number */
+       movl $AUDIT_ARCH_I386,%edi      /* 1st arg: audit arch */
+       call __audit_syscall_entry
++
++      pax_erase_kstack
++
+       movl RAX-ARGOFFSET(%rsp),%eax   /* reload syscall number */
+       cmpq $(IA32_NR_syscalls-1),%rax
+       ja ia32_badsys
+@@ -204,7 +257,7 @@ sysexit_from_sys_call:
+       .endm
+       .macro auditsys_exit exit
+-      testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++      testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
+       jnz ia32_ret_from_sys_call
+       TRACE_IRQS_ON
+       ENABLE_INTERRUPTS(CLBR_NONE)
+@@ -215,11 +268,12 @@ sysexit_from_sys_call:
+ 1:    setbe %al               /* 1 if error, 0 if not */
+       movzbl %al,%edi         /* zero-extend that into %edi */
+       call __audit_syscall_exit
++      GET_THREAD_INFO(%r11)
+       movq RAX-ARGOFFSET(%rsp),%rax   /* reload syscall return value */
+       movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF
+-      testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++      testl %edi,TI_flags(%r11)
+       jz \exit
+       CLEAR_RREGS -ARGOFFSET
+       jmp int_with_check
+@@ -237,7 +291,7 @@ sysexit_audit:
+ sysenter_tracesys:
+ #ifdef CONFIG_AUDITSYSCALL
+-      testl   $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++      testl   $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
+       jz      sysenter_auditsys
+ #endif
+       SAVE_REST
+@@ -249,6 +303,9 @@ sysenter_tracesys:
+       RESTORE_REST
+       cmpq    $(IA32_NR_syscalls-1),%rax
+       ja      int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
++
++      pax_erase_kstack
++
+       jmp     sysenter_do_call
+       CFI_ENDPROC
+ ENDPROC(ia32_sysenter_target)
+@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
+ ENTRY(ia32_cstar_target)
+       CFI_STARTPROC32 simple
+       CFI_SIGNAL_FRAME
+-      CFI_DEF_CFA     rsp,KERNEL_STACK_OFFSET
++      CFI_DEF_CFA     rsp,0
+       CFI_REGISTER    rip,rcx
+       /*CFI_REGISTER  rflags,r11*/
+       SWAPGS_UNSAFE_STACK
+       movl    %esp,%r8d
+       CFI_REGISTER    rsp,r8
+       movq    PER_CPU_VAR(kernel_stack),%rsp
++      SAVE_ARGS 8*6,0,0
++      pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++      pax_erase_kstack
++#endif
++
+       /*
+        * No need to follow this irqs on/off section: the syscall
+        * disabled irqs and here we enable it straight after entry:
+        */
+       ENABLE_INTERRUPTS(CLBR_NONE)
+-      SAVE_ARGS 8,0,0
+       movl    %eax,%eax       /* zero extension */
+       movq    %rax,ORIG_RAX-ARGOFFSET(%rsp)
+       movq    %rcx,RIP-ARGOFFSET(%rsp)
+@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
+       /* no need to do an access_ok check here because r8 has been
+          32bit zero extended */ 
+       /* hardware stack frame is complete now */      
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      ASM_PAX_OPEN_USERLAND
++      movq    pax_user_shadow_base,%r8
++      addq    RSP-ARGOFFSET(%rsp),%r8
++#endif
++
+       ASM_STAC
+ 1:    movl    (%r8),%r9d
+       _ASM_EXTABLE(1b,ia32_badarg)
+       ASM_CLAC
+-      orl     $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+-      testl   $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      ASM_PAX_CLOSE_USERLAND
++#endif
++
++      GET_THREAD_INFO(%r11)
++      orl   $TS_COMPAT,TI_status(%r11)
++      testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
+       CFI_REMEMBER_STATE
+       jnz   cstar_tracesys
+       cmpq $IA32_NR_syscalls-1,%rax
+@@ -319,12 +395,15 @@ cstar_do_call:
+ cstar_dispatch:
+       call *ia32_sys_call_table(,%rax,8)
+       movq %rax,RAX-ARGOFFSET(%rsp)
++      GET_THREAD_INFO(%r11)
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF
+-      testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++      testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
+       jnz sysretl_audit
+ sysretl_from_sys_call:
+-      andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++      pax_exit_kernel_user
++      pax_erase_kstack
++      andl $~TS_COMPAT,TI_status(%r11)
+       RESTORE_ARGS 0,-ARG_SKIP,0,0,0
+       movl RIP-ARGOFFSET(%rsp),%ecx
+       CFI_REGISTER rip,rcx
+@@ -352,7 +431,7 @@ sysretl_audit:
+ cstar_tracesys:
+ #ifdef CONFIG_AUDITSYSCALL
+-      testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++      testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
+       jz cstar_auditsys
+ #endif
+       xchgl %r9d,%ebp
+@@ -366,11 +445,19 @@ cstar_tracesys:
+       xchgl %ebp,%r9d
+       cmpq $(IA32_NR_syscalls-1),%rax
+       ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
++
++      pax_erase_kstack
++
+       jmp cstar_do_call
+ END(ia32_cstar_target)
+                               
+ ia32_badarg:
+       ASM_CLAC
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      ASM_PAX_CLOSE_USERLAND
++#endif
++
+       movq $-EFAULT,%rax
+       jmp ia32_sysret
+       CFI_ENDPROC
+@@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
+       CFI_REL_OFFSET  rip,RIP-RIP
+       PARAVIRT_ADJUST_EXCEPTION_FRAME
+       SWAPGS
+-      /*
+-       * No need to follow this irqs on/off section: the syscall
+-       * disabled irqs and here we enable it straight after entry:
+-       */
+-      ENABLE_INTERRUPTS(CLBR_NONE)
+       movl %eax,%eax
+       pushq_cfi %rax
+       cld
+       /* note the registers are not zero extended to the sf.
+          this could be a problem. */
+       SAVE_ARGS 0,1,0
+-      orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+-      testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++      pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++      pax_erase_kstack
++#endif
++
++      /*
++       * No need to follow this irqs on/off section: the syscall
++       * disabled irqs and here we enable it straight after entry:
++       */
++      ENABLE_INTERRUPTS(CLBR_NONE)
++      GET_THREAD_INFO(%r11)
++      orl   $TS_COMPAT,TI_status(%r11)
++      testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
+       jnz ia32_tracesys
+       cmpq $(IA32_NR_syscalls-1),%rax
+       ja ia32_badsys
+@@ -442,6 +536,9 @@ ia32_tracesys:
+       RESTORE_REST
+       cmpq $(IA32_NR_syscalls-1),%rax
+       ja  int_ret_from_sys_call       /* ia32_tracesys has set RAX(%rsp) */
++
++      pax_erase_kstack
++
+       jmp ia32_do_call
+ END(ia32_syscall)
+diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
+index 8e0ceec..af13504 100644
+--- a/arch/x86/ia32/sys_ia32.c
++++ b/arch/x86/ia32/sys_ia32.c
+@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
+  */
+ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
+ {
+-      typeof(ubuf->st_uid) uid = 0;
+-      typeof(ubuf->st_gid) gid = 0;
++      typeof(((struct stat64 *)0)->st_uid) uid = 0;
++      typeof(((struct stat64 *)0)->st_gid) gid = 0;
+       SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
+       SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
+       if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
+diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
+index 372231c..a5aa1a1 100644
+--- a/arch/x86/include/asm/alternative-asm.h
++++ b/arch/x86/include/asm/alternative-asm.h
+@@ -18,6 +18,45 @@
+       .endm
+ #endif
++#ifdef KERNEXEC_PLUGIN
++      .macro pax_force_retaddr_bts rip=0
++      btsq $63,\rip(%rsp)
++      .endm
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
++      .macro pax_force_retaddr rip=0, reload=0
++      btsq $63,\rip(%rsp)
++      .endm
++      .macro pax_force_fptr ptr
++      btsq $63,\ptr
++      .endm
++      .macro pax_set_fptr_mask
++      .endm
++#endif
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++      .macro pax_force_retaddr rip=0, reload=0
++      .if \reload
++      pax_set_fptr_mask
++      .endif
++      orq %r10,\rip(%rsp)
++      .endm
++      .macro pax_force_fptr ptr
++      orq %r10,\ptr
++      .endm
++      .macro pax_set_fptr_mask
++      movabs $0x8000000000000000,%r10
++      .endm
++#endif
++#else
++      .macro pax_force_retaddr rip=0, reload=0
++      .endm
++      .macro pax_force_fptr ptr
++      .endm
++      .macro pax_force_retaddr_bts rip=0
++      .endm
++      .macro pax_set_fptr_mask
++      .endm
++#endif
++
+ .macro altinstruction_entry orig alt feature orig_len alt_len
+       .long \orig - .
+       .long \alt - .
+diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
+index 58ed6d9..f1cbe58 100644
+--- a/arch/x86/include/asm/alternative.h
++++ b/arch/x86/include/asm/alternative.h
+@@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+       ".pushsection .discard,\"aw\",@progbits\n"                      \
+       DISCARD_ENTRY(1)                                                \
+       ".popsection\n"                                                 \
+-      ".pushsection .altinstr_replacement, \"ax\"\n"                  \
++      ".pushsection .altinstr_replacement, \"a\"\n"                   \
+       ALTINSTR_REPLACEMENT(newinstr, feature, 1)                      \
+       ".popsection"
+@@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+       DISCARD_ENTRY(1)                                                \
+       DISCARD_ENTRY(2)                                                \
+       ".popsection\n"                                                 \
+-      ".pushsection .altinstr_replacement, \"ax\"\n"                  \
++      ".pushsection .altinstr_replacement, \"a\"\n"                   \
+       ALTINSTR_REPLACEMENT(newinstr1, feature1, 1)                    \
+       ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)                    \
+       ".popsection"
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 3388034..050f0b9 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
+ #ifdef CONFIG_X86_LOCAL_APIC
+-extern unsigned int apic_verbosity;
++extern int apic_verbosity;
+ extern int local_apic_timer_c2_ok;
+ extern int disable_apic;
+diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
+index 20370c6..a2eb9b0 100644
+--- a/arch/x86/include/asm/apm.h
++++ b/arch/x86/include/asm/apm.h
+@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
+       __asm__ __volatile__(APM_DO_ZERO_SEGS
+               "pushl %%edi\n\t"
+               "pushl %%ebp\n\t"
+-              "lcall *%%cs:apm_bios_entry\n\t"
++              "lcall *%%ss:apm_bios_entry\n\t"
+               "setc %%al\n\t"
+               "popl %%ebp\n\t"
+               "popl %%edi\n\t"
+@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+       __asm__ __volatile__(APM_DO_ZERO_SEGS
+               "pushl %%edi\n\t"
+               "pushl %%ebp\n\t"
+-              "lcall *%%cs:apm_bios_entry\n\t"
++              "lcall *%%ss:apm_bios_entry\n\t"
+               "setc %%bl\n\t"
+               "popl %%ebp\n\t"
+               "popl %%edi\n\t"
+diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
+index 722aa3b..3a0bb27 100644
+--- a/arch/x86/include/asm/atomic.h
++++ b/arch/x86/include/asm/atomic.h
+@@ -22,7 +22,18 @@
+  */
+ static inline int atomic_read(const atomic_t *v)
+ {
+-      return (*(volatile int *)&(v)->counter);
++      return (*(volatile const int *)&(v)->counter);
++}
++
++/**
++ * atomic_read_unchecked - read atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ */
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++      return (*(volatile const int *)&(v)->counter);
+ }
+ /**
+@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
+ }
+ /**
++ * atomic_set_unchecked - set atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++      v->counter = i;
++}
++
++/**
+  * atomic_add - add integer to atomic variable
+  * @i: integer value to add
+  * @v: pointer of type atomic_t
+@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
+  */
+ static inline void atomic_add(int i, atomic_t *v)
+ {
+-      asm volatile(LOCK_PREFIX "addl %1,%0"
++      asm volatile(LOCK_PREFIX "addl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX "subl %1,%0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   : "+m" (v->counter)
++                   : "ir" (i));
++}
++
++/**
++ * atomic_add_unchecked - add integer to atomic variable
++ * @i: integer value to add
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++      asm volatile(LOCK_PREFIX "addl %1,%0\n"
+                    : "+m" (v->counter)
+                    : "ir" (i));
+ }
+@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
+  */
+ static inline void atomic_sub(int i, atomic_t *v)
+ {
+-      asm volatile(LOCK_PREFIX "subl %1,%0"
++      asm volatile(LOCK_PREFIX "subl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX "addl %1,%0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   : "+m" (v->counter)
++                   : "ir" (i));
++}
++
++/**
++ * atomic_sub_unchecked - subtract integer from atomic variable
++ * @i: integer value to subtract
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++      asm volatile(LOCK_PREFIX "subl %1,%0\n"
+                    : "+m" (v->counter)
+                    : "ir" (i));
+ }
+@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
+ {
+       unsigned char c;
+-      asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
++      asm volatile(LOCK_PREFIX "subl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX "addl %2,%0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   "sete %1\n"
+                    : "+m" (v->counter), "=qm" (c)
+                    : "ir" (i) : "memory");
+       return c;
+@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
+  */
+ static inline void atomic_inc(atomic_t *v)
+ {
+-      asm volatile(LOCK_PREFIX "incl %0"
++      asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX "decl %0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   : "+m" (v->counter));
++}
++
++/**
++ * atomic_inc_unchecked - increment atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++      asm volatile(LOCK_PREFIX "incl %0\n"
+                    : "+m" (v->counter));
+ }
+@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
+  */
+ static inline void atomic_dec(atomic_t *v)
+ {
+-      asm volatile(LOCK_PREFIX "decl %0"
++      asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX "incl %0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   : "+m" (v->counter));
++}
++
++/**
++ * atomic_dec_unchecked - decrement atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically decrements @v by 1.
++ */
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++      asm volatile(LOCK_PREFIX "decl %0\n"
+                    : "+m" (v->counter));
+ }
+@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
+ {
+       unsigned char c;
+-      asm volatile(LOCK_PREFIX "decl %0; sete %1"
++      asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX "incl %0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   "sete %1\n"
+                    : "+m" (v->counter), "=qm" (c)
+                    : : "memory");
+       return c != 0;
+@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
+ {
+       unsigned char c;
+-      asm volatile(LOCK_PREFIX "incl %0; sete %1"
++      asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX "decl %0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   "sete %1\n"
++                   : "+m" (v->counter), "=qm" (c)
++                   : : "memory");
++      return c != 0;
++}
++
++/**
++ * atomic_inc_and_test_unchecked - increment and test
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1
++ * and returns true if the result is zero, or false for all
++ * other cases.
++ */
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++      unsigned char c;
++
++      asm volatile(LOCK_PREFIX "incl %0\n"
++                   "sete %1\n"
+                    : "+m" (v->counter), "=qm" (c)
+                    : : "memory");
+       return c != 0;
+@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
+ {
+       unsigned char c;
+-      asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
++      asm volatile(LOCK_PREFIX "addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX "subl %2,%0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   "sets %1\n"
+                    : "+m" (v->counter), "=qm" (c)
+                    : "ir" (i) : "memory");
+       return c;
+@@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
+  */
+ static inline int atomic_add_return(int i, atomic_t *v)
+ {
++      return i + xadd_check_overflow(&v->counter, i);
++}
++
++/**
++ * atomic_add_return_unchecked - add integer and return
++ * @i: integer value to add
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + @v
++ */
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
+       return i + xadd(&v->counter, i);
+ }
+@@ -188,6 +362,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
+ }
+ #define atomic_inc_return(v)  (atomic_add_return(1, v))
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++      return atomic_add_return_unchecked(1, v);
++}
+ #define atomic_dec_return(v)  (atomic_sub_return(1, v))
+ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+@@ -195,11 +373,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+       return cmpxchg(&v->counter, old, new);
+ }
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++      return cmpxchg(&v->counter, old, new);
++}
++
+ static inline int atomic_xchg(atomic_t *v, int new)
+ {
+       return xchg(&v->counter, new);
+ }
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++      return xchg(&v->counter, new);
++}
++
+ /**
+  * __atomic_add_unless - add unless the number is already a given value
+  * @v: pointer of type atomic_t
+@@ -211,12 +399,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
+  */
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+-      int c, old;
++      int c, old, new;
+       c = atomic_read(v);
+       for (;;) {
+-              if (unlikely(c == (u)))
++              if (unlikely(c == u))
+                       break;
+-              old = atomic_cmpxchg((v), c, c + (a));
++
++              asm volatile("addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                           "jno 0f\n"
++                           "subl %2,%0\n"
++                           "int $4\n0:\n"
++                           _ASM_EXTABLE(0b, 0b)
++#endif
++
++                           : "=r" (new)
++                           : "0" (c), "ir" (a));
++
++              old = atomic_cmpxchg(v, c, new);
+               if (likely(old == c))
+                       break;
+               c = old;
+@@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ }
+ /**
++ * atomic_inc_not_zero_hint - increment if not null
++ * @v: pointer of type atomic_t
++ * @hint: probable value of the atomic before the increment
++ *
++ * This version of atomic_inc_not_zero() gives a hint of probable
++ * value of the atomic. This helps processor to not read the memory
++ * before doing the atomic read/modify/write cycle, lowering
++ * number of bus transactions on some arches.
++ *
++ * Returns: 0 if increment was not done, 1 otherwise.
++ */
++#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
++static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
++{
++      int val, c = hint, new;
++
++      /* sanity test, should be removed by compiler if hint is a constant */
++      if (!hint)
++              return __atomic_add_unless(v, 1, 0);
++
++      do {
++              asm volatile("incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                           "jno 0f\n"
++                           "decl %0\n"
++                           "int $4\n0:\n"
++                           _ASM_EXTABLE(0b, 0b)
++#endif
++
++                           : "=r" (new)
++                           : "0" (c));
++
++              val = atomic_cmpxchg(v, c, new);
++              if (val == c)
++                      return 1;
++              c = val;
++      } while (c);
++
++      return 0;
++}
++
++/**
+  * atomic_inc_short - increment of a short integer
+  * @v: pointer to type int
+  *
+@@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
+ #endif
+ /* These are x86-specific, used by some header files */
+-#define atomic_clear_mask(mask, addr)                         \
+-      asm volatile(LOCK_PREFIX "andl %0,%1"                   \
+-                   : : "r" (~(mask)), "m" (*(addr)) : "memory")
++static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
++{
++      asm volatile(LOCK_PREFIX "andl %1,%0"
++                   : "+m" (v->counter)
++                   : "r" (~(mask))
++                   : "memory");
++}
+-#define atomic_set_mask(mask, addr)                           \
+-      asm volatile(LOCK_PREFIX "orl %0,%1"                    \
+-                   : : "r" ((unsigned)(mask)), "m" (*(addr))  \
+-                   : "memory")
++static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
++{
++      asm volatile(LOCK_PREFIX "andl %1,%0"
++                   : "+m" (v->counter)
++                   : "r" (~(mask))
++                   : "memory");
++}
++
++static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
++{
++      asm volatile(LOCK_PREFIX "orl %1,%0"
++                   : "+m" (v->counter)
++                   : "r" (mask)
++                   : "memory");
++}
++
++static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
++{
++      asm volatile(LOCK_PREFIX "orl %1,%0"
++                   : "+m" (v->counter)
++                   : "r" (mask)
++                   : "memory");
++}
+ /* Atomic operations are already serializing on x86 */
+ #define smp_mb__before_atomic_dec()   barrier()
+diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
+index b154de7..aadebd8 100644
+--- a/arch/x86/include/asm/atomic64_32.h
++++ b/arch/x86/include/asm/atomic64_32.h
+@@ -12,6 +12,14 @@ typedef struct {
+       u64 __aligned(8) counter;
+ } atomic64_t;
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++      u64 __aligned(8) counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
++
+ #define ATOMIC64_INIT(val)    { (val) }
+ #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
+@@ -37,21 +45,31 @@ typedef struct {
+       ATOMIC64_DECL_ONE(sym##_386)
+ ATOMIC64_DECL_ONE(add_386);
++ATOMIC64_DECL_ONE(add_unchecked_386);
+ ATOMIC64_DECL_ONE(sub_386);
++ATOMIC64_DECL_ONE(sub_unchecked_386);
+ ATOMIC64_DECL_ONE(inc_386);
++ATOMIC64_DECL_ONE(inc_unchecked_386);
+ ATOMIC64_DECL_ONE(dec_386);
++ATOMIC64_DECL_ONE(dec_unchecked_386);
+ #endif
+ #define alternative_atomic64(f, out, in...) \
+       __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
+ ATOMIC64_DECL(read);
++ATOMIC64_DECL(read_unchecked);
+ ATOMIC64_DECL(set);
++ATOMIC64_DECL(set_unchecked);
+ ATOMIC64_DECL(xchg);
+ ATOMIC64_DECL(add_return);
++ATOMIC64_DECL(add_return_unchecked);
+ ATOMIC64_DECL(sub_return);
++ATOMIC64_DECL(sub_return_unchecked);
+ ATOMIC64_DECL(inc_return);
++ATOMIC64_DECL(inc_return_unchecked);
+ ATOMIC64_DECL(dec_return);
++ATOMIC64_DECL(dec_return_unchecked);
+ ATOMIC64_DECL(dec_if_positive);
+ ATOMIC64_DECL(inc_not_zero);
+ ATOMIC64_DECL(add_unless);
+@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
+ }
+ /**
++ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
++ * @p: pointer to type atomic64_unchecked_t
++ * @o: expected value
++ * @n: new value
++ *
++ * Atomically sets @v to @n if it was equal to @o and returns
++ * the old value.
++ */
++
++static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
++{
++      return cmpxchg64(&v->counter, o, n);
++}
++
++/**
+  * atomic64_xchg - xchg atomic64 variable
+  * @v: pointer to type atomic64_t
+  * @n: value to assign
+@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+ }
+ /**
++ * atomic64_set_unchecked - set atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ * @n: value to assign
++ *
++ * Atomically sets the value of @v to @n.
++ */
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
++{
++      unsigned high = (unsigned)(i >> 32);
++      unsigned low = (unsigned)i;
++      alternative_atomic64(set, /* no output */,
++                           "S" (v), "b" (low), "c" (high)
++                           : "eax", "edx", "memory");
++}
++
++/**
+  * atomic64_read - read atomic64 variable
+  * @v: pointer to type atomic64_t
+  *
+@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+  }
+ /**
++ * atomic64_read_unchecked - read atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically reads the value of @v and returns it.
++ */
++static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
++{
++      long long r;
++      alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
++      return r;
++ }
++
++/**
+  * atomic64_add_return - add and return
+  * @i: integer value to add
+  * @v: pointer to type atomic64_t
+@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
+       return i;
+ }
++/**
++ * atomic64_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + *@v
++ */
++static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
++{
++      alternative_atomic64(add_return_unchecked,
++                           ASM_OUTPUT2("+A" (i), "+c" (v)),
++                           ASM_NO_INPUT_CLOBBER("memory"));
++      return i;
++}
++
+ /*
+  * Other variants with different arithmetic operators:
+  */
+@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
+       return a;
+ }
++static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++      long long a;
++      alternative_atomic64(inc_return_unchecked, "=&A" (a),
++                           "S" (v) : "memory", "ecx");
++      return a;
++}
++
+ static inline long long atomic64_dec_return(atomic64_t *v)
+ {
+       long long a;
+@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
+ }
+ /**
++ * atomic64_add_unchecked - add integer to atomic64 variable
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
++{
++      __alternative_atomic64(add_unchecked, add_return_unchecked,
++                             ASM_OUTPUT2("+A" (i), "+c" (v)),
++                             ASM_NO_INPUT_CLOBBER("memory"));
++      return i;
++}
++
++/**
+  * atomic64_sub - subtract the atomic64 variable
+  * @i: integer value to subtract
+  * @v: pointer to type atomic64_t
+diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
+index 0e1cbfc..5623683 100644
+--- a/arch/x86/include/asm/atomic64_64.h
++++ b/arch/x86/include/asm/atomic64_64.h
+@@ -18,7 +18,19 @@
+  */
+ static inline long atomic64_read(const atomic64_t *v)
+ {
+-      return (*(volatile long *)&(v)->counter);
++      return (*(volatile const long *)&(v)->counter);
++}
++
++/**
++ * atomic64_read_unchecked - read atomic64 variable
++ * @v: pointer of type atomic64_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ * Doesn't imply a read memory barrier.
++ */
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++      return (*(volatile const long *)&(v)->counter);
+ }
+ /**
+@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
+ }
+ /**
++ * atomic64_set_unchecked - set atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++      v->counter = i;
++}
++
++/**
+  * atomic64_add - add integer to atomic64 variable
+  * @i: integer value to add
+  * @v: pointer to type atomic64_t
+@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
+  */
+ static inline void atomic64_add(long i, atomic64_t *v)
+ {
++      asm volatile(LOCK_PREFIX "addq %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX "subq %1,%0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   : "=m" (v->counter)
++                   : "er" (i), "m" (v->counter));
++}
++
++/**
++ * atomic64_add_unchecked - add integer to atomic64 variable
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
++{
+       asm volatile(LOCK_PREFIX "addq %1,%0"
+                    : "=m" (v->counter)
+                    : "er" (i), "m" (v->counter));
+@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
+  */
+ static inline void atomic64_sub(long i, atomic64_t *v)
+ {
+-      asm volatile(LOCK_PREFIX "subq %1,%0"
++      asm volatile(LOCK_PREFIX "subq %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX "addq %1,%0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   : "=m" (v->counter)
++                   : "er" (i), "m" (v->counter));
++}
++
++/**
++ * atomic64_sub_unchecked - subtract the atomic64 variable
++ * @i: integer value to subtract
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
++{
++      asm volatile(LOCK_PREFIX "subq %1,%0\n"
+                    : "=m" (v->counter)
+                    : "er" (i), "m" (v->counter));
+ }
+@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+ {
+       unsigned char c;
+-      asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
++      asm volatile(LOCK_PREFIX "subq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX "addq %2,%0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   "sete %1\n"
+                    : "=m" (v->counter), "=qm" (c)
+                    : "er" (i), "m" (v->counter) : "memory");
+       return c;
+@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+  */
+ static inline void atomic64_inc(atomic64_t *v)
+ {
++      asm volatile(LOCK_PREFIX "incq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX "decq %0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   : "=m" (v->counter)
++                   : "m" (v->counter));
++}
++
++/**
++ * atomic64_inc_unchecked - increment atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
+       asm volatile(LOCK_PREFIX "incq %0"
+                    : "=m" (v->counter)
+                    : "m" (v->counter));
+@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
+  */
+ static inline void atomic64_dec(atomic64_t *v)
+ {
+-      asm volatile(LOCK_PREFIX "decq %0"
++      asm volatile(LOCK_PREFIX "decq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX "incq %0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   : "=m" (v->counter)
++                   : "m" (v->counter));
++}
++
++/**
++ * atomic64_dec_unchecked - decrement atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically decrements @v by 1.
++ */
++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++{
++      asm volatile(LOCK_PREFIX "decq %0\n"
+                    : "=m" (v->counter)
+                    : "m" (v->counter));
+ }
+@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
+ {
+       unsigned char c;
+-      asm volatile(LOCK_PREFIX "decq %0; sete %1"
++      asm volatile(LOCK_PREFIX "decq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX "incq %0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   "sete %1\n"
+                    : "=m" (v->counter), "=qm" (c)
+                    : "m" (v->counter) : "memory");
+       return c != 0;
+@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
+ {
+       unsigned char c;
+-      asm volatile(LOCK_PREFIX "incq %0; sete %1"
++      asm volatile(LOCK_PREFIX "incq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX "decq %0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   "sete %1\n"
+                    : "=m" (v->counter), "=qm" (c)
+                    : "m" (v->counter) : "memory");
+       return c != 0;
+@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
+ {
+       unsigned char c;
+-      asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
++      asm volatile(LOCK_PREFIX "addq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX "subq %2,%0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   "sets %1\n"
+                    : "=m" (v->counter), "=qm" (c)
+                    : "er" (i), "m" (v->counter) : "memory");
+       return c;
+@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
+  */
+ static inline long atomic64_add_return(long i, atomic64_t *v)
+ {
++      return i + xadd_check_overflow(&v->counter, i);
++}
++
++/**
++ * atomic64_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + @v
++ */
++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
++{
+       return i + xadd(&v->counter, i);
+ }
+@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
+ }
+ #define atomic64_inc_return(v)  (atomic64_add_return(1, (v)))
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++      return atomic64_add_return_unchecked(1, v);
++}
+ #define atomic64_dec_return(v)  (atomic64_sub_return(1, (v)))
+ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+       return cmpxchg(&v->counter, old, new);
+ }
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
++{
++      return cmpxchg(&v->counter, old, new);
++}
++
+ static inline long atomic64_xchg(atomic64_t *v, long new)
+ {
+       return xchg(&v->counter, new);
+@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
+  */
+ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+-      long c, old;
++      long c, old, new;
+       c = atomic64_read(v);
+       for (;;) {
+-              if (unlikely(c == (u)))
++              if (unlikely(c == u))
+                       break;
+-              old = atomic64_cmpxchg((v), c, c + (a));
++
++              asm volatile("add %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                           "jno 0f\n"
++                           "sub %2,%0\n"
++                           "int $4\n0:\n"
++                           _ASM_EXTABLE(0b, 0b)
++#endif
++
++                           : "=r" (new)
++                           : "0" (c), "ir" (a));
++
++              old = atomic64_cmpxchg(v, c, new);
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+-      return c != (u);
++      return c != u;
+ }
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
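The recurring CONFIG_PAX_REFCOUNT pattern in the hunks above performs the arithmetic, then uses jno to skip a recovery path that undoes the operation and raises int $4 (the overflow exception), so a wrapping reference counter traps instead of silently going negative; the *_unchecked variants keep the original unguarded behaviour for counters where wraparound is intentional. A minimal user-space sketch of the same idea, using the GCC overflow builtin instead of the patch's inline assembly (illustration only, not part of the patch):

/* Hypothetical user-space illustration of the CONFIG_PAX_REFCOUNT idea:
 * detect signed overflow on the add and trap instead of wrapping.  The
 * real patch does this with LOCK-prefixed asm plus jno/int $4. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int64_t refcount_add_checked(int64_t *counter, int64_t i)
{
        int64_t newval;

        if (__builtin_add_overflow(*counter, i, &newval)) {
                /* the kernel patch undoes the add and raises int $4;
                 * aborting is the closest user-space analogue */
                fprintf(stderr, "refcount overflow detected\n");
                abort();
        }
        *counter = newval;
        return newval;
}

int main(void)
{
        int64_t c = INT64_MAX - 1;

        printf("%lld\n", (long long)refcount_add_checked(&c, 1));
        /* one more refcount_add_checked(&c, 1) would trip the check */
        return 0;
}

Built with gcc -O2, calling refcount_add_checked once more on the shown counter aborts the process, which is the user-space analogue of the int $4 trap the patch installs.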
+diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
+index 6dfd019..28e188d 100644
+--- a/arch/x86/include/asm/bitops.h
++++ b/arch/x86/include/asm/bitops.h
+@@ -40,7 +40,7 @@
+  * a mask operation on a byte.
+  */
+ #define IS_IMMEDIATE(nr)              (__builtin_constant_p(nr))
+-#define CONST_MASK_ADDR(nr, addr)     BITOP_ADDR((void *)(addr) + ((nr)>>3))
++#define CONST_MASK_ADDR(nr, addr)     BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
+ #define CONST_MASK(nr)                        (1 << ((nr) & 7))
+ /**
+@@ -486,7 +486,7 @@ static inline int fls(int x)
+  * at position 64.
+  */
+ #ifdef CONFIG_X86_64
+-static __always_inline int fls64(__u64 x)
++static __always_inline long fls64(__u64 x)
+ {
+       int bitpos = -1;
+       /*
+diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
+index 4fa687a..60f2d39 100644
+--- a/arch/x86/include/asm/boot.h
++++ b/arch/x86/include/asm/boot.h
+@@ -6,10 +6,15 @@
+ #include <uapi/asm/boot.h>
+ /* Physical address where kernel should be loaded. */
+-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
++#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
+                               + (CONFIG_PHYSICAL_ALIGN - 1)) \
+                               & ~(CONFIG_PHYSICAL_ALIGN - 1))
++#ifndef __ASSEMBLY__
++extern unsigned char __LOAD_PHYSICAL_ADDR[];
++#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
++#endif
++
+ /* Minimum kernel alignment, as a power of two */
+ #ifdef CONFIG_X86_64
+ #define MIN_KERNEL_ALIGN_LG2  PMD_SHIFT
+diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
+index 48f99f1..d78ebf9 100644
+--- a/arch/x86/include/asm/cache.h
++++ b/arch/x86/include/asm/cache.h
+@@ -5,12 +5,13 @@
+ /* L1 cache line size */
+ #define L1_CACHE_SHIFT        (CONFIG_X86_L1_CACHE_SHIFT)
+-#define L1_CACHE_BYTES        (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES        (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
++#define __read_only __attribute__((__section__(".data..read_only")))
+ #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
+-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
++#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
+ #ifdef CONFIG_X86_VSMP
+ #ifdef CONFIG_SMP
+diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
+index 9863ee3..4a1f8e1 100644
+--- a/arch/x86/include/asm/cacheflush.h
++++ b/arch/x86/include/asm/cacheflush.h
+@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
+       unsigned long pg_flags = pg->flags & _PGMT_MASK;
+       if (pg_flags == _PGMT_DEFAULT)
+-              return -1;
++              return ~0UL;
+       else if (pg_flags == _PGMT_WC)
+               return _PAGE_CACHE_WC;
+       else if (pg_flags == _PGMT_UC_MINUS)
+diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
+index 46fc474..b02b0f9 100644
+--- a/arch/x86/include/asm/checksum_32.h
++++ b/arch/x86/include/asm/checksum_32.h
+@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
+                                           int len, __wsum sum,
+                                           int *src_err_ptr, int *dst_err_ptr);
++asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
++                                                int len, __wsum sum,
++                                                int *src_err_ptr, int *dst_err_ptr);
++
++asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
++                                                int len, __wsum sum,
++                                                int *src_err_ptr, int *dst_err_ptr);
++
+ /*
+  *    Note: when you get a NULL pointer exception here this means someone
+  *    passed in an incorrect kernel address to one of these functions.
+@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
+                                                int *err_ptr)
+ {
+       might_sleep();
+-      return csum_partial_copy_generic((__force void *)src, dst,
++      return csum_partial_copy_generic_from_user((__force void *)src, dst,
+                                        len, sum, err_ptr, NULL);
+ }
+@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
+ {
+       might_sleep();
+       if (access_ok(VERIFY_WRITE, dst, len))
+-              return csum_partial_copy_generic(src, (__force void *)dst,
++              return csum_partial_copy_generic_to_user(src, (__force void *)dst,
+                                                len, sum, NULL, err_ptr);
+       if (len)
+diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
+index d47786a..ce1b05d 100644
+--- a/arch/x86/include/asm/cmpxchg.h
++++ b/arch/x86/include/asm/cmpxchg.h
+@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
+       __compiletime_error("Bad argument size for cmpxchg");
+ extern void __xadd_wrong_size(void)
+       __compiletime_error("Bad argument size for xadd");
++extern void __xadd_check_overflow_wrong_size(void)
++      __compiletime_error("Bad argument size for xadd_check_overflow");
+ extern void __add_wrong_size(void)
+       __compiletime_error("Bad argument size for add");
++extern void __add_check_overflow_wrong_size(void)
++      __compiletime_error("Bad argument size for add_check_overflow");
+ /*
+  * Constants for operation sizes. On 32-bit, the 64-bit size it set to
+@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
+               __ret;                                                  \
+       })
++#define __xchg_op_check_overflow(ptr, arg, op, lock)                  \
++      ({                                                              \
++              __typeof__ (*(ptr)) __ret = (arg);                      \
++              switch (sizeof(*(ptr))) {                               \
++              case __X86_CASE_L:                                      \
++                      asm volatile (lock #op "l %0, %1\n"             \
++                                    "jno 0f\n"                        \
++                                    "mov %0,%1\n"                     \
++                                    "int $4\n0:\n"                    \
++                                    _ASM_EXTABLE(0b, 0b)              \
++                                    : "+r" (__ret), "+m" (*(ptr))     \
++                                    : : "memory", "cc");              \
++                      break;                                          \
++              case __X86_CASE_Q:                                      \
++                      asm volatile (lock #op "q %q0, %1\n"            \
++                                    "jno 0f\n"                        \
++                                    "mov %0,%1\n"                     \
++                                    "int $4\n0:\n"                    \
++                                    _ASM_EXTABLE(0b, 0b)              \
++                                    : "+r" (__ret), "+m" (*(ptr))     \
++                                    : : "memory", "cc");              \
++                      break;                                          \
++              default:                                                \
++                      __ ## op ## _check_overflow_wrong_size();       \
++              }                                                       \
++              __ret;                                                  \
++      })
++
+ /*
+  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
+  * Since this is generally used to protect other memory information, we
+@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
+ #define xadd_sync(ptr, inc)   __xadd((ptr), (inc), "lock; ")
+ #define xadd_local(ptr, inc)  __xadd((ptr), (inc), "")
++#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
++#define xadd_check_overflow(ptr, inc)         __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
++
+ #define __add(ptr, inc, lock)                                         \
+       ({                                                              \
+               __typeof__ (*(ptr)) __ret = (inc);                      \
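The xadd_check_overflow macro defined above is what lets atomic64_add_return stay a one-liner: xadd writes the sum back but returns the previous value, so adding i to that old value yields the post-add result while the overflow check rides along in the same instruction sequence. A small stand-alone sketch of the xadd return-value convention, using a GCC atomic builtin in place of the real instruction (illustration only):

/* Why "i + xadd(&v->counter, i)" equals the post-add value: xadd stores
 * old + i into the target but hands back the old value.  __atomic_fetch_add
 * stands in for the real xadd instruction here. */
#include <stdio.h>

static long add_return(long i, long *counter)
{
        long old = __atomic_fetch_add(counter, i, __ATOMIC_SEQ_CST);

        return old + i;         /* value of *counter right after the add */
}

int main(void)
{
        long c = 40;
        long ret = add_return(2, &c);

        printf("%ld %ld\n", ret, c);    /* prints "42 42" */
        return 0;
}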
+diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
+index 59c6c40..5e0b22c 100644
+--- a/arch/x86/include/asm/compat.h
++++ b/arch/x86/include/asm/compat.h
+@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
+ typedef u32           compat_uint_t;
+ typedef u32           compat_ulong_t;
+ typedef u64 __attribute__((aligned(4))) compat_u64;
+-typedef u32           compat_uptr_t;
++typedef u32           __user compat_uptr_t;
+ struct compat_timespec {
+       compat_time_t   tv_sec;
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index e99ac27..10d834e 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -203,7 +203,7 @@
+ #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
+ #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
+ #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
+-
++#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
+ #define X86_FEATURE_FSGSBASE  (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+@@ -211,7 +211,7 @@
+ #define X86_FEATURE_BMI1      (9*32+ 3) /* 1st group bit manipulation extensions */
+ #define X86_FEATURE_HLE               (9*32+ 4) /* Hardware Lock Elision */
+ #define X86_FEATURE_AVX2      (9*32+ 5) /* AVX2 instructions */
+-#define X86_FEATURE_SMEP      (9*32+ 7) /* Supervisor Mode Execution Protection */
++#define X86_FEATURE_SMEP      (9*32+ 7) /* Supervisor Mode Execution Prevention */
+ #define X86_FEATURE_BMI2      (9*32+ 8) /* 2nd group bit manipulation extensions */
+ #define X86_FEATURE_ERMS      (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
+ #define X86_FEATURE_INVPCID   (9*32+10) /* Invalidate Processor Context ID */
+@@ -353,6 +353,7 @@ extern const char * const x86_power_flags[32];
+ #undef  cpu_has_centaur_mcr
+ #define cpu_has_centaur_mcr   0
++#define cpu_has_pcid          boot_cpu_has(X86_FEATURE_PCID)
+ #endif /* CONFIG_X86_64 */
+ #if __GNUC__ >= 4
+@@ -394,7 +395,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
+                            ".section .discard,\"aw\",@progbits\n"
+                            " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
+                            ".previous\n"
+-                           ".section .altinstr_replacement,\"ax\"\n"
++                           ".section .altinstr_replacement,\"a\"\n"
+                            "3: movb $1,%0\n"
+                            "4:\n"
+                            ".previous\n"
+diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
+index 8bf1c06..b6ae785 100644
+--- a/arch/x86/include/asm/desc.h
++++ b/arch/x86/include/asm/desc.h
+@@ -4,6 +4,7 @@
+ #include <asm/desc_defs.h>
+ #include <asm/ldt.h>
+ #include <asm/mmu.h>
++#include <asm/pgtable.h>
+ #include <linux/smp.h>
+ #include <linux/percpu.h>
+@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
+       desc->type              = (info->read_exec_only ^ 1) << 1;
+       desc->type             |= info->contents << 2;
++      desc->type             |= info->seg_not_present ^ 1;
+       desc->s                 = 1;
+       desc->dpl               = 0x3;
+@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
+ }
+ extern struct desc_ptr idt_descr;
+-extern gate_desc idt_table[];
+ extern struct desc_ptr nmi_idt_descr;
+-extern gate_desc nmi_idt_table[];
+-
+-struct gdt_page {
+-      struct desc_struct gdt[GDT_ENTRIES];
+-} __attribute__((aligned(PAGE_SIZE)));
+-
+-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
++extern gate_desc idt_table[256];
++extern gate_desc nmi_idt_table[256];
++extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
+ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+ {
+-      return per_cpu(gdt_page, cpu).gdt;
++      return cpu_gdt_table[cpu];
+ }
+ #ifdef CONFIG_X86_64
+@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
+                            unsigned long base, unsigned dpl, unsigned flags,
+                            unsigned short seg)
+ {
+-      gate->a = (seg << 16) | (base & 0xffff);
+-      gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
++      gate->gate.offset_low   = base;
++      gate->gate.seg          = seg;
++      gate->gate.reserved     = 0;
++      gate->gate.type         = type;
++      gate->gate.s            = 0;
++      gate->gate.dpl          = dpl;
++      gate->gate.p            = 1;
++      gate->gate.offset_high  = base >> 16;
+ }
+ #endif
+@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
+ static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
+ {
++      pax_open_kernel();
+       memcpy(&idt[entry], gate, sizeof(*gate));
++      pax_close_kernel();
+ }
+ static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
+ {
++      pax_open_kernel();
+       memcpy(&ldt[entry], desc, 8);
++      pax_close_kernel();
+ }
+ static inline void
+@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
+       default:        size = sizeof(*gdt);            break;
+       }
++      pax_open_kernel();
+       memcpy(&gdt[entry], desc, size);
++      pax_close_kernel();
+ }
+ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
+@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
+ static inline void native_load_tr_desc(void)
+ {
++      pax_open_kernel();
+       asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
++      pax_close_kernel();
+ }
+ static inline void native_load_gdt(const struct desc_ptr *dtr)
+@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
+       struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+       unsigned int i;
++      pax_open_kernel();
+       for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+               gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
++      pax_close_kernel();
+ }
+ #define _LDT_empty(info)                              \
+@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
+       preempt_enable();
+ }
+-static inline unsigned long get_desc_base(const struct desc_struct *desc)
++static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
+ {
+       return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
+ }
+@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
+ }
+ #ifdef CONFIG_X86_64
+-static inline void set_nmi_gate(int gate, void *addr)
++static inline void set_nmi_gate(int gate, const void *addr)
+ {
+       gate_desc s;
+@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
+ }
+ #endif
+-static inline void _set_gate(int gate, unsigned type, void *addr,
++static inline void _set_gate(int gate, unsigned type, const void *addr,
+                            unsigned dpl, unsigned ist, unsigned seg)
+ {
+       gate_desc s;
+@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
+  * Pentium F0 0F bugfix can have resulted in the mapped
+  * IDT being write-protected.
+  */
+-static inline void set_intr_gate(unsigned int n, void *addr)
++static inline void set_intr_gate(unsigned int n, const void *addr)
+ {
+       BUG_ON((unsigned)n > 0xFF);
+       _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
+@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
+ /*
+  * This routine sets up an interrupt gate at directory privilege level 3.
+  */
+-static inline void set_system_intr_gate(unsigned int n, void *addr)
++static inline void set_system_intr_gate(unsigned int n, const void *addr)
+ {
+       BUG_ON((unsigned)n > 0xFF);
+       _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
+ }
+-static inline void set_system_trap_gate(unsigned int n, void *addr)
++static inline void set_system_trap_gate(unsigned int n, const void *addr)
+ {
+       BUG_ON((unsigned)n > 0xFF);
+       _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
+ }
+-static inline void set_trap_gate(unsigned int n, void *addr)
++static inline void set_trap_gate(unsigned int n, const void *addr)
+ {
+       BUG_ON((unsigned)n > 0xFF);
+       _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
+@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
+ static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
+ {
+       BUG_ON((unsigned)n > 0xFF);
+-      _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
++      _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
+ }
+-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
++static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
+ {
+       BUG_ON((unsigned)n > 0xFF);
+       _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
+ }
+-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
++static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
+ {
+       BUG_ON((unsigned)n > 0xFF);
+       _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
+ }
++#ifdef CONFIG_X86_32
++static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
++{
++      struct desc_struct d;
++
++      if (likely(limit))
++              limit = (limit - 1UL) >> PAGE_SHIFT;
++      pack_descriptor(&d, base, limit, 0xFB, 0xC);
++      write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
++}
++#endif
++
+ #endif /* _ASM_X86_DESC_H */
+diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
+index 278441f..b95a174 100644
+--- a/arch/x86/include/asm/desc_defs.h
++++ b/arch/x86/include/asm/desc_defs.h
+@@ -31,6 +31,12 @@ struct desc_struct {
+                       unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
+                       unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
+               };
++              struct {
++                      u16 offset_low;
++                      u16 seg;
++                      unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
++                      unsigned offset_high: 16;
++              } gate;
+       };
+ } __attribute__((packed));
+diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
+index ced283a..ffe04cc 100644
+--- a/arch/x86/include/asm/div64.h
++++ b/arch/x86/include/asm/div64.h
+@@ -39,7 +39,7 @@
+       __mod;                                                  \
+ })
+-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
++static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+ {
+       union {
+               u64 v64;
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index 9c999c1..3860cb8 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -243,7 +243,25 @@ extern int force_personality32;
+    the loader.  We need to make sure that it is out of the way of the program
+    that it will "exec", and that there is sufficient room for the brk.  */
++#ifdef CONFIG_PAX_SEGMEXEC
++#define ELF_ET_DYN_BASE               ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
++#else
+ #define ELF_ET_DYN_BASE               (TASK_SIZE / 3 * 2)
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++#ifdef CONFIG_X86_32
++#define PAX_ELF_ET_DYN_BASE   0x10000000UL
++
++#define PAX_DELTA_MMAP_LEN    (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#define PAX_DELTA_STACK_LEN   (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#else
++#define PAX_ELF_ET_DYN_BASE   0x400000UL
++
++#define PAX_DELTA_MMAP_LEN    ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
++#define PAX_DELTA_STACK_LEN   ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
++#endif
++#endif
+ /* This yields a mask that user programs can use to figure out what
+    instruction set this CPU supports.  This could be done in user space,
+@@ -296,16 +314,12 @@ do {                                                                     \
+ #define ARCH_DLINFO                                                   \
+ do {                                                                  \
+-      if (vdso_enabled)                                               \
+-              NEW_AUX_ENT(AT_SYSINFO_EHDR,                            \
+-                          (unsigned long)current->mm->context.vdso);  \
++      NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);        \
+ } while (0)
+ #define ARCH_DLINFO_X32                                                       \
+ do {                                                                  \
+-      if (vdso_enabled)                                               \
+-              NEW_AUX_ENT(AT_SYSINFO_EHDR,                            \
+-                          (unsigned long)current->mm->context.vdso);  \
++      NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);        \
+ } while (0)
+ #define AT_SYSINFO            32
+@@ -320,7 +334,7 @@ else                                                                       \
+ #endif /* !CONFIG_X86_32 */
+-#define VDSO_CURRENT_BASE     ((unsigned long)current->mm->context.vdso)
++#define VDSO_CURRENT_BASE     (current->mm->context.vdso)
+ #define VDSO_ENTRY                                                    \
+       ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
+@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
+ extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
+ #define compat_arch_setup_additional_pages    syscall32_setup_pages
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ /*
+  * True on X86_32 or when emulating IA32 on X86_64
+  */
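The PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN values above are the number of low-order page bits PaX randomises for the mmap and stack bases. As a rough check of the 64-bit case, assuming TASK_SIZE_MAX_SHIFT is 47 and PAGE_SHIFT is 12 (both assumptions of this sketch, not taken from the hunk shown here):

/* Rough arithmetic behind PAX_DELTA_MMAP_LEN on native x86_64.  The values
 * of TASK_SIZE_MAX_SHIFT and PAGE_SHIFT are assumptions of this sketch. */
#include <stdio.h>

#define PAGE_SHIFT          12
#define TASK_SIZE_MAX_SHIFT 47

int main(void)
{
        int bits = TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3;        /* 32 */

        printf("%d bits of mmap randomisation, span %llu GiB\n",
               bits, (1ULL << (bits + PAGE_SHIFT)) >> 30);      /* 16384 GiB */
        return 0;
}

That is, native 64-bit tasks get 32 bits of base randomisation under this configuration, while 32-bit and TIF_ADDR32 tasks are capped at 15 or 16 bits by the definitions above.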
+diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
+index 75ce3f4..882e801 100644
+--- a/arch/x86/include/asm/emergency-restart.h
++++ b/arch/x86/include/asm/emergency-restart.h
+@@ -13,6 +13,6 @@ enum reboot_type {
+ extern enum reboot_type reboot_type;
+-extern void machine_emergency_restart(void);
++extern void machine_emergency_restart(void) __noreturn;
+ #endif /* _ASM_X86_EMERGENCY_RESTART_H */
+diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
+index e25cc33..7d3ec01 100644
+--- a/arch/x86/include/asm/fpu-internal.h
++++ b/arch/x86/include/asm/fpu-internal.h
+@@ -126,8 +126,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
+ #define user_insn(insn, output, input...)                             \
+ ({                                                                    \
+       int err;                                                        \
++      pax_open_userland();                                            \
+       asm volatile(ASM_STAC "\n"                                      \
+-                   "1:" #insn "\n\t"                                  \
++                   "1:"                                               \
++                   __copyuser_seg                                     \
++                   #insn "\n\t"                                       \
+                    "2: " ASM_CLAC "\n"                                \
+                    ".section .fixup,\"ax\"\n"                         \
+                    "3:  movl $-1,%[err]\n"                            \
+@@ -136,6 +139,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
+                    _ASM_EXTABLE(1b, 3b)                               \
+                    : [err] "=r" (err), output                         \
+                    : "0"(0), input);                                  \
++      pax_close_userland();                                           \
+       err;                                                            \
+ })
+@@ -300,7 +304,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
+               "emms\n\t"              /* clear stack tags */
+               "fildl %P[addr]",       /* set F?P to defined value */
+               X86_FEATURE_FXSAVE_LEAK,
+-              [addr] "m" (tsk->thread.fpu.has_fpu));
++              [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
+       return fpu_restore_checking(&tsk->thread.fpu);
+ }
+diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
+index be27ba1..04a8801 100644
+--- a/arch/x86/include/asm/futex.h
++++ b/arch/x86/include/asm/futex.h
+@@ -12,6 +12,7 @@
+ #include <asm/smap.h>
+ #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg)   \
++      typecheck(u32 __user *, uaddr);                         \
+       asm volatile("\t" ASM_STAC "\n"                         \
+                    "1:\t" insn "\n"                           \
+                    "2:\t" ASM_CLAC "\n"                       \
+@@ -20,15 +21,16 @@
+                    "\tjmp\t2b\n"                              \
+                    "\t.previous\n"                            \
+                    _ASM_EXTABLE(1b, 3b)                       \
+-                   : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
++                   : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))    \
+                    : "i" (-EFAULT), "0" (oparg), "1" (0))
+ #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg)   \
++      typecheck(u32 __user *, uaddr);                         \
+       asm volatile("\t" ASM_STAC "\n"                         \
+                    "1:\tmovl  %2, %0\n"                       \
+                    "\tmovl\t%0, %3\n"                         \
+                    "\t" insn "\n"                             \
+-                   "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"     \
++                   "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n"       \
+                    "\tjnz\t1b\n"                              \
+                    "3:\t" ASM_CLAC "\n"                       \
+                    "\t.section .fixup,\"ax\"\n"               \
+@@ -38,7 +40,7 @@
+                    _ASM_EXTABLE(1b, 4b)                       \
+                    _ASM_EXTABLE(2b, 4b)                       \
+                    : "=&a" (oldval), "=&r" (ret),             \
+-                     "+m" (*uaddr), "=&r" (tem)               \
++                     "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem)  \
+                    : "r" (oparg), "i" (-EFAULT), "1" (0))
+ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+       pagefault_disable();
++      pax_open_userland();
+       switch (op) {
+       case FUTEX_OP_SET:
+-              __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
++              __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_ADD:
+-              __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
++              __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
+                                  uaddr, oparg);
+               break;
+       case FUTEX_OP_OR:
+@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+       default:
+               ret = -ENOSYS;
+       }
++      pax_close_userland();
+       pagefault_enable();
+@@ -115,18 +119,20 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+               return -EFAULT;
++      pax_open_userland();
+       asm volatile("\t" ASM_STAC "\n"
+-                   "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
++                   "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
+                    "2:\t" ASM_CLAC "\n"
+                    "\t.section .fixup, \"ax\"\n"
+                    "3:\tmov     %3, %0\n"
+                    "\tjmp     2b\n"
+                    "\t.previous\n"
+                    _ASM_EXTABLE(1b, 3b)
+-                   : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
++                   : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
+                    : "i" (-EFAULT), "r" (newval), "1" (oldval)
+                    : "memory"
+       );
++      pax_close_userland();
+       *uval = oldval;
+       return ret;
+diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
+index 1da97ef..9c2ebff 100644
+--- a/arch/x86/include/asm/hw_irq.h
++++ b/arch/x86/include/asm/hw_irq.h
+@@ -148,8 +148,8 @@ extern void setup_ioapic_dest(void);
+ extern void enable_IO_APIC(void);
+ /* Statistics */
+-extern atomic_t irq_err_count;
+-extern atomic_t irq_mis_count;
++extern atomic_unchecked_t irq_err_count;
++extern atomic_unchecked_t irq_mis_count;
+ /* EISA */
+ extern void eisa_set_level_irq(unsigned int irq);
+diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
+index a203659..9889f1c 100644
+--- a/arch/x86/include/asm/i8259.h
++++ b/arch/x86/include/asm/i8259.h
+@@ -62,7 +62,7 @@ struct legacy_pic {
+       void (*init)(int auto_eoi);
+       int (*irq_pending)(unsigned int irq);
+       void (*make_irq)(unsigned int irq);
+-};
++} __do_const;
+ extern struct legacy_pic *legacy_pic;
+ extern struct legacy_pic null_legacy_pic;
+diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
+index d8e8eef..1765f78 100644
+--- a/arch/x86/include/asm/io.h
++++ b/arch/x86/include/asm/io.h
+@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
+ "m" (*(volatile type __force *)addr) barrier); }
+ build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
+-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
+-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
++build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
++build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
+ build_mmio_read(__readb, "b", unsigned char, "=q", )
+-build_mmio_read(__readw, "w", unsigned short, "=r", )
+-build_mmio_read(__readl, "l", unsigned int, "=r", )
++build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
++build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
+ build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
+ build_mmio_write(writew, "w", unsigned short, "r", :"memory")
+@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
+       return ioremap_nocache(offset, size);
+ }
+-extern void iounmap(volatile void __iomem *addr);
++extern void iounmap(const volatile void __iomem *addr);
+ extern void set_iounmap_nonlazy(void);
+@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
+ #include <linux/vmalloc.h>
++#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
++static inline int valid_phys_addr_range(unsigned long addr, size_t count)
++{
++      return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
++static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
++{
++      return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
+ /*
+  * Convert a virtual cached pointer to an uncached pointer
+  */
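The valid_phys_addr_range / valid_mmap_phys_addr_range helpers added above reject /dev/mem-style accesses that would run past the CPU's physical address width. A user-space restatement of the same bound check with made-up numbers (phys_bits stands in for boot_cpu_data.x86_phys_bits; illustration only):

/* Re-statement of the range check with stand-in types; phys_bits plays the
 * role of boot_cpu_data.x86_phys_bits. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

static int valid_phys_addr_range(unsigned long long addr, size_t count,
                                 unsigned int phys_bits)
{
        return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
               (1ULL << (phys_bits - PAGE_SHIFT)) ? 1 : 0;
}

int main(void)
{
        /* with 36 physical address bits, anything at or above 64 GiB fails */
        printf("%d\n", valid_phys_addr_range(0x1000, 4096, 36));        /* 1 */
        printf("%d\n", valid_phys_addr_range(1ULL << 36, 4096, 36));    /* 0 */
        return 0;
}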
+diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
+index bba3cf8..06bc8da 100644
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
+       sti;                                    \
+       sysexit
++#define GET_CR0_INTO_RDI              mov %cr0, %rdi
++#define SET_RDI_INTO_CR0              mov %rdi, %cr0
++#define GET_CR3_INTO_RDI              mov %cr3, %rdi
++#define SET_RDI_INTO_CR3              mov %rdi, %cr3
++
+ #else
+ #define INTERRUPT_RETURN              iret
+ #define ENABLE_INTERRUPTS_SYSEXIT     sti; sysexit
+diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
+index 5a6d287..f815789 100644
+--- a/arch/x86/include/asm/kprobes.h
++++ b/arch/x86/include/asm/kprobes.h
+@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
+ #define RELATIVEJUMP_SIZE 5
+ #define RELATIVECALL_OPCODE 0xe8
+ #define RELATIVE_ADDR_SIZE 4
+-#define MAX_STACK_SIZE 64
+-#define MIN_STACK_SIZE(ADDR)                                         \
+-      (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
+-                            THREAD_SIZE - (unsigned long)(ADDR)))    \
+-       ? (MAX_STACK_SIZE)                                            \
+-       : (((unsigned long)current_thread_info()) +                   \
+-          THREAD_SIZE - (unsigned long)(ADDR)))
++#define MAX_STACK_SIZE 64UL
++#define MIN_STACK_SIZE(ADDR)  min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
+ #define flush_insn_slot(p)    do { } while (0)
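The rewritten MIN_STACK_SIZE above replaces the old nested conditional with a plain min(): copy at most 64 bytes, and never more than the distance from ADDR to the top of the kernel stack (current->thread.sp0). A small sketch with stand-in values (stack_top plays the role of sp0, and the addresses are made up; illustration only):

/* Stand-alone sketch of the simplified MIN_STACK_SIZE(): never copy more
 * than 64 bytes, and never past the top of the stack. */
#include <stdio.h>

#define MAX_STACK_SIZE 64UL

static unsigned long min_stack_size(unsigned long stack_top, unsigned long addr)
{
        unsigned long room = stack_top - addr;

        return room < MAX_STACK_SIZE ? room : MAX_STACK_SIZE;
}

int main(void)
{
        printf("%lu\n", min_stack_size(0x1000, 0x0fd0));        /* 48 */
        printf("%lu\n", min_stack_size(0x1000, 0x0e00));        /* 64 */
        return 0;
}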
+diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
+index 2d89e39..baee879 100644
+--- a/arch/x86/include/asm/local.h
++++ b/arch/x86/include/asm/local.h
+@@ -10,33 +10,97 @@ typedef struct {
+       atomic_long_t a;
+ } local_t;
++typedef struct {
++      atomic_long_unchecked_t a;
++} local_unchecked_t;
++
+ #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+ #define local_read(l) atomic_long_read(&(l)->a)
++#define local_read_unchecked(l)       atomic_long_read_unchecked(&(l)->a)
+ #define local_set(l, i)       atomic_long_set(&(l)->a, (i))
++#define local_set_unchecked(l, i)     atomic_long_set_unchecked(&(l)->a, (i))
+ static inline void local_inc(local_t *l)
+ {
+-      asm volatile(_ASM_INC "%0"
++      asm volatile(_ASM_INC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   _ASM_DEC "%0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   : "+m" (l->a.counter));
++}
++
++static inline void local_inc_unchecked(local_unchecked_t *l)
++{
++      asm volatile(_ASM_INC "%0\n"
+                    : "+m" (l->a.counter));
+ }
+ static inline void local_dec(local_t *l)
+ {
+-      asm volatile(_ASM_DEC "%0"
++      asm volatile(_ASM_DEC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   _ASM_INC "%0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   : "+m" (l->a.counter));
++}
++
++static inline void local_dec_unchecked(local_unchecked_t *l)
++{
++      asm volatile(_ASM_DEC "%0\n"
+                    : "+m" (l->a.counter));
+ }
+ static inline void local_add(long i, local_t *l)
+ {
+-      asm volatile(_ASM_ADD "%1,%0"
++      asm volatile(_ASM_ADD "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   _ASM_SUB "%1,%0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   : "+m" (l->a.counter)
++                   : "ir" (i));
++}
++
++static inline void local_add_unchecked(long i, local_unchecked_t *l)
++{
++      asm volatile(_ASM_ADD "%1,%0\n"
+                    : "+m" (l->a.counter)
+                    : "ir" (i));
+ }
+ static inline void local_sub(long i, local_t *l)
+ {
+-      asm volatile(_ASM_SUB "%1,%0"
++      asm volatile(_ASM_SUB "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   _ASM_ADD "%1,%0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   : "+m" (l->a.counter)
++                   : "ir" (i));
++}
++
++static inline void local_sub_unchecked(long i, local_unchecked_t *l)
++{
++      asm volatile(_ASM_SUB "%1,%0\n"
+                    : "+m" (l->a.counter)
+                    : "ir" (i));
+ }
+@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
+ {
+       unsigned char c;
+-      asm volatile(_ASM_SUB "%2,%0; sete %1"
++      asm volatile(_ASM_SUB "%2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   _ASM_ADD "%2,%0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   "sete %1\n"
+                    : "+m" (l->a.counter), "=qm" (c)
+                    : "ir" (i) : "memory");
+       return c;
+@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
+ {
+       unsigned char c;
+-      asm volatile(_ASM_DEC "%0; sete %1"
++      asm volatile(_ASM_DEC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   _ASM_INC "%0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   "sete %1\n"
+                    : "+m" (l->a.counter), "=qm" (c)
+                    : : "memory");
+       return c != 0;
+@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
+ {
+       unsigned char c;
+-      asm volatile(_ASM_INC "%0; sete %1"
++      asm volatile(_ASM_INC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   _ASM_DEC "%0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   "sete %1\n"
+                    : "+m" (l->a.counter), "=qm" (c)
+                    : : "memory");
+       return c != 0;
+@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
+ {
+       unsigned char c;
+-      asm volatile(_ASM_ADD "%2,%0; sets %1"
++      asm volatile(_ASM_ADD "%2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   _ASM_SUB "%2,%0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   "sets %1\n"
+                    : "+m" (l->a.counter), "=qm" (c)
+                    : "ir" (i) : "memory");
+       return c;
+@@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
+ static inline long local_add_return(long i, local_t *l)
+ {
+       long __i = i;
++      asm volatile(_ASM_XADD "%0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   _ASM_MOV "%0,%1\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
++                   : "+r" (i), "+m" (l->a.counter)
++                   : : "memory");
++      return i + __i;
++}
++
++/**
++ * local_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @l: pointer to type local_unchecked_t
++ *
++ * Atomically adds @i to @l and returns @i + @l
++ */
++static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
++{
++      long __i = i;
+       asm volatile(_ASM_XADD "%0, %1;"
+                    : "+r" (i), "+m" (l->a.counter)
+                    : : "memory");
+@@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
+ #define local_cmpxchg(l, o, n) \
+       (cmpxchg_local(&((l)->a.counter), (o), (n)))
++#define local_cmpxchg_unchecked(l, o, n) \
++      (cmpxchg_local(&((l)->a.counter), (o), (n)))
+ /* Always has a lock prefix */
+ #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
+diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
+new file mode 100644
+index 0000000..2bfd3ba
+--- /dev/null
++++ b/arch/x86/include/asm/mman.h
+@@ -0,0 +1,15 @@
++#ifndef _X86_MMAN_H
++#define _X86_MMAN_H
++
++#include <uapi/asm/mman.h>
++
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
++#ifdef CONFIG_X86_32
++#define arch_mmap_check       i386_mmap_check
++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
++#endif
++#endif
++#endif
++
++#endif /* X86_MMAN_H */
+diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
+index 5f55e69..e20bfb1 100644
+--- a/arch/x86/include/asm/mmu.h
++++ b/arch/x86/include/asm/mmu.h
+@@ -9,7 +9,7 @@
+  * we put the segment information here.
+  */
+ typedef struct {
+-      void *ldt;
++      struct desc_struct *ldt;
+       int size;
+ #ifdef CONFIG_X86_64
+@@ -18,7 +18,19 @@ typedef struct {
+ #endif
+       struct mutex lock;
+-      void *vdso;
++      unsigned long vdso;
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++      unsigned long user_cs_base;
++      unsigned long user_cs_limit;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++      cpumask_t cpu_user_cs_mask;
++#endif
++
++#endif
++#endif
+ } mm_context_t;
+ #ifdef CONFIG_SMP
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index cdbf367..4c73c9e 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -24,6 +24,20 @@ void destroy_context(struct mm_struct *mm);
+ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+ {
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      if (!(static_cpu_has(X86_FEATURE_PCID))) {
++              unsigned int i;
++              pgd_t *pgd;
++
++              pax_open_kernel();
++              pgd = get_cpu_pgd(smp_processor_id(), kernel);
++              for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
++                      set_pgd_batched(pgd+i, native_make_pgd(0));
++              pax_close_kernel();
++      }
++#endif
++
+ #ifdef CONFIG_SMP
+       if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+               this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+@@ -34,16 +48,55 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+                            struct task_struct *tsk)
+ {
+       unsigned cpu = smp_processor_id();
++#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++      int tlbstate = TLBSTATE_OK;
++#endif
+       if (likely(prev != next)) {
+ #ifdef CONFIG_SMP
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++              tlbstate = this_cpu_read(cpu_tlbstate.state);
++#endif
+               this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+               this_cpu_write(cpu_tlbstate.active_mm, next);
+ #endif
+               cpumask_set_cpu(cpu, mm_cpumask(next));
+               /* Re-load page tables */
++#ifdef CONFIG_PAX_PER_CPU_PGD
++              pax_open_kernel();
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++              if (static_cpu_has(X86_FEATURE_PCID))
++                      __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
++              else
++#endif
++
++              __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
++              __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
++              pax_close_kernel();
++              BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++              if (static_cpu_has(X86_FEATURE_PCID)) {
++                      if (static_cpu_has(X86_FEATURE_INVPCID)) {
++                              unsigned long descriptor[2];
++                              descriptor[0] = PCID_USER;
++                              asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
++                      } else {
++                              write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
++                              if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
++                                      write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
++                              else
++                                      write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
++                      }
++              } else
++#endif
++
++                      load_cr3(get_cpu_pgd(cpu, kernel));
++#else
+               load_cr3(next->pgd);
++#endif
+               /* stop flush ipis for the previous mm */
+               cpumask_clear_cpu(cpu, mm_cpumask(prev));
+@@ -53,9 +106,63 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+                */
+               if (unlikely(prev->context.ldt != next->context.ldt))
+                       load_LDT_nolock(&next->context);
+-      }
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++              if (!(__supported_pte_mask & _PAGE_NX)) {
++                      smp_mb__before_clear_bit();
++                      cpu_clear(cpu, prev->context.cpu_user_cs_mask);
++                      smp_mb__after_clear_bit();
++                      cpu_set(cpu, next->context.cpu_user_cs_mask);
++              }
++#endif
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++              if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
++                           prev->context.user_cs_limit != next->context.user_cs_limit))
++                      set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
+ #ifdef CONFIG_SMP
++              else if (unlikely(tlbstate != TLBSTATE_OK))
++                      set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#endif
++#endif
++
++      }
+       else {
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++              pax_open_kernel();
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++              if (static_cpu_has(X86_FEATURE_PCID))
++                      __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
++              else
++#endif
++
++              __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
++              __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
++              pax_close_kernel();
++              BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++              if (static_cpu_has(X86_FEATURE_PCID)) {
++                      if (static_cpu_has(X86_FEATURE_INVPCID)) {
++                              unsigned long descriptor[2];
++                              descriptor[0] = PCID_USER;
++                              asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
++                      } else {
++                              write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
++                              if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
++                                      write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
++                              else
++                                      write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
++                      }
++              } else
++#endif
++
++                      load_cr3(get_cpu_pgd(cpu, kernel));
++#endif
++
++#ifdef CONFIG_SMP
+               this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+               BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
+@@ -64,11 +171,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+                        * tlb flush IPI delivery. We must reload CR3
+                        * to make sure to use no freed page tables.
+                        */
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+                       load_cr3(next->pgd);
++#endif
++
+                       load_LDT_nolock(&next->context);
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++                      if (!(__supported_pte_mask & _PAGE_NX))
++                              cpu_set(cpu, next->context.cpu_user_cs_mask);
++#endif
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++#ifdef CONFIG_PAX_PAGEEXEC
++                      if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
++#endif
++                              set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#endif
++
+               }
++#endif
+       }
+-#endif
+ }
+ #define activate_mm(prev, next)                       \
+diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
+index e3b7819..b257c64 100644
+--- a/arch/x86/include/asm/module.h
++++ b/arch/x86/include/asm/module.h
+@@ -5,6 +5,7 @@
+ #ifdef CONFIG_X86_64
+ /* X86_64 does not define MODULE_PROC_FAMILY */
++#define MODULE_PROC_FAMILY ""
+ #elif defined CONFIG_M486
+ #define MODULE_PROC_FAMILY "486 "
+ #elif defined CONFIG_M586
+@@ -57,8 +58,20 @@
+ #error unknown processor family
+ #endif
+-#ifdef CONFIG_X86_32
+-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
++#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
++#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
++#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
++#else
++#define MODULE_PAX_KERNEXEC ""
+ #endif
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#define MODULE_PAX_UDEREF "UDEREF "
++#else
++#define MODULE_PAX_UDEREF ""
++#endif
++
++#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
++
+ #endif /* _ASM_X86_MODULE_H */
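The module.h change folds the PaX feature selection into the module vermagic string, so modules built without matching KERNEXEC/UDEREF settings refuse to load. A hypothetical preprocessor illustration of how the fragments concatenate; the macro values below assume an x86_64 build with the KERNEXEC "OR" method and UDEREF enabled, and are not taken from a real .config:

/* Hypothetical illustration of the vermagic composition. */
#include <stdio.h>

#define MODULE_PROC_FAMILY  ""                  /* empty on x86_64 */
#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
#define MODULE_PAX_UDEREF   "UDEREF "
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
        /* a module built this way carries "KERNEXEC_OR UDEREF " in vermagic */
        printf("\"%s\"\n", MODULE_ARCH_VERMAGIC);
        return 0;
}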
+diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
+index 86f9301..b365cda 100644
+--- a/arch/x86/include/asm/nmi.h
++++ b/arch/x86/include/asm/nmi.h
+@@ -40,11 +40,11 @@ struct nmiaction {
+       nmi_handler_t           handler;
+       unsigned long           flags;
+       const char              *name;
+-};
++} __do_const;
+ #define register_nmi_handler(t, fn, fg, n, init...)   \
+ ({                                                    \
+-      static struct nmiaction init fn##_na = {        \
++      static const struct nmiaction init fn##_na = {  \
+               .handler = (fn),                        \
+               .name = (n),                            \
+               .flags = (fg),                          \
+@@ -52,7 +52,7 @@ struct nmiaction {
+       __register_nmi_handler((t), &fn##_na);          \
+ })
+-int __register_nmi_handler(unsigned int, struct nmiaction *);
++int __register_nmi_handler(unsigned int, const struct nmiaction *);
+ void unregister_nmi_handler(unsigned int, const char *);
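A minimal caller sketch for the const-ified API above; the handler body and names are made up, and NMI_LOCAL/NMI_DONE are the usual constants from this header. The point is that the descriptor built by register_nmi_handler() can now live in read-only data:

static int example_nmi_handler(unsigned int type, struct pt_regs *regs)
{
        return NMI_DONE;                        /* not our NMI, pass it on */
}

static void example_setup(void)
{
        /* after this change the macro emits a static const struct nmiaction */
        register_nmi_handler(NMI_LOCAL, example_nmi_handler, 0, "example");
}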
+diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
+index c878924..21f4889 100644
+--- a/arch/x86/include/asm/page.h
++++ b/arch/x86/include/asm/page.h
+@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
+       __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
+ #define __va(x)                       ((void *)((unsigned long)(x)+PAGE_OFFSET))
++#define __early_va(x)         ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
+ #define __boot_va(x)          __va(x)
+ #define __boot_pa(x)          __pa(x)
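A worked example of the new __early_va() helper, under assumed values that are not taken from the patch (__START_KERNEL_map = 0xffffffff80000000 and phys_base = 0, i.e. an unrelocated kernel):

/* __early_va(x) = x + __START_KERNEL_map - phys_base translates a physical
 * address through the kernel-image mapping rather than the direct map, which
 * is what early boot code needs before the direct mapping exists:
 *
 *   __early_va(0x01000000) == 0x01000000 + 0xffffffff80000000 - 0
 *                          == 0xffffffff81000000
 */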
+diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
+index 0f1ddee..e2fc3d1 100644
+--- a/arch/x86/include/asm/page_64.h
++++ b/arch/x86/include/asm/page_64.h
+@@ -7,9 +7,9 @@
+ /* duplicated to the one in bootmem.h */
+ extern unsigned long max_pfn;
+-extern unsigned long phys_base;
++extern const unsigned long phys_base;
+-static inline unsigned long __phys_addr_nodebug(unsigned long x)
++static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
+ {
+       unsigned long y = x - __START_KERNEL_map;
+diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
+index cfdc9ee..3f7b5d6 100644
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
+       return (pmd_t) { ret };
+ }
+-static inline pmdval_t pmd_val(pmd_t pmd)
++static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
+ {
+       pmdval_t ret;
+@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+                           val);
+ }
++static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
++{
++      pgdval_t val = native_pgd_val(pgd);
++
++      if (sizeof(pgdval_t) > sizeof(long))
++              PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
++                          val, (u64)val >> 32);
++      else
++              PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
++                          val);
++}
++
+ static inline void pgd_clear(pgd_t *pgdp)
+ {
+       set_pgd(pgdp, __pgd(0));
+@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
+       pv_mmu_ops.set_fixmap(idx, phys, flags);
+ }
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long pax_open_kernel(void)
++{
++      return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
++}
++
++static inline unsigned long pax_close_kernel(void)
++{
++      return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
++}
++#else
++static inline unsigned long pax_open_kernel(void) { return 0; }
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++
+ #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+ static inline int arch_spin_is_locked(struct arch_spinlock *lock)
+@@ -926,7 +953,7 @@ extern void default_banner(void);
+ #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
+ #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
+-#define PARA_INDIRECT(addr)   *%cs:addr
++#define PARA_INDIRECT(addr)   *%ss:addr
+ #endif
+ #define INTERRUPT_RETURN                                              \
+@@ -1001,6 +1028,21 @@ extern void default_banner(void);
+       PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
+                 CLBR_NONE,                                            \
+                 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
++
++#define GET_CR0_INTO_RDI                              \
++      call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
++      mov %rax,%rdi
++
++#define SET_RDI_INTO_CR0                              \
++      call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++
++#define GET_CR3_INTO_RDI                              \
++      call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
++      mov %rax,%rdi
++
++#define SET_RDI_INTO_CR3                              \
++      call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
++
+ #endif        /* CONFIG_X86_32 */
+ #endif /* __ASSEMBLY__ */
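The pax_open_kernel()/pax_close_kernel() pair introduced here (paravirt dispatch above, native CR0.WP toggle further down in pgtable.h) brackets writes to otherwise read-only kernel page tables. A hypothetical caller, mirroring the native_set_pmd hunks later in this patch, would look like:

/* illustrative only; the helper name is made up */
static inline void example_update_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pax_open_kernel();      /* under KERNEXEC the native variant clears CR0.WP */
        *pmdp = pmd;            /* the page tables themselves are mapped read-only */
        pax_close_kernel();     /* restore write protection */
}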
+diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
+index 0db1fca..52310cc 100644
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -84,7 +84,7 @@ struct pv_init_ops {
+        */
+       unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
+                         unsigned long addr, unsigned len);
+-};
++} __no_const;
+ struct pv_lazy_ops {
+@@ -98,7 +98,7 @@ struct pv_time_ops {
+       unsigned long long (*sched_clock)(void);
+       unsigned long long (*steal_clock)(int cpu);
+       unsigned long (*get_tsc_khz)(void);
+-};
++} __no_const;
+ struct pv_cpu_ops {
+       /* hooks for various privileged instructions */
+@@ -192,7 +192,7 @@ struct pv_cpu_ops {
+       void (*start_context_switch)(struct task_struct *prev);
+       void (*end_context_switch)(struct task_struct *next);
+-};
++} __no_const;
+ struct pv_irq_ops {
+       /*
+@@ -223,7 +223,7 @@ struct pv_apic_ops {
+                                unsigned long start_eip,
+                                unsigned long start_esp);
+ #endif
+-};
++} __no_const;
+ struct pv_mmu_ops {
+       unsigned long (*read_cr2)(void);
+@@ -313,6 +313,7 @@ struct pv_mmu_ops {
+       struct paravirt_callee_save make_pud;
+       void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
++      void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
+ #endif        /* PAGETABLE_LEVELS == 4 */
+ #endif        /* PAGETABLE_LEVELS >= 3 */
+@@ -324,6 +325,12 @@ struct pv_mmu_ops {
+          an mfn.  We can tell which is which from the index. */
+       void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
+                          phys_addr_t phys, pgprot_t flags);
++
++#ifdef CONFIG_PAX_KERNEXEC
++      unsigned long (*pax_open_kernel)(void);
++      unsigned long (*pax_close_kernel)(void);
++#endif
++
+ };
+ struct arch_spinlock;
+@@ -334,7 +341,7 @@ struct pv_lock_ops {
+       void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
+       int (*spin_trylock)(struct arch_spinlock *lock);
+       void (*spin_unlock)(struct arch_spinlock *lock);
+-};
++} __no_const;
+ /* This contains all the paravirt structures: we get a convenient
+  * number for each function using the offset which we use to indicate
+diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
+index b4389a4..7024269 100644
+--- a/arch/x86/include/asm/pgalloc.h
++++ b/arch/x86/include/asm/pgalloc.h
+@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
+                                      pmd_t *pmd, pte_t *pte)
+ {
+       paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
++      set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
++}
++
++static inline void pmd_populate_user(struct mm_struct *mm,
++                                     pmd_t *pmd, pte_t *pte)
++{
++      paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
+       set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
+ }
+@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+ #ifdef CONFIG_X86_PAE
+ extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
++{
++      pud_populate(mm, pudp, pmd);
++}
+ #else /* !CONFIG_X86_PAE */
+ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ {
+       paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
+       set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
+ }
++
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++      paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
++      set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
++}
+ #endif        /* CONFIG_X86_PAE */
+ #if PAGETABLE_LEVELS > 3
+@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+       set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
+ }
++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
++{
++      paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
++      set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
++}
++
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+       return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
+index f2b489c..4f7e2e5 100644
+--- a/arch/x86/include/asm/pgtable-2level.h
++++ b/arch/x86/include/asm/pgtable-2level.h
+@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++      pax_open_kernel();
+       *pmdp = pmd;
++      pax_close_kernel();
+ }
+ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
+index 4cc9f2b..5fd9226 100644
+--- a/arch/x86/include/asm/pgtable-3level.h
++++ b/arch/x86/include/asm/pgtable-3level.h
+@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++      pax_open_kernel();
+       set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
++      pax_close_kernel();
+ }
+ static inline void native_set_pud(pud_t *pudp, pud_t pud)
+ {
++      pax_open_kernel();
+       set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
++      pax_close_kernel();
+ }
+ /*
+diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
+index 1e67223..92a9585 100644
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
+ #ifndef __PAGETABLE_PUD_FOLDED
+ #define set_pgd(pgdp, pgd)            native_set_pgd(pgdp, pgd)
++#define set_pgd_batched(pgdp, pgd)    native_set_pgd_batched(pgdp, pgd)
+ #define pgd_clear(pgd)                        native_pgd_clear(pgd)
+ #endif
+@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
+ #define arch_end_context_switch(prev) do {} while(0)
++#define pax_open_kernel()     native_pax_open_kernel()
++#define pax_close_kernel()    native_pax_close_kernel()
+ #endif        /* CONFIG_PARAVIRT */
++#define  __HAVE_ARCH_PAX_OPEN_KERNEL
++#define  __HAVE_ARCH_PAX_CLOSE_KERNEL
++
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long native_pax_open_kernel(void)
++{
++      unsigned long cr0;
++
++      preempt_disable();
++      barrier();
++      cr0 = read_cr0() ^ X86_CR0_WP;
++      BUG_ON(cr0 & X86_CR0_WP);
++      write_cr0(cr0);
++      return cr0 ^ X86_CR0_WP;
++}
++
++static inline unsigned long native_pax_close_kernel(void)
++{
++      unsigned long cr0;
++
++      cr0 = read_cr0() ^ X86_CR0_WP;
++      BUG_ON(!(cr0 & X86_CR0_WP));
++      write_cr0(cr0);
++      barrier();
++      preempt_enable_no_resched();
++      return cr0 ^ X86_CR0_WP;
++}
++#else
++static inline unsigned long native_pax_open_kernel(void) { return 0; }
++static inline unsigned long native_pax_close_kernel(void) { return 0; }
++#endif
++
+ /*
+  * The following only work if pte_present() is true.
+  * Undefined behaviour if not..
+  */
++static inline int pte_user(pte_t pte)
++{
++      return pte_val(pte) & _PAGE_USER;
++}
++
+ static inline int pte_dirty(pte_t pte)
+ {
+       return pte_flags(pte) & _PAGE_DIRTY;
+@@ -147,6 +187,11 @@ static inline unsigned long pud_pfn(pud_t pud)
+       return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
+ }
++static inline unsigned long pgd_pfn(pgd_t pgd)
++{
++      return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
++}
++
+ #define pte_page(pte) pfn_to_page(pte_pfn(pte))
+ static inline int pmd_large(pmd_t pte)
+@@ -200,9 +245,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
+       return pte_clear_flags(pte, _PAGE_RW);
+ }
++static inline pte_t pte_mkread(pte_t pte)
++{
++      return __pte(pte_val(pte) | _PAGE_USER);
++}
++
+ static inline pte_t pte_mkexec(pte_t pte)
+ {
+-      return pte_clear_flags(pte, _PAGE_NX);
++#ifdef CONFIG_X86_PAE
++      if (__supported_pte_mask & _PAGE_NX)
++              return pte_clear_flags(pte, _PAGE_NX);
++      else
++#endif
++              return pte_set_flags(pte, _PAGE_USER);
++}
++
++static inline pte_t pte_exprotect(pte_t pte)
++{
++#ifdef CONFIG_X86_PAE
++      if (__supported_pte_mask & _PAGE_NX)
++              return pte_set_flags(pte, _PAGE_NX);
++      else
++#endif
++              return pte_clear_flags(pte, _PAGE_USER);
+ }
+ static inline pte_t pte_mkdirty(pte_t pte)
+@@ -394,6 +459,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
+ #endif
+ #ifndef __ASSEMBLY__
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
++enum cpu_pgd_type {kernel = 0, user = 1};
++static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
++{
++      return cpu_pgd[cpu][type];
++}
++#endif
++
+ #include <linux/mm_types.h>
+ #include <linux/log2.h>
+@@ -529,7 +604,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
+  * Currently stuck as a macro due to indirect forward reference to
+  * linux/mmzone.h's __section_mem_map_addr() definition:
+  */
+-#define pud_page(pud)         pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
++#define pud_page(pud)         pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
+ /* Find an entry in the second-level page table.. */
+ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+@@ -569,7 +644,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+  * Currently stuck as a macro due to indirect forward reference to
+  * linux/mmzone.h's __section_mem_map_addr() definition:
+  */
+-#define pgd_page(pgd)         pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
++#define pgd_page(pgd)         pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
+ /* to find an entry in a page-table-directory. */
+ static inline unsigned long pud_index(unsigned long address)
+@@ -584,7 +659,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+ static inline int pgd_bad(pgd_t pgd)
+ {
+-      return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
++      return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
+ }
+ static inline int pgd_none(pgd_t pgd)
+@@ -607,7 +682,12 @@ static inline int pgd_none(pgd_t pgd)
+  * pgd_offset() returns a (pgd_t *)
+  * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
+  */
+-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
++#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
++#endif
++
+ /*
+  * a shortcut which implies the use of the kernel's pgd, instead
+  * of a process's
+@@ -618,6 +698,23 @@ static inline int pgd_none(pgd_t pgd)
+ #define KERNEL_PGD_BOUNDARY   pgd_index(PAGE_OFFSET)
+ #define KERNEL_PGD_PTRS               (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
++#ifdef CONFIG_X86_32
++#define USER_PGD_PTRS         KERNEL_PGD_BOUNDARY
++#else
++#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
++#define USER_PGD_PTRS         (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#ifdef __ASSEMBLY__
++#define pax_user_shadow_base  pax_user_shadow_base(%rip)
++#else
++extern unsigned long pax_user_shadow_base;
++extern pgdval_t clone_pgd_mask;
++#endif
++#endif
++
++#endif
++
+ #ifndef __ASSEMBLY__
+ extern int direct_gbpages;
+@@ -784,11 +881,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+  * dst and src can be on the same page, but the range must not overlap,
+  * and must not cross a page boundary.
+  */
+-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
++static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
+ {
+-       memcpy(dst, src, count * sizeof(pgd_t));
++      pax_open_kernel();
++      while (count--)
++              *dst++ = *src++;
++      pax_close_kernel();
+ }
++#ifdef CONFIG_PAX_PER_CPU_PGD
++extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
++#endif
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
++#else
++static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
++#endif
++
+ #define PTE_SHIFT ilog2(PTRS_PER_PTE)
+ static inline int page_level_shift(enum pg_level level)
+ {
+diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
+index 9ee3221..b979c6b 100644
+--- a/arch/x86/include/asm/pgtable_32.h
++++ b/arch/x86/include/asm/pgtable_32.h
+@@ -25,9 +25,6 @@
+ struct mm_struct;
+ struct vm_area_struct;
+-extern pgd_t swapper_pg_dir[1024];
+-extern pgd_t initial_page_table[1024];
+-
+ static inline void pgtable_cache_init(void) { }
+ static inline void check_pgt_cache(void) { }
+ void paging_init(void);
+@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
+ # include <asm/pgtable-2level.h>
+ #endif
++extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
++extern pgd_t initial_page_table[PTRS_PER_PGD];
++#ifdef CONFIG_X86_PAE
++extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
++#endif
++
+ #if defined(CONFIG_HIGHPTE)
+ #define pte_offset_map(dir, address)                                  \
+       ((pte_t *)kmap_atomic(pmd_page(*(dir))) +               \
+@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
+ /* Clear a kernel PTE and flush it from the TLB */
+ #define kpte_clear_flush(ptep, vaddr)         \
+ do {                                          \
++      pax_open_kernel();                      \
+       pte_clear(&init_mm, (vaddr), (ptep));   \
++      pax_close_kernel();                     \
+       __flush_tlb_one((vaddr));               \
+ } while (0)
+ #endif /* !__ASSEMBLY__ */
++#define HAVE_ARCH_UNMAPPED_AREA
++#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
++
+ /*
+  * kern_addr_valid() is (1) for FLATMEM and (0) for
+  * SPARSEMEM and DISCONTIGMEM
+diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
+index ed5903b..c7fe163 100644
+--- a/arch/x86/include/asm/pgtable_32_types.h
++++ b/arch/x86/include/asm/pgtable_32_types.h
+@@ -8,7 +8,7 @@
+  */
+ #ifdef CONFIG_X86_PAE
+ # include <asm/pgtable-3level_types.h>
+-# define PMD_SIZE     (1UL << PMD_SHIFT)
++# define PMD_SIZE     (_AC(1, UL) << PMD_SHIFT)
+ # define PMD_MASK     (~(PMD_SIZE - 1))
+ #else
+ # include <asm/pgtable-2level_types.h>
+@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
+ # define VMALLOC_END  (FIXADDR_START - 2 * PAGE_SIZE)
+ #endif
++#ifdef CONFIG_PAX_KERNEXEC
++#ifndef __ASSEMBLY__
++extern unsigned char MODULES_EXEC_VADDR[];
++extern unsigned char MODULES_EXEC_END[];
++#endif
++#include <asm/boot.h>
++#define ktla_ktva(addr)               (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
++#define ktva_ktla(addr)               (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
++#else
++#define ktla_ktva(addr)               (addr)
++#define ktva_ktla(addr)               (addr)
++#endif
++
+ #define MODULES_VADDR VMALLOC_START
+ #define MODULES_END   VMALLOC_END
+ #define MODULES_LEN   (MODULES_VADDR - MODULES_END)
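A worked example of the new KERNEXEC address-conversion macros, assuming the common defaults LOAD_PHYSICAL_ADDR = 0x1000000 and PAGE_OFFSET = 0xC0000000 (assumed values, not taken from this hunk):

/* ktla_ktva(addr) = addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET
 * ktva_ktla(addr) = addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET
 *
 *   ktla_ktva(0x00100000) == 0x00100000 + 0x01000000 + 0xC0000000 == 0xC1100000
 *   ktva_ktla(0xC1100000) == 0x00100000
 *
 * so the two macros are exact inverses; without KERNEXEC both collapse to the
 * identity, as the #else branch shows. */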
+diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
+index e22c1db..23a625a 100644
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -16,10 +16,14 @@
+ extern pud_t level3_kernel_pgt[512];
+ extern pud_t level3_ident_pgt[512];
++extern pud_t level3_vmalloc_start_pgt[512];
++extern pud_t level3_vmalloc_end_pgt[512];
++extern pud_t level3_vmemmap_pgt[512];
++extern pud_t level2_vmemmap_pgt[512];
+ extern pmd_t level2_kernel_pgt[512];
+ extern pmd_t level2_fixmap_pgt[512];
+-extern pmd_t level2_ident_pgt[512];
+-extern pgd_t init_level4_pgt[];
++extern pmd_t level2_ident_pgt[512*2];
++extern pgd_t init_level4_pgt[512];
+ #define swapper_pg_dir init_level4_pgt
+@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++      pax_open_kernel();
+       *pmdp = pmd;
++      pax_close_kernel();
+ }
+ static inline void native_pmd_clear(pmd_t *pmd)
+@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
+ static inline void native_set_pud(pud_t *pudp, pud_t pud)
+ {
++      pax_open_kernel();
+       *pudp = pud;
++      pax_close_kernel();
+ }
+ static inline void native_pud_clear(pud_t *pud)
+@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
+ static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
+ {
++      pax_open_kernel();
++      *pgdp = pgd;
++      pax_close_kernel();
++}
++
++static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
++{
+       *pgdp = pgd;
+ }
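Note that native_set_pgd() now brackets the store with pax_open/close_kernel() while the new native_set_pgd_batched() does not; presumably (an inference, not stated in the patch) the batched variant is for callers that already hold the kernel open around a whole run of updates, along these lines:

/* hypothetical caller, names made up */
static void example_copy_pgd_range(pgd_t *dst, const pgd_t *src, int count)
{
        pax_open_kernel();              /* open once for the whole batch */
        while (count--)
                set_pgd_batched(dst++, *src++);
        pax_close_kernel();
}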
+diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
+index 2d88344..4679fc3 100644
+--- a/arch/x86/include/asm/pgtable_64_types.h
++++ b/arch/x86/include/asm/pgtable_64_types.h
+@@ -61,6 +61,11 @@ typedef struct { pteval_t pte; } pte_t;
+ #define MODULES_VADDR    _AC(0xffffffffa0000000, UL)
+ #define MODULES_END      _AC(0xffffffffff000000, UL)
+ #define MODULES_LEN   (MODULES_END - MODULES_VADDR)
++#define MODULES_EXEC_VADDR MODULES_VADDR
++#define MODULES_EXEC_END MODULES_END
++
++#define ktla_ktva(addr)               (addr)
++#define ktva_ktla(addr)               (addr)
+ #define EARLY_DYNAMIC_PAGE_TABLES     64
+diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
+index e642300..0ef8f31 100644
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -16,13 +16,12 @@
+ #define _PAGE_BIT_PSE         7       /* 4 MB (or 2MB) page */
+ #define _PAGE_BIT_PAT         7       /* on 4KB pages */
+ #define _PAGE_BIT_GLOBAL      8       /* Global TLB entry PPro+ */
+-#define _PAGE_BIT_UNUSED1     9       /* available for programmer */
++#define _PAGE_BIT_SPECIAL     9       /* special mappings, no associated struct page */
+ #define _PAGE_BIT_IOMAP               10      /* flag used to indicate IO mapping */
+ #define _PAGE_BIT_HIDDEN      11      /* hidden by kmemcheck */
+ #define _PAGE_BIT_PAT_LARGE   12      /* On 2MB or 1GB pages */
+-#define _PAGE_BIT_SPECIAL     _PAGE_BIT_UNUSED1
+-#define _PAGE_BIT_CPA_TEST    _PAGE_BIT_UNUSED1
+-#define _PAGE_BIT_SPLITTING   _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
++#define _PAGE_BIT_CPA_TEST    _PAGE_BIT_SPECIAL
++#define _PAGE_BIT_SPLITTING   _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
+ #define _PAGE_BIT_NX           63       /* No execute: only valid after cpuid check */
+ /* If _PAGE_BIT_PRESENT is clear, we use these: */
+@@ -40,7 +39,6 @@
+ #define _PAGE_DIRTY   (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
+ #define _PAGE_PSE     (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
+ #define _PAGE_GLOBAL  (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
+-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
+ #define _PAGE_IOMAP   (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
+ #define _PAGE_PAT     (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
+ #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
+@@ -57,8 +55,10 @@
+ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ #define _PAGE_NX      (_AT(pteval_t, 1) << _PAGE_BIT_NX)
+-#else
++#elif defined(CONFIG_KMEMCHECK)
+ #define _PAGE_NX      (_AT(pteval_t, 0))
++#else
++#define _PAGE_NX      (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+ #endif
+ #define _PAGE_FILE    (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
+@@ -116,6 +116,9 @@
+ #define PAGE_READONLY_EXEC    __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
+                                        _PAGE_ACCESSED)
++#define PAGE_READONLY_NOEXEC PAGE_READONLY
++#define PAGE_SHARED_NOEXEC PAGE_SHARED
++
+ #define __PAGE_KERNEL_EXEC                                            \
+       (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
+ #define __PAGE_KERNEL         (__PAGE_KERNEL_EXEC | _PAGE_NX)
+@@ -126,7 +129,7 @@
+ #define __PAGE_KERNEL_WC              (__PAGE_KERNEL | _PAGE_CACHE_WC)
+ #define __PAGE_KERNEL_NOCACHE         (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
+ #define __PAGE_KERNEL_UC_MINUS                (__PAGE_KERNEL | _PAGE_PCD)
+-#define __PAGE_KERNEL_VSYSCALL                (__PAGE_KERNEL_RX | _PAGE_USER)
++#define __PAGE_KERNEL_VSYSCALL                (__PAGE_KERNEL_RO | _PAGE_USER)
+ #define __PAGE_KERNEL_VVAR            (__PAGE_KERNEL_RO | _PAGE_USER)
+ #define __PAGE_KERNEL_VVAR_NOCACHE    (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
+ #define __PAGE_KERNEL_LARGE           (__PAGE_KERNEL | _PAGE_PSE)
+@@ -188,8 +191,8 @@
+  * bits are combined, this will allow user to access the high address mapped
+  * VDSO in the presence of CONFIG_COMPAT_VDSO
+  */
+-#define PTE_IDENT_ATTR         0x003          /* PRESENT+RW */
+-#define PDE_IDENT_ATTR         0x067          /* PRESENT+RW+USER+DIRTY+ACCESSED */
++#define PTE_IDENT_ATTR         0x063          /* PRESENT+RW+DIRTY+ACCESSED */
++#define PDE_IDENT_ATTR         0x063          /* PRESENT+RW+DIRTY+ACCESSED */
+ #define PGD_IDENT_ATTR         0x001          /* PRESENT (no other attributes) */
+ #endif
+@@ -227,7 +230,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
+ {
+       return native_pgd_val(pgd) & PTE_FLAGS_MASK;
+ }
++#endif
++#if PAGETABLE_LEVELS == 3
++#include <asm-generic/pgtable-nopud.h>
++#endif
++
++#if PAGETABLE_LEVELS == 2
++#include <asm-generic/pgtable-nopmd.h>
++#endif
++
++#ifndef __ASSEMBLY__
+ #if PAGETABLE_LEVELS > 3
+ typedef struct { pudval_t pud; } pud_t;
+@@ -241,8 +254,6 @@ static inline pudval_t native_pud_val(pud_t pud)
+       return pud.pud;
+ }
+ #else
+-#include <asm-generic/pgtable-nopud.h>
+-
+ static inline pudval_t native_pud_val(pud_t pud)
+ {
+       return native_pgd_val(pud.pgd);
+@@ -262,8 +273,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
+       return pmd.pmd;
+ }
+ #else
+-#include <asm-generic/pgtable-nopmd.h>
+-
+ static inline pmdval_t native_pmd_val(pmd_t pmd)
+ {
+       return native_pgd_val(pmd.pud.pgd);
+@@ -303,7 +312,6 @@ typedef struct page *pgtable_t;
+ extern pteval_t __supported_pte_mask;
+ extern void set_nx(void);
+-extern int nx_enabled;
+ #define pgprot_writecombine   pgprot_writecombine
+ extern pgprot_t pgprot_writecombine(pgprot_t prot);
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index 22224b3..b3a2f90 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
+           : "memory");
+ }
++/* invpcid (%rdx),%rax */
++#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
++
++#define INVPCID_SINGLE_ADDRESS        0UL
++#define INVPCID_SINGLE_CONTEXT        1UL
++#define INVPCID_ALL_GLOBAL    2UL
++#define INVPCID_ALL_MONGLOBAL 3UL
++
++#define PCID_KERNEL           0UL
++#define PCID_USER             1UL
++#define PCID_NOFLUSH          (1UL << 63)
++
+ static inline void load_cr3(pgd_t *pgdir)
+ {
+-      write_cr3(__pa(pgdir));
++      write_cr3(__pa(pgdir) | PCID_KERNEL);
+ }
+ #ifdef CONFIG_X86_32
+@@ -282,7 +294,7 @@ struct tss_struct {
+ } ____cacheline_aligned;
+-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
++extern struct tss_struct init_tss[NR_CPUS];
+ /*
+  * Save the original ist values for checking stack pointers during debugging
+@@ -452,6 +464,7 @@ struct thread_struct {
+       unsigned short          ds;
+       unsigned short          fsindex;
+       unsigned short          gsindex;
++      unsigned short          ss;
+ #endif
+ #ifdef CONFIG_X86_32
+       unsigned long           ip;
+@@ -552,29 +565,8 @@ static inline void load_sp0(struct tss_struct *tss,
+ extern unsigned long mmu_cr4_features;
+ extern u32 *trampoline_cr4_features;
+-static inline void set_in_cr4(unsigned long mask)
+-{
+-      unsigned long cr4;
+-
+-      mmu_cr4_features |= mask;
+-      if (trampoline_cr4_features)
+-              *trampoline_cr4_features = mmu_cr4_features;
+-      cr4 = read_cr4();
+-      cr4 |= mask;
+-      write_cr4(cr4);
+-}
+-
+-static inline void clear_in_cr4(unsigned long mask)
+-{
+-      unsigned long cr4;
+-
+-      mmu_cr4_features &= ~mask;
+-      if (trampoline_cr4_features)
+-              *trampoline_cr4_features = mmu_cr4_features;
+-      cr4 = read_cr4();
+-      cr4 &= ~mask;
+-      write_cr4(cr4);
+-}
++extern void set_in_cr4(unsigned long mask);
++extern void clear_in_cr4(unsigned long mask);
+ typedef struct {
+       unsigned long           seg;
+@@ -823,11 +815,18 @@ static inline void spin_lock_prefetch(const void *x)
+  */
+ #define TASK_SIZE             PAGE_OFFSET
+ #define TASK_SIZE_MAX         TASK_SIZE
++
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_SIZE    (TASK_SIZE / 2)
++#define STACK_TOP             ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
++#else
+ #define STACK_TOP             TASK_SIZE
+-#define STACK_TOP_MAX         STACK_TOP
++#endif
++
++#define STACK_TOP_MAX         TASK_SIZE
+ #define INIT_THREAD  {                                                          \
+-      .sp0                    = sizeof(init_stack) + (long)&init_stack, \
++      .sp0                    = sizeof(init_stack) + (long)&init_stack - 8, \
+       .vm86_info              = NULL,                                   \
+       .sysenter_cs            = __KERNEL_CS,                            \
+       .io_bitmap_ptr          = NULL,                                   \
+@@ -841,7 +840,7 @@ static inline void spin_lock_prefetch(const void *x)
+  */
+ #define INIT_TSS  {                                                     \
+       .x86_tss = {                                                      \
+-              .sp0            = sizeof(init_stack) + (long)&init_stack, \
++              .sp0            = sizeof(init_stack) + (long)&init_stack - 8, \
+               .ss0            = __KERNEL_DS,                            \
+               .ss1            = __KERNEL_CS,                            \
+               .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,               \
+@@ -852,11 +851,7 @@ static inline void spin_lock_prefetch(const void *x)
+ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+ #define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
+-#define KSTK_TOP(info)                                                 \
+-({                                                                     \
+-       unsigned long *__ptr = (unsigned long *)(info);                 \
+-       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
+-})
++#define KSTK_TOP(info)         ((container_of(info, struct task_struct, tinfo))->thread.sp0)
+ /*
+  * The below -8 is to reserve 8 bytes on top of the ring0 stack.
+@@ -871,7 +866,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+ #define task_pt_regs(task)                                             \
+ ({                                                                     \
+        struct pt_regs *__regs__;                                       \
+-       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
++       __regs__ = (struct pt_regs *)((task)->thread.sp0);              \
+        __regs__ - 1;                                                   \
+ })
+@@ -881,13 +876,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+ /*
+  * User space process size. 47bits minus one guard page.
+  */
+-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
++#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
+ /* This decides where the kernel will search for a free chunk of vm
+  * space during mmap's.
+  */
+ #define IA32_PAGE_OFFSET      ((current->personality & ADDR_LIMIT_3GB) ? \
+-                                      0xc0000000 : 0xFFFFe000)
++                                      0xc0000000 : 0xFFFFf000)
+ #define TASK_SIZE             (test_thread_flag(TIF_ADDR32) ? \
+                                       IA32_PAGE_OFFSET : TASK_SIZE_MAX)
+@@ -898,11 +893,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+ #define STACK_TOP_MAX         TASK_SIZE_MAX
+ #define INIT_THREAD  { \
+-      .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++      .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
+ }
+ #define INIT_TSS  { \
+-      .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++      .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
+ }
+ /*
+@@ -930,6 +925,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
+  */
+ #define TASK_UNMAPPED_BASE    (PAGE_ALIGN(TASK_SIZE / 3))
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_UNMAPPED_BASE   (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
++#endif
++
+ #define KSTK_EIP(task)                (task_pt_regs(task)->ip)
+ /* Get/set a process' ability to use the timestamp counter instruction */
+@@ -942,7 +941,8 @@ extern int set_tsc_mode(unsigned int val);
+ extern u16 amd_get_nb_id(int cpu);
+ struct aperfmperf {
+-      u64 aperf, mperf;
++      u64 aperf __intentional_overflow(0);
++      u64 mperf __intentional_overflow(0);
+ };
+ static inline void get_aperfmperf(struct aperfmperf *am)
+@@ -970,7 +970,7 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
+       return ratio;
+ }
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+ extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+ void default_idle(void);
+@@ -980,6 +980,6 @@ bool xen_set_default_idle(void);
+ #define xen_set_default_idle 0
+ #endif
+-void stop_this_cpu(void *dummy);
++void stop_this_cpu(void *dummy) __noreturn;
+ #endif /* _ASM_X86_PROCESSOR_H */
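The PCID/INVPCID constants added above are what the switch_mm() hunk in mmu_context.h consumes. Two hypothetical helpers (names and structure are illustrative, not from the patch) show how they compose:

/* CR3 with a PCID tag: bits 0-11 select the PCID, bit 63 (PCID_NOFLUSH)
 * tells the CPU to keep TLB entries tagged with that PCID */
static inline unsigned long example_build_cr3(pgd_t *pgd, unsigned long pcid, bool noflush)
{
        unsigned long cr3 = __pa(pgd) | pcid;

        if (noflush)
                cr3 |= PCID_NOFLUSH;
        return cr3;
}

/* flush every TLB entry belonging to one PCID via INVPCID type 1
 * (INVPCID_SINGLE_CONTEXT); descriptor[0] carries the PCID */
static inline void example_invpcid_single_context(unsigned long pcid)
{
        unsigned long descriptor[2] = { pcid, 0 };

        asm volatile(__ASM_INVPCID
                     : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
}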
+diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
+index 942a086..6c26446 100644
+--- a/arch/x86/include/asm/ptrace.h
++++ b/arch/x86/include/asm/ptrace.h
+@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
+ }
+ /*
+- * user_mode_vm(regs) determines whether a register set came from user mode.
++ * user_mode(regs) determines whether a register set came from user mode.
+  * This is true if V8086 mode was enabled OR if the register set was from
+  * protected mode with RPL-3 CS value.  This tricky test checks that with
+  * one comparison.  Many places in the kernel can bypass this full check
+- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
++ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
++ * be used.
+  */
+-static inline int user_mode(struct pt_regs *regs)
++static inline int user_mode_novm(struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+       return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
+ #else
+-      return !!(regs->cs & 3);
++      return !!(regs->cs & SEGMENT_RPL_MASK);
+ #endif
+ }
+-static inline int user_mode_vm(struct pt_regs *regs)
++static inline int user_mode(struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+       return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
+               USER_RPL;
+ #else
+-      return user_mode(regs);
++      return user_mode_novm(regs);
+ #endif
+ }
+@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
+ #ifdef CONFIG_X86_64
+ static inline bool user_64bit_mode(struct pt_regs *regs)
+ {
++      unsigned long cs = regs->cs & 0xffff;
+ #ifndef CONFIG_PARAVIRT
+       /*
+        * On non-paravirt systems, this is the only long mode CPL 3
+        * selector.  We do not allow long mode selectors in the LDT.
+        */
+-      return regs->cs == __USER_CS;
++      return cs == __USER_CS;
+ #else
+       /* Headers are too twisted for this to go in paravirt.h. */
+-      return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
++      return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
+ #endif
+ }
+@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
+        * Traps from the kernel do not save sp and ss.
+        * Use the helper function to retrieve sp.
+        */
+-      if (offset == offsetof(struct pt_regs, sp) &&
+-          regs->cs == __KERNEL_CS)
+-              return kernel_stack_pointer(regs);
++      if (offset == offsetof(struct pt_regs, sp)) {
++              unsigned long cs = regs->cs & 0xffff;
++              if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
++                      return kernel_stack_pointer(regs);
++      }
+ #endif
+       return *(unsigned long *)((unsigned long)regs + offset);
+ }
+diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
+index 9c6b890..5305f53 100644
+--- a/arch/x86/include/asm/realmode.h
++++ b/arch/x86/include/asm/realmode.h
+@@ -22,16 +22,14 @@ struct real_mode_header {
+ #endif
+       /* APM/BIOS reboot */
+       u32     machine_real_restart_asm;
+-#ifdef CONFIG_X86_64
+       u32     machine_real_restart_seg;
+-#endif
+ };
+ /* This must match data at trampoline_32/64.S */
+ struct trampoline_header {
+ #ifdef CONFIG_X86_32
+       u32 start;
+-      u16 gdt_pad;
++      u16 boot_cs;
+       u16 gdt_limit;
+       u32 gdt_base;
+ #else
+diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
+index a82c4f1..ac45053 100644
+--- a/arch/x86/include/asm/reboot.h
++++ b/arch/x86/include/asm/reboot.h
+@@ -6,13 +6,13 @@
+ struct pt_regs;
+ struct machine_ops {
+-      void (*restart)(char *cmd);
+-      void (*halt)(void);
+-      void (*power_off)(void);
++      void (* __noreturn restart)(char *cmd);
++      void (* __noreturn halt)(void);
++      void (* __noreturn power_off)(void);
+       void (*shutdown)(void);
+       void (*crash_shutdown)(struct pt_regs *);
+-      void (*emergency_restart)(void);
+-};
++      void (* __noreturn emergency_restart)(void);
++} __no_const;
+ extern struct machine_ops machine_ops;
+diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
+index cad82c9..2e5c5c1 100644
+--- a/arch/x86/include/asm/rwsem.h
++++ b/arch/x86/include/asm/rwsem.h
+@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
+ {
+       asm volatile("# beginning down_read\n\t"
+                    LOCK_PREFIX _ASM_INC "(%1)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX _ASM_DEC "(%1)\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
+                    /* adds 0x00000001 */
+                    "  jns        1f\n"
+                    "  call call_rwsem_down_read_failed\n"
+@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
+                    "1:\n\t"
+                    "  mov          %1,%2\n\t"
+                    "  add          %3,%2\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   "sub %3,%2\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
+                    "  jle          2f\n\t"
+                    LOCK_PREFIX "  cmpxchg  %2,%0\n\t"
+                    "  jnz          1b\n\t"
+@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+       long tmp;
+       asm volatile("# beginning down_write\n\t"
+                    LOCK_PREFIX "  xadd      %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   "mov %1,(%2)\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
+                    /* adds 0xffff0001, returns the old value */
+                    "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
+                    /* was the active mask 0 before? */
+@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
+       long tmp;
+       asm volatile("# beginning __up_read\n\t"
+                    LOCK_PREFIX "  xadd      %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   "mov %1,(%2)\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
+                    /* subtracts 1, returns the old value */
+                    "  jns        1f\n\t"
+                    "  call call_rwsem_wake\n" /* expects old value in %edx */
+@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
+       long tmp;
+       asm volatile("# beginning __up_write\n\t"
+                    LOCK_PREFIX "  xadd      %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   "mov %1,(%2)\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
+                    /* subtracts 0xffff0001, returns the old value */
+                    "  jns        1f\n\t"
+                    "  call call_rwsem_wake\n" /* expects old value in %edx */
+@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
+ {
+       asm volatile("# beginning __downgrade_write\n\t"
+                    LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
+                    /*
+                     * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
+                     *     0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
+@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
+  */
+ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
+ {
+-      asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
++      asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX _ASM_SUB "%1,%0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
+                    : "+m" (sem->count)
+                    : "er" (delta));
+ }
+@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
+  */
+ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
+ {
+-      return delta + xadd(&sem->count, delta);
++      return delta + xadd_check_overflow(&sem->count, delta);
+ }
+ #endif /* __KERNEL__ */
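The jno / int $4 sequences inserted throughout this file are the PAX_REFCOUNT overflow check. A stripped-down sketch of the same pattern on a bare counter (hypothetical helper, using the usual LOCK_PREFIX and _ASM_EXTABLE kernel macros):

static inline void example_checked_inc(long *counter)
{
        asm volatile(LOCK_PREFIX _ASM_INC "%0\n"

#ifdef CONFIG_PAX_REFCOUNT
                     "jno 0f\n"                   /* no signed overflow: skip ahead */
                     LOCK_PREFIX _ASM_DEC "%0\n"  /* overflow: undo the increment */
                     "int $4\n0:\n"               /* raise #OF, reported as a refcount bug */
                     _ASM_EXTABLE(0b, 0b)
#endif

                     : "+m" (*counter));
}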
+diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
+index c48a950..bc40804 100644
+--- a/arch/x86/include/asm/segment.h
++++ b/arch/x86/include/asm/segment.h
+@@ -64,10 +64,15 @@
+  *  26 - ESPFIX small SS
+  *  27 - per-cpu                      [ offset to per-cpu data area ]
+  *  28 - stack_canary-20              [ for stack protector ]
+- *  29 - unused
+- *  30 - unused
++ *  29 - PCI BIOS CS
++ *  30 - PCI BIOS DS
+  *  31 - TSS for double fault handler
+  */
++#define GDT_ENTRY_KERNEXEC_EFI_CS     (1)
++#define GDT_ENTRY_KERNEXEC_EFI_DS     (2)
++#define __KERNEXEC_EFI_CS     (GDT_ENTRY_KERNEXEC_EFI_CS*8)
++#define __KERNEXEC_EFI_DS     (GDT_ENTRY_KERNEXEC_EFI_DS*8)
++
+ #define GDT_ENTRY_TLS_MIN     6
+ #define GDT_ENTRY_TLS_MAX     (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+@@ -79,6 +84,8 @@
+ #define GDT_ENTRY_KERNEL_CS           (GDT_ENTRY_KERNEL_BASE+0)
++#define GDT_ENTRY_KERNEXEC_KERNEL_CS  (4)
++
+ #define GDT_ENTRY_KERNEL_DS           (GDT_ENTRY_KERNEL_BASE+1)
+ #define GDT_ENTRY_TSS                 (GDT_ENTRY_KERNEL_BASE+4)
+@@ -104,6 +111,12 @@
+ #define __KERNEL_STACK_CANARY         0
+ #endif
++#define GDT_ENTRY_PCIBIOS_CS          (GDT_ENTRY_KERNEL_BASE+17)
++#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
++
++#define GDT_ENTRY_PCIBIOS_DS          (GDT_ENTRY_KERNEL_BASE+18)
++#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
++
+ #define GDT_ENTRY_DOUBLEFAULT_TSS     31
+ /*
+@@ -141,7 +154,7 @@
+  */
+ /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
+-#define SEGMENT_IS_PNP_CODE(x)   (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
++#define SEGMENT_IS_PNP_CODE(x)   (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
+ #else
+@@ -165,6 +178,8 @@
+ #define __USER32_CS   (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
+ #define __USER32_DS   __USER_DS
++#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
++
+ #define GDT_ENTRY_TSS 8       /* needs two entries */
+ #define GDT_ENTRY_LDT 10 /* needs two entries */
+ #define GDT_ENTRY_TLS_MIN 12
+@@ -173,6 +188,8 @@
+ #define GDT_ENTRY_PER_CPU 15  /* Abused to load per CPU data from limit */
+ #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
++#define GDT_ENTRY_UDEREF_KERNEL_DS 16
++
+ /* TLS indexes for 64bit - hardcoded in arch_prctl */
+ #define FS_TLS 0
+ #define GS_TLS 1
+@@ -180,12 +197,14 @@
+ #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
+ #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
+-#define GDT_ENTRIES 16
++#define GDT_ENTRIES 17
+ #endif
+ #define __KERNEL_CS   (GDT_ENTRY_KERNEL_CS*8)
++#define __KERNEXEC_KERNEL_CS  (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
+ #define __KERNEL_DS   (GDT_ENTRY_KERNEL_DS*8)
++#define __UDEREF_KERNEL_DS    (GDT_ENTRY_UDEREF_KERNEL_DS*8)
+ #define __USER_DS     (GDT_ENTRY_DEFAULT_USER_DS*8+3)
+ #define __USER_CS     (GDT_ENTRY_DEFAULT_USER_CS*8+3)
+ #ifndef CONFIG_PARAVIRT
+@@ -265,7 +284,7 @@ static inline unsigned long get_limit(unsigned long segment)
+ {
+       unsigned long __limit;
+       asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
+-      return __limit + 1;
++      return __limit;
+ }
+ #endif /* !__ASSEMBLY__ */
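For orientation, the raw selector values implied by the new GDT entries (plain multiplication by 8 at RPL 0, derived from the defines above rather than stated in the patch):

/*   32-bit: GDT_ENTRY_KERNEXEC_KERNEL_CS = 4   -> __KERNEXEC_KERNEL_CS = 0x20
 *   64-bit: GDT_ENTRY_KERNEXEC_KERNEL_CS = 7   -> __KERNEXEC_KERNEL_CS = 0x38
 *   64-bit: GDT_ENTRY_UDEREF_KERNEL_DS   = 16  -> __UDEREF_KERNEL_DS   = 0x80
 *   32-bit: PCI BIOS CS/DS occupy GDT entries 29 and 30 -> selectors 0xE8 / 0xF0 */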
+diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
+index 8d3120f..352b440 100644
+--- a/arch/x86/include/asm/smap.h
++++ b/arch/x86/include/asm/smap.h
+@@ -25,11 +25,40 @@
+ #include <asm/alternative-asm.h>
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define ASM_PAX_OPEN_USERLAND                                 \
++      661: jmp 663f;                                          \
++      .pushsection .altinstr_replacement, "a" ;               \
++      662: pushq %rax; nop;                                   \
++      .popsection ;                                           \
++      .pushsection .altinstructions, "a" ;                    \
++      altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
++      .popsection ;                                           \
++      call __pax_open_userland;                               \
++      popq %rax;                                              \
++      663:
++
++#define ASM_PAX_CLOSE_USERLAND                                        \
++      661: jmp 663f;                                          \
++      .pushsection .altinstr_replacement, "a" ;               \
++      662: pushq %rax; nop;                                   \
++      .popsection;                                            \
++      .pushsection .altinstructions, "a" ;                    \
++      altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
++      .popsection;                                            \
++      call __pax_close_userland;                              \
++      popq %rax;                                              \
++      663:
++#else
++#define ASM_PAX_OPEN_USERLAND
++#define ASM_PAX_CLOSE_USERLAND
++#endif
++
+ #ifdef CONFIG_X86_SMAP
+ #define ASM_CLAC                                                      \
+       661: ASM_NOP3 ;                                                 \
+-      .pushsection .altinstr_replacement, "ax" ;                      \
++      .pushsection .altinstr_replacement, "a" ;                       \
+       662: __ASM_CLAC ;                                               \
+       .popsection ;                                                   \
+       .pushsection .altinstructions, "a" ;                            \
+@@ -38,7 +67,7 @@
+ #define ASM_STAC                                                      \
+       661: ASM_NOP3 ;                                                 \
+-      .pushsection .altinstr_replacement, "ax" ;                      \
++      .pushsection .altinstr_replacement, "a" ;                       \
+       662: __ASM_STAC ;                                               \
+       .popsection ;                                                   \
+       .pushsection .altinstructions, "a" ;                            \
+@@ -56,6 +85,37 @@
+ #include <asm/alternative.h>
++#define __HAVE_ARCH_PAX_OPEN_USERLAND
++#define __HAVE_ARCH_PAX_CLOSE_USERLAND
++
++extern void __pax_open_userland(void);
++static __always_inline unsigned long pax_open_userland(void)
++{
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
++              :
++              : [open] "i" (__pax_open_userland)
++              : "memory", "rax");
++#endif
++
++      return 0;
++}
++
++extern void __pax_close_userland(void);
++static __always_inline unsigned long pax_close_userland(void)
++{
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
++              :
++              : [close] "i" (__pax_close_userland)
++              : "memory", "rax");
++#endif
++
++      return 0;
++}
++
+ #ifdef CONFIG_X86_SMAP
+ static __always_inline void clac(void)
+diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
+index b073aae..39f9bdd 100644
+--- a/arch/x86/include/asm/smp.h
++++ b/arch/x86/include/asm/smp.h
+@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
+ /* cpus sharing the last level cache: */
+ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
+ DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
+-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
++DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
+ static inline struct cpumask *cpu_sibling_mask(int cpu)
+ {
+@@ -79,7 +79,7 @@ struct smp_ops {
+       void (*send_call_func_ipi)(const struct cpumask *mask);
+       void (*send_call_func_single_ipi)(int cpu);
+-};
++} __no_const;
+ /* Globals due to paravirt */
+ extern void set_cpu_sibling_map(int cpu);
+@@ -191,14 +191,8 @@ extern unsigned disabled_cpus __cpuinitdata;
+ extern int safe_smp_processor_id(void);
+ #elif defined(CONFIG_X86_64_SMP)
+-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
+-
+-#define stack_smp_processor_id()                                      \
+-({                                                            \
+-      struct thread_info *ti;                                         \
+-      __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));      \
+-      ti->cpu;                                                        \
+-})
++#define raw_smp_processor_id()                (this_cpu_read(cpu_number))
++#define stack_smp_processor_id()      raw_smp_processor_id()
+ #define safe_smp_processor_id()               smp_processor_id()
+ #endif
+diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
+index 33692ea..350a534 100644
+--- a/arch/x86/include/asm/spinlock.h
++++ b/arch/x86/include/asm/spinlock.h
+@@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
+ static inline void arch_read_lock(arch_rwlock_t *rw)
+ {
+       asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
+                    "jns 1f\n"
+                    "call __read_lock_failed\n\t"
+                    "1:\n"
+@@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
+ static inline void arch_write_lock(arch_rwlock_t *rw)
+ {
+       asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
+                    "jz 1f\n"
+                    "call __write_lock_failed\n\t"
+                    "1:\n"
+@@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
+ static inline void arch_read_unlock(arch_rwlock_t *rw)
+ {
+-      asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
++      asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
+                    :"+m" (rw->lock) : : "memory");
+ }
+ static inline void arch_write_unlock(arch_rwlock_t *rw)
+ {
+-      asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
++      asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                   "jno 0f\n"
++                   LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
++                   "int $4\n0:\n"
++                   _ASM_EXTABLE(0b, 0b)
++#endif
++
+                    : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
+ }
+diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
+index 6a99859..03cb807 100644
+--- a/arch/x86/include/asm/stackprotector.h
++++ b/arch/x86/include/asm/stackprotector.h
+@@ -47,7 +47,7 @@
+  * head_32 for boot CPU and setup_per_cpu_areas() for others.
+  */
+ #define GDT_STACK_CANARY_INIT                                         \
+-      [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
++      [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
+ /*
+  * Initialize the stackprotector canary value.
+@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
+ static inline void load_stack_canary_segment(void)
+ {
+-#ifdef CONFIG_X86_32
++#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
+       asm volatile ("mov %0, %%gs" : : "r" (0));
+ #endif
+ }
+diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
+index 70bbe39..4ae2bd4 100644
+--- a/arch/x86/include/asm/stacktrace.h
++++ b/arch/x86/include/asm/stacktrace.h
+@@ -11,28 +11,20 @@
+ extern int kstack_depth_to_print;
+-struct thread_info;
++struct task_struct;
+ struct stacktrace_ops;
+-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
+-                                    unsigned long *stack,
+-                                    unsigned long bp,
+-                                    const struct stacktrace_ops *ops,
+-                                    void *data,
+-                                    unsigned long *end,
+-                                    int *graph);
++typedef unsigned long walk_stack_t(struct task_struct *task,
++                                 void *stack_start,
++                                 unsigned long *stack,
++                                 unsigned long bp,
++                                 const struct stacktrace_ops *ops,
++                                 void *data,
++                                 unsigned long *end,
++                                 int *graph);
+-extern unsigned long
+-print_context_stack(struct thread_info *tinfo,
+-                  unsigned long *stack, unsigned long bp,
+-                  const struct stacktrace_ops *ops, void *data,
+-                  unsigned long *end, int *graph);
+-
+-extern unsigned long
+-print_context_stack_bp(struct thread_info *tinfo,
+-                     unsigned long *stack, unsigned long bp,
+-                     const struct stacktrace_ops *ops, void *data,
+-                     unsigned long *end, int *graph);
++extern walk_stack_t print_context_stack;
++extern walk_stack_t print_context_stack_bp;
+ /* Generic stack tracer with callbacks */
+@@ -40,7 +32,7 @@ struct stacktrace_ops {
+       void (*address)(void *data, unsigned long address, int reliable);
+       /* On negative return stop dumping */
+       int (*stack)(void *data, char *name);
+-      walk_stack_t    walk_stack;
++      walk_stack_t    *walk_stack;
+ };
+ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
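
The stacktrace.h hunk above turns `walk_stack_t` from a pointer-to-function typedef into a plain function typedef: the two stack walkers can then be declared as `extern walk_stack_t print_context_stack;`, while the ops structure spells its pointer explicitly as `walk_stack_t *walk_stack`. The same C mechanics in miniature (all names below are invented for illustration):

#include <stdio.h>

/* function typedef (not a pointer): it names the signature itself */
typedef int handler_t(int value);

/* declarations reuse the typedef, like "extern walk_stack_t print_context_stack;" */
extern handler_t double_it;

struct ops {
	handler_t *handle;	/* the pointer is now spelled explicitly */
};

int double_it(int value)
{
	return value * 2;
}

int main(void)
{
	struct ops o = { .handle = double_it };

	printf("%d\n", o.handle(21));
	return 0;
}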
+diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
+index 4ec45b3..a4f0a8a 100644
+--- a/arch/x86/include/asm/switch_to.h
++++ b/arch/x86/include/asm/switch_to.h
+@@ -108,7 +108,7 @@ do {                                                                       \
+            "call __switch_to\n\t"                                       \
+            "movq "__percpu_arg([current_task])",%%rsi\n\t"              \
+            __switch_canary                                              \
+-           "movq %P[thread_info](%%rsi),%%r8\n\t"                       \
++           "movq "__percpu_arg([thread_info])",%%r8\n\t"                \
+            "movq %%rax,%%rdi\n\t"                                       \
+            "testl  %[_tif_fork],%P[ti_flags](%%r8)\n\t"                 \
+            "jnz   ret_from_fork\n\t"                                    \
+@@ -119,7 +119,7 @@ do {                                                                       \
+              [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
+              [ti_flags] "i" (offsetof(struct thread_info, flags)),      \
+              [_tif_fork] "i" (_TIF_FORK),                               \
+-             [thread_info] "i" (offsetof(struct task_struct, stack)),   \
++             [thread_info] "m" (current_tinfo),                         \
+              [current_task] "m" (current_task)                          \
+              __switch_canary_iparam                                     \
+            : "memory", "cc" __EXTRA_CLOBBER)
+diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
+index a1df6e8..e002940 100644
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -10,6 +10,7 @@
+ #include <linux/compiler.h>
+ #include <asm/page.h>
+ #include <asm/types.h>
++#include <asm/percpu.h>
+ /*
+  * low level task data that entry.S needs immediate access to
+@@ -23,7 +24,6 @@ struct exec_domain;
+ #include <linux/atomic.h>
+ struct thread_info {
+-      struct task_struct      *task;          /* main task structure */
+       struct exec_domain      *exec_domain;   /* execution domain */
+       __u32                   flags;          /* low level flags */
+       __u32                   status;         /* thread synchronous flags */
+@@ -33,19 +33,13 @@ struct thread_info {
+       mm_segment_t            addr_limit;
+       struct restart_block    restart_block;
+       void __user             *sysenter_return;
+-#ifdef CONFIG_X86_32
+-      unsigned long           previous_esp;   /* ESP of the previous stack in
+-                                                 case of nested (IRQ) stacks
+-                                              */
+-      __u8                    supervisor_stack[0];
+-#endif
++      unsigned long           lowest_stack;
+       unsigned int            sig_on_uaccess_error:1;
+       unsigned int            uaccess_err:1;  /* uaccess failed */
+ };
+-#define INIT_THREAD_INFO(tsk)                 \
++#define INIT_THREAD_INFO                      \
+ {                                             \
+-      .task           = &tsk,                 \
+       .exec_domain    = &default_exec_domain, \
+       .flags          = 0,                    \
+       .cpu            = 0,                    \
+@@ -56,7 +50,7 @@ struct thread_info {
+       },                                      \
+ }
+-#define init_thread_info      (init_thread_union.thread_info)
++#define init_thread_info      (init_thread_union.stack)
+ #define init_stack            (init_thread_union.stack)
+ #else /* !__ASSEMBLY__ */
+@@ -97,6 +91,7 @@ struct thread_info {
+ #define TIF_SYSCALL_TRACEPOINT        28      /* syscall tracepoint instrumentation */
+ #define TIF_ADDR32            29      /* 32-bit address space on 64 bits */
+ #define TIF_X32                       30      /* 32-bit native x86-64 binary */
++#define TIF_GRSEC_SETXID      31      /* update credentials on syscall entry/exit */
+ #define _TIF_SYSCALL_TRACE    (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_NOTIFY_RESUME    (1 << TIF_NOTIFY_RESUME)
+@@ -121,17 +116,18 @@ struct thread_info {
+ #define _TIF_SYSCALL_TRACEPOINT       (1 << TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_ADDR32           (1 << TIF_ADDR32)
+ #define _TIF_X32              (1 << TIF_X32)
++#define _TIF_GRSEC_SETXID     (1 << TIF_GRSEC_SETXID)
+ /* work to do in syscall_trace_enter() */
+ #define _TIF_WORK_SYSCALL_ENTRY       \
+       (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT |   \
+        _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT |     \
+-       _TIF_NOHZ)
++       _TIF_NOHZ | _TIF_GRSEC_SETXID)
+ /* work to do in syscall_trace_leave() */
+ #define _TIF_WORK_SYSCALL_EXIT        \
+       (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP |    \
+-       _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
++       _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
+ /* work to do on interrupt/exception return */
+ #define _TIF_WORK_MASK                                                        \
+@@ -142,7 +138,7 @@ struct thread_info {
+ /* work to do on any return to user space */
+ #define _TIF_ALLWORK_MASK                                             \
+       ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT |       \
+-      _TIF_NOHZ)
++      _TIF_NOHZ | _TIF_GRSEC_SETXID)
+ /* Only used for 64 bit */
+ #define _TIF_DO_NOTIFY_MASK                                           \
+@@ -158,45 +154,40 @@ struct thread_info {
+ #define PREEMPT_ACTIVE                0x10000000
+-#ifdef CONFIG_X86_32
+-
+-#define STACK_WARN    (THREAD_SIZE/8)
+-/*
+- * macros/functions for gaining access to the thread information structure
+- *
+- * preempt_count needs to be 1 initially, until the scheduler is functional.
+- */
+-#ifndef __ASSEMBLY__
+-
+-
+-/* how to get the current stack pointer from C */
+-register unsigned long current_stack_pointer asm("esp") __used;
+-
+-/* how to get the thread information struct from C */
+-static inline struct thread_info *current_thread_info(void)
+-{
+-      return (struct thread_info *)
+-              (current_stack_pointer & ~(THREAD_SIZE - 1));
+-}
+-
+-#else /* !__ASSEMBLY__ */
+-
++#ifdef __ASSEMBLY__
+ /* how to get the thread information struct from ASM */
+ #define GET_THREAD_INFO(reg)   \
+-      movl $-THREAD_SIZE, reg; \
+-      andl %esp, reg
++      mov PER_CPU_VAR(current_tinfo), reg
+ /* use this one if reg already contains %esp */
+-#define GET_THREAD_INFO_WITH_ESP(reg) \
+-      andl $-THREAD_SIZE, reg
++#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
++#else
++/* how to get the thread information struct from C */
++DECLARE_PER_CPU(struct thread_info *, current_tinfo);
++
++static __always_inline struct thread_info *current_thread_info(void)
++{
++      return this_cpu_read_stable(current_tinfo);
++}
++#endif
++
++#ifdef CONFIG_X86_32
++
++#define STACK_WARN    (THREAD_SIZE/8)
++/*
++ * macros/functions for gaining access to the thread information structure
++ *
++ * preempt_count needs to be 1 initially, until the scheduler is functional.
++ */
++#ifndef __ASSEMBLY__
++
++/* how to get the current stack pointer from C */
++register unsigned long current_stack_pointer asm("esp") __used;
+ #endif
+ #else /* X86_32 */
+-#include <asm/percpu.h>
+-#define KERNEL_STACK_OFFSET (5*8)
+-
+ /*
+  * macros/functions for gaining access to the thread information structure
+  * preempt_count needs to be 1 initially, until the scheduler is functional.
+@@ -204,27 +195,8 @@ static inline struct thread_info *current_thread_info(void)
+ #ifndef __ASSEMBLY__
+ DECLARE_PER_CPU(unsigned long, kernel_stack);
+-static inline struct thread_info *current_thread_info(void)
+-{
+-      struct thread_info *ti;
+-      ti = (void *)(this_cpu_read_stable(kernel_stack) +
+-                    KERNEL_STACK_OFFSET - THREAD_SIZE);
+-      return ti;
+-}
+-
+-#else /* !__ASSEMBLY__ */
+-
+-/* how to get the thread information struct from ASM */
+-#define GET_THREAD_INFO(reg) \
+-      movq PER_CPU_VAR(kernel_stack),reg ; \
+-      subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
+-
+-/*
+- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
+- * a certain register (to be used in assembler memory operands).
+- */
+-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
+-
++/* how to get the current stack pointer from C */
++register unsigned long current_stack_pointer asm("rsp") __used;
+ #endif
+ #endif /* !X86_32 */
+@@ -283,5 +255,12 @@ static inline bool is_ia32_task(void)
+ extern void arch_task_cache_init(void);
+ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+ extern void arch_release_task_struct(struct task_struct *tsk);
++
++#define __HAVE_THREAD_FUNCTIONS
++#define task_thread_info(task)        (&(task)->tinfo)
++#define task_stack_page(task) ((task)->stack)
++#define setup_thread_stack(p, org) do {} while (0)
++#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
++
+ #endif
+ #endif /* _ASM_X86_THREAD_INFO_H */
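
The thread_info.h changes above drop the stack-mask derivation of `current_thread_info()` in favour of a per-CPU pointer (`current_tinfo`) and move the structure itself into `task_struct` (`task_thread_info(task)` becomes `&(task)->tinfo`). A rough userspace analogue of the two lookup strategies, with `__thread` standing in for a per-CPU variable (all names invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define THREAD_SZ 8192UL

struct fake_thread_info { int flags; };

/* old scheme: thread_info sits at the bottom of the kernel stack, so the
 * lookup masks the stack pointer down to the THREAD_SZ boundary */
static uintptr_t lookup_by_stack_mask(uintptr_t stack_ptr)
{
	return stack_ptr & ~(THREAD_SZ - 1);
}

/* new scheme in the patch: a per-CPU pointer names the structure directly */
static __thread struct fake_thread_info *current_tinfo;

static struct fake_thread_info *lookup_by_percpu(void)
{
	return current_tinfo;
}

int main(void)
{
	static struct fake_thread_info ti;

	current_tinfo = &ti;
	printf("per-cpu lookup:    %p\n", (void *)lookup_by_percpu());
	printf("mask of sp 0x3004: 0x%lx\n",
	       (unsigned long)lookup_by_stack_mask(0x3004));
	return 0;
}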
+diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
+index 50a7fc0..7c437a7 100644
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -17,18 +17,40 @@
+ static inline void __native_flush_tlb(void)
+ {
++      if (static_cpu_has(X86_FEATURE_INVPCID)) {
++              unsigned long descriptor[2];
++              asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory");
++              return;
++      }
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      if (static_cpu_has(X86_FEATURE_PCID)) {
++              unsigned int cpu = raw_get_cpu();
++
++              native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
++              native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
++              raw_put_cpu_no_resched();
++              return;
++      }
++#endif
++
+       native_write_cr3(native_read_cr3());
+ }
+ static inline void __native_flush_tlb_global_irq_disabled(void)
+ {
+-      unsigned long cr4;
++      if (static_cpu_has(X86_FEATURE_INVPCID)) {
++              unsigned long descriptor[2];
++              asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
++      } else {
++              unsigned long cr4;
+-      cr4 = native_read_cr4();
+-      /* clear PGE */
+-      native_write_cr4(cr4 & ~X86_CR4_PGE);
+-      /* write old PGE again and flush TLBs */
+-      native_write_cr4(cr4);
++              cr4 = native_read_cr4();
++              /* clear PGE */
++              native_write_cr4(cr4 & ~X86_CR4_PGE);
++              /* write old PGE again and flush TLBs */
++              native_write_cr4(cr4);
++      }
+ }
+ static inline void __native_flush_tlb_global(void)
+@@ -49,6 +71,42 @@ static inline void __native_flush_tlb_global(void)
+ static inline void __native_flush_tlb_single(unsigned long addr)
+ {
++
++      if (static_cpu_has(X86_FEATURE_INVPCID)) {
++              unsigned long descriptor[2];
++
++              descriptor[0] = PCID_KERNEL;
++              descriptor[1] = addr;
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++              if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
++                      if (addr < TASK_SIZE_MAX)
++                              descriptor[1] += pax_user_shadow_base;
++                      asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
++              }
++
++              descriptor[0] = PCID_USER;
++              descriptor[1] = addr;
++#endif
++
++              asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
++              return;
++      }
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      if (static_cpu_has(X86_FEATURE_PCID)) {
++              unsigned int cpu = raw_get_cpu();
++
++              native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
++              asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
++              native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
++              raw_put_cpu_no_resched();
++
++              if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
++                      addr += pax_user_shadow_base;
++      }
++#endif
++
+       asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+ }
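
The tlbflush.h hunks layer three strategies: use INVPCID when the CPU supports it, otherwise (on x86_64 with PAX_MEMORY_UDEREF) reload CR3 with the user and kernel PCIDs, and otherwise fall back to the plain CR3 rewrite. A compressed sketch of just that selection order, with the hardware operations replaced by stubs that exist only in this example:

#include <stdbool.h>
#include <stdio.h>

/* stubs standing in for the feature checks and privileged operations */
static bool has_invpcid(void) { return false; }
static bool has_pcid(void)    { return true; }
static void invpcid_flush_all(void)    { puts("invpcid: flush all"); }
static void reload_cr3_with_pcid(void) { puts("cr3 reload, user+kernel PCID"); }
static void reload_cr3(void)           { puts("plain cr3 reload"); }

/* mirrors the order of checks in the patched __native_flush_tlb() */
static void flush_tlb_sketch(void)
{
	if (has_invpcid()) {
		invpcid_flush_all();
		return;
	}
	if (has_pcid()) {		/* the UDEREF path in the patch */
		reload_cr3_with_pcid();
		return;
	}
	reload_cr3();
}

int main(void)
{
	flush_tlb_sketch();
	return 0;
}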
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 5ee2687..74590b9 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -7,6 +7,7 @@
+ #include <linux/compiler.h>
+ #include <linux/thread_info.h>
+ #include <linux/string.h>
++#include <linux/sched.h>
+ #include <asm/asm.h>
+ #include <asm/page.h>
+ #include <asm/smap.h>
+@@ -29,7 +30,12 @@
+ #define get_ds()      (KERNEL_DS)
+ #define get_fs()      (current_thread_info()->addr_limit)
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++void __set_fs(mm_segment_t x);
++void set_fs(mm_segment_t x);
++#else
+ #define set_fs(x)     (current_thread_info()->addr_limit = (x))
++#endif
+ #define segment_eq(a, b)      ((a).seg == (b).seg)
+@@ -77,8 +83,33 @@
+  * checks that the pointer is in the user space range - after calling
+  * this function, memory access functions may still return -EFAULT.
+  */
+-#define access_ok(type, addr, size) \
+-      (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
++#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
++#define access_ok(type, addr, size)                                   \
++({                                                                    \
++      long __size = size;                                             \
++      unsigned long __addr = (unsigned long)addr;                     \
++      unsigned long __addr_ao = __addr & PAGE_MASK;                   \
++      unsigned long __end_ao = __addr + __size - 1;                   \
++      bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
++      if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
++              while(__addr_ao <= __end_ao) {                          \
++                      char __c_ao;                                    \
++                      __addr_ao += PAGE_SIZE;                         \
++                      if (__size > PAGE_SIZE)                         \
++                              cond_resched();                         \
++                      if (__get_user(__c_ao, (char __user *)__addr))  \
++                              break;                                  \
++                      if (type != VERIFY_WRITE) {                     \
++                              __addr = __addr_ao;                     \
++                              continue;                               \
++                      }                                               \
++                      if (__put_user(__c_ao, (char __user *)__addr))  \
++                              break;                                  \
++                      __addr = __addr_ao;                             \
++              }                                                       \
++      }                                                               \
++      __ret_ao;                                                       \
++})
+ /*
+  * The exception table consists of pairs of addresses relative to the
+@@ -165,10 +196,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+       register __inttype(*(ptr)) __val_gu asm("%edx");                \
+       __chk_user_ptr(ptr);                                            \
+       might_fault();                                                  \
++      pax_open_userland();                                            \
+       asm volatile("call __get_user_%P3"                              \
+                    : "=a" (__ret_gu), "=r" (__val_gu)                 \
+                    : "0" (ptr), "i" (sizeof(*(ptr))));                \
+       (x) = (__typeof__(*(ptr))) __val_gu;                            \
++      pax_close_userland();                                           \
+       __ret_gu;                                                       \
+ })
+@@ -176,13 +209,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+       asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
+                    : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+-
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg "gs;"
++#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
++#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
++#else
++#define __copyuser_seg
++#define __COPYUSER_SET_ES
++#define __COPYUSER_RESTORE_ES
++#endif
+ #ifdef CONFIG_X86_32
+ #define __put_user_asm_u64(x, addr, err, errret)                      \
+       asm volatile(ASM_STAC "\n"                                      \
+-                   "1:        movl %%eax,0(%2)\n"                     \
+-                   "2:        movl %%edx,4(%2)\n"                     \
++                   "1:        "__copyuser_seg"movl %%eax,0(%2)\n"     \
++                   "2:        "__copyuser_seg"movl %%edx,4(%2)\n"     \
+                    "3: " ASM_CLAC "\n"                                \
+                    ".section .fixup,\"ax\"\n"                         \
+                    "4:        movl %3,%0\n"                           \
+@@ -195,8 +236,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+ #define __put_user_asm_ex_u64(x, addr)                                        \
+       asm volatile(ASM_STAC "\n"                                      \
+-                   "1:        movl %%eax,0(%1)\n"                     \
+-                   "2:        movl %%edx,4(%1)\n"                     \
++                   "1:        "__copyuser_seg"movl %%eax,0(%1)\n"     \
++                   "2:        "__copyuser_seg"movl %%edx,4(%1)\n"     \
+                    "3: " ASM_CLAC "\n"                                \
+                    _ASM_EXTABLE_EX(1b, 2b)                            \
+                    _ASM_EXTABLE_EX(2b, 3b)                            \
+@@ -246,7 +287,8 @@ extern void __put_user_8(void);
+       __typeof__(*(ptr)) __pu_val;                            \
+       __chk_user_ptr(ptr);                                    \
+       might_fault();                                          \
+-      __pu_val = x;                                           \
++      __pu_val = (x);                                         \
++      pax_open_userland();                                    \
+       switch (sizeof(*(ptr))) {                               \
+       case 1:                                                 \
+               __put_user_x(1, __pu_val, ptr, __ret_pu);       \
+@@ -264,6 +306,7 @@ extern void __put_user_8(void);
+               __put_user_x(X, __pu_val, ptr, __ret_pu);       \
+               break;                                          \
+       }                                                       \
++      pax_close_userland();                                   \
+       __ret_pu;                                               \
+ })
+@@ -344,8 +387,10 @@ do {                                                                      \
+ } while (0)
+ #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)     \
++do {                                                                  \
++      pax_open_userland();                                            \
+       asm volatile(ASM_STAC "\n"                                      \
+-                   "1:        mov"itype" %2,%"rtype"1\n"              \
++                   "1:        "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
+                    "2: " ASM_CLAC "\n"                                \
+                    ".section .fixup,\"ax\"\n"                         \
+                    "3:        mov %3,%0\n"                            \
+@@ -353,8 +398,10 @@ do {                                                                      \
+                    "  jmp 2b\n"                                       \
+                    ".previous\n"                                      \
+                    _ASM_EXTABLE(1b, 3b)                               \
+-                   : "=r" (err), ltype(x)                             \
+-                   : "m" (__m(addr)), "i" (errret), "0" (err))
++                   : "=r" (err), ltype (x)                            \
++                   : "m" (__m(addr)), "i" (errret), "0" (err));       \
++      pax_close_userland();                                           \
++} while (0)
+ #define __get_user_size_ex(x, ptr, size)                              \
+ do {                                                                  \
+@@ -378,7 +425,7 @@ do {                                                                       \
+ } while (0)
+ #define __get_user_asm_ex(x, addr, itype, rtype, ltype)                       \
+-      asm volatile("1:        mov"itype" %1,%"rtype"0\n"              \
++      asm volatile("1:        "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
+                    "2:\n"                                             \
+                    _ASM_EXTABLE_EX(1b, 2b)                            \
+                    : ltype(x) : "m" (__m(addr)))
+@@ -395,13 +442,24 @@ do {                                                                     \
+       int __gu_err;                                                   \
+       unsigned long __gu_val;                                         \
+       __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
+-      (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
++      (x) = (__typeof__(*(ptr)))__gu_val;                             \
+       __gu_err;                                                       \
+ })
+ /* FIXME: this hack is definitely wrong -AK */
+ struct __large_struct { unsigned long buf[100]; };
+-#define __m(x) (*(struct __large_struct __user *)(x))
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define ____m(x)                                      \
++({                                                    \
++      unsigned long ____x = (unsigned long)(x);       \
++      if (____x < pax_user_shadow_base)               \
++              ____x += pax_user_shadow_base;          \
++      (typeof(x))____x;                               \
++})
++#else
++#define ____m(x) (x)
++#endif
++#define __m(x) (*(struct __large_struct __user *)____m(x))
+ /*
+  * Tell gcc we read from memory instead of writing: this is because
+@@ -409,8 +467,10 @@ struct __large_struct { unsigned long buf[100]; };
+  * aliasing issues.
+  */
+ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)     \
++do {                                                                  \
++      pax_open_userland();                                            \
+       asm volatile(ASM_STAC "\n"                                      \
+-                   "1:        mov"itype" %"rtype"1,%2\n"              \
++                   "1:        "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
+                    "2: " ASM_CLAC "\n"                                \
+                    ".section .fixup,\"ax\"\n"                         \
+                    "3:        mov %3,%0\n"                            \
+@@ -418,10 +478,12 @@ struct __large_struct { unsigned long buf[100]; };
+                    ".previous\n"                                      \
+                    _ASM_EXTABLE(1b, 3b)                               \
+                    : "=r"(err)                                        \
+-                   : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
++                   : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
++      pax_close_userland();                                           \
++} while (0)
+ #define __put_user_asm_ex(x, addr, itype, rtype, ltype)                       \
+-      asm volatile("1:        mov"itype" %"rtype"0,%1\n"              \
++      asm volatile("1:        "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
+                    "2:\n"                                             \
+                    _ASM_EXTABLE_EX(1b, 2b)                            \
+                    : : ltype(x), "m" (__m(addr)))
+@@ -431,11 +493,13 @@ struct __large_struct { unsigned long buf[100]; };
+  */
+ #define uaccess_try   do {                                            \
+       current_thread_info()->uaccess_err = 0;                         \
++      pax_open_userland();                                            \
+       stac();                                                         \
+       barrier();
+ #define uaccess_catch(err)                                            \
+       clac();                                                         \
++      pax_close_userland();                                           \
+       (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);    \
+ } while (0)
+@@ -460,8 +524,12 @@ struct __large_struct { unsigned long buf[100]; };
+  * On error, the variable @x is set to zero.
+  */
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __get_user(x, ptr)    get_user((x), (ptr))
++#else
+ #define __get_user(x, ptr)                                            \
+       __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
++#endif
+ /**
+  * __put_user: - Write a simple value into user space, with less checking.
+@@ -483,8 +551,12 @@ struct __large_struct { unsigned long buf[100]; };
+  * Returns zero on success, or -EFAULT on error.
+  */
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __put_user(x, ptr)    put_user((x), (ptr))
++#else
+ #define __put_user(x, ptr)                                            \
+       __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
++#endif
+ #define __get_user_unaligned __get_user
+ #define __put_user_unaligned __put_user
+@@ -502,7 +574,7 @@ struct __large_struct { unsigned long buf[100]; };
+ #define get_user_ex(x, ptr)   do {                                    \
+       unsigned long __gue_val;                                        \
+       __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));       \
+-      (x) = (__force __typeof__(*(ptr)))__gue_val;                    \
++      (x) = (__typeof__(*(ptr)))__gue_val;                            \
+ } while (0)
+ #define put_user_try          uaccess_try
+@@ -519,8 +591,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
+ extern __must_check long strlen_user(const char __user *str);
+ extern __must_check long strnlen_user(const char __user *str, long n);
+-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
+-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
++unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
++unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
+ /*
+  * movsl can be slow when source and dest are not both 8-byte aligned
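
The reworked `access_ok()` above is more than a range check: when the region crosses a page boundary it walks it page by page, reading one byte per page (and writing it back for `VERIFY_WRITE`) so that any fault is taken before the real copy begins. A simplified userspace model of that page walk, with a plain memory read standing in for `__get_user()` (names and constants here are illustrative only):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PG_SIZE 4096UL
#define PG_MASK (~(PG_SIZE - 1))

/* touch one byte per page of [addr, addr + size) */
static void prefault_range(volatile char *addr, size_t size)
{
	uintptr_t start = (uintptr_t)addr & PG_MASK;
	uintptr_t end = (uintptr_t)addr + size - 1;
	uintptr_t page;

	for (page = start; page <= end; page += PG_SIZE) {
		uintptr_t touch = page < (uintptr_t)addr ? (uintptr_t)addr : page;
		char c = *(volatile char *)touch;

		(void)c;	/* the patched access_ok() __put_user()s it back for writes */
	}
}

int main(void)
{
	static char buf[3 * 4096];

	memset(buf, 'x', sizeof(buf));
	prefault_range(buf, sizeof(buf));
	printf("prefaulted %zu bytes\n", sizeof(buf));
	return 0;
}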
+diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
+index 7f760a9..04b1c65 100644
+--- a/arch/x86/include/asm/uaccess_32.h
++++ b/arch/x86/include/asm/uaccess_32.h
+@@ -11,15 +11,15 @@
+ #include <asm/page.h>
+ unsigned long __must_check __copy_to_user_ll
+-              (void __user *to, const void *from, unsigned long n);
++              (void __user *to, const void *from, unsigned long n) __size_overflow(3);
+ unsigned long __must_check __copy_from_user_ll
+-              (void *to, const void __user *from, unsigned long n);
++              (void *to, const void __user *from, unsigned long n) __size_overflow(3);
+ unsigned long __must_check __copy_from_user_ll_nozero
+-              (void *to, const void __user *from, unsigned long n);
++              (void *to, const void __user *from, unsigned long n) __size_overflow(3);
+ unsigned long __must_check __copy_from_user_ll_nocache
+-              (void *to, const void __user *from, unsigned long n);
++              (void *to, const void __user *from, unsigned long n) __size_overflow(3);
+ unsigned long __must_check __copy_from_user_ll_nocache_nozero
+-              (void *to, const void __user *from, unsigned long n);
++              (void *to, const void __user *from, unsigned long n) __size_overflow(3);
+ /**
+  * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
+@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
+ static __always_inline unsigned long __must_check
+ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
++      check_object_size(from, n, true);
++
+       if (__builtin_constant_p(n)) {
+               unsigned long ret;
+@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
+ __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+       might_fault();
++
+       return __copy_to_user_inatomic(to, from, n);
+ }
+ static __always_inline unsigned long
+ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
+       /* Avoid zeroing the tail if the copy fails..
+        * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
+        * but as the zeroing behaviour is only significant when n is not
+@@ -137,6 +146,12 @@ static __always_inline unsigned long
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+       might_fault();
++
++      if ((long)n < 0)
++              return n;
++
++      check_object_size(to, n, false);
++
+       if (__builtin_constant_p(n)) {
+               unsigned long ret;
+@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
+                               const void __user *from, unsigned long n)
+ {
+       might_fault();
++
++      if ((long)n < 0)
++              return n;
++
+       if (__builtin_constant_p(n)) {
+               unsigned long ret;
+@@ -181,15 +200,19 @@ static __always_inline unsigned long
+ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
+                                 unsigned long n)
+ {
+-       return __copy_from_user_ll_nocache_nozero(to, from, n);
++      if ((long)n < 0)
++              return n;
++
++      return __copy_from_user_ll_nocache_nozero(to, from, n);
+ }
+-unsigned long __must_check copy_to_user(void __user *to,
+-                                      const void *from, unsigned long n);
+-unsigned long __must_check _copy_from_user(void *to,
+-                                        const void __user *from,
+-                                        unsigned long n);
+-
++extern void copy_to_user_overflow(void)
++#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
++      __compiletime_error("copy_to_user() buffer size is not provably correct")
++#else
++      __compiletime_warning("copy_to_user() buffer size is not provably correct")
++#endif
++;
+ extern void copy_from_user_overflow(void)
+ #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
+ #endif
+ ;
+-static inline unsigned long __must_check copy_from_user(void *to,
+-                                        const void __user *from,
+-                                        unsigned long n)
++/**
++ * copy_to_user: - Copy a block of data into user space.
++ * @to:   Destination address, in user space.
++ * @from: Source address, in kernel space.
++ * @n:    Number of bytes to copy.
++ *
++ * Context: User context only.  This function may sleep.
++ *
++ * Copy data from kernel space to user space.
++ *
++ * Returns number of bytes that could not be copied.
++ * On success, this will be zero.
++ */
++static inline unsigned long __must_check
++copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+-      int sz = __compiletime_object_size(to);
++      size_t sz = __compiletime_object_size(from);
+-      if (likely(sz == -1 || sz >= n))
+-              n = _copy_from_user(to, from, n);
+-      else
++      if (unlikely(sz != (size_t)-1 && sz < n))
++              copy_to_user_overflow();
++      else if (access_ok(VERIFY_WRITE, to, n))
++              n = __copy_to_user(to, from, n);
++      return n;
++}
++
++/**
++ * copy_from_user: - Copy a block of data from user space.
++ * @to:   Destination address, in kernel space.
++ * @from: Source address, in user space.
++ * @n:    Number of bytes to copy.
++ *
++ * Context: User context only.  This function may sleep.
++ *
++ * Copy data from user space to kernel space.
++ *
++ * Returns number of bytes that could not be copied.
++ * On success, this will be zero.
++ *
++ * If some data could not be copied, this function will pad the copied
++ * data to the requested size using zero bytes.
++ */
++static inline unsigned long __must_check
++copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++      size_t sz = __compiletime_object_size(to);
++
++      check_object_size(to, n, false);
++
++      if (unlikely(sz != (size_t)-1 && sz < n))
+               copy_from_user_overflow();
+-
++      else if (access_ok(VERIFY_READ, from, n))
++              n = __copy_from_user(to, from, n);
++      else if ((long)n > 0)
++              memset(to, 0, n);
+       return n;
+ }
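
The kernel-doc comments added above spell out the convention both helpers share: the return value is the number of bytes that could not be copied, so zero means success, and `copy_from_user()` zero-fills whatever it missed. A small userspace stand-in that makes that return convention concrete (this mock is not part of the patch):

#include <stdio.h>
#include <string.h>

/* mimic the documented contract: return the number of bytes NOT copied
 * and zero-fill the uncopied tail */
static unsigned long mock_copy_from_user(void *to, const void *from,
					 unsigned long n, unsigned long fault_after)
{
	unsigned long copied = fault_after < n ? fault_after : n;

	memcpy(to, from, copied);
	memset((char *)to + copied, 0, n - copied);
	return n - copied;
}

int main(void)
{
	char src[8] = "abcdefg";
	char dst[8];

	/* simulate a fault after 3 bytes: 5 bytes stay uncopied and zeroed */
	unsigned long left = mock_copy_from_user(dst, src, sizeof(dst), 3);

	printf("uncopied=%lu -> caller would return %s\n",
	       left, left ? "-EFAULT" : "0");
	return 0;
}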
+diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
+index 142810c..1f2a0a7 100644
+--- a/arch/x86/include/asm/uaccess_64.h
++++ b/arch/x86/include/asm/uaccess_64.h
+@@ -10,6 +10,9 @@
+ #include <asm/alternative.h>
+ #include <asm/cpufeature.h>
+ #include <asm/page.h>
++#include <asm/pgtable.h>
++
++#define set_fs(x)     (current_thread_info()->addr_limit = (x))
+ /*
+  * Copy To/From Userspace
+@@ -17,13 +20,13 @@
+ /* Handles exceptions in both to and from, but doesn't do access_ok */
+ __must_check unsigned long
+-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
++copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
+ __must_check unsigned long
+-copy_user_generic_string(void *to, const void *from, unsigned len);
++copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
+ __must_check unsigned long
+-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
++copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
+-static __always_inline __must_check unsigned long
++static __always_inline __must_check  __size_overflow(3) unsigned long
+ copy_user_generic(void *to, const void *from, unsigned len)
+ {
+       unsigned ret;
+@@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
+                        ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
+                                    "=d" (len)),
+                        "1" (to), "2" (from), "3" (len)
+-                       : "memory", "rcx", "r8", "r9", "r10", "r11");
++                       : "memory", "rcx", "r8", "r9", "r11");
+       return ret;
+ }
++static __always_inline __must_check unsigned long
++__copy_to_user(void __user *to, const void *from, unsigned long len);
++static __always_inline __must_check unsigned long
++__copy_from_user(void *to, const void __user *from, unsigned long len);
+ __must_check unsigned long
+-_copy_to_user(void __user *to, const void *from, unsigned len);
+-__must_check unsigned long
+-_copy_from_user(void *to, const void __user *from, unsigned len);
+-__must_check unsigned long
+-copy_in_user(void __user *to, const void __user *from, unsigned len);
++copy_in_user(void __user *to, const void __user *from, unsigned long len);
++
++extern void copy_to_user_overflow(void)
++#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
++      __compiletime_error("copy_to_user() buffer size is not provably correct")
++#else
++      __compiletime_warning("copy_to_user() buffer size is not provably correct")
++#endif
++;
++
++extern void copy_from_user_overflow(void)
++#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
++      __compiletime_error("copy_from_user() buffer size is not provably correct")
++#else
++      __compiletime_warning("copy_from_user() buffer size is not provably correct")
++#endif
++;
+ static inline unsigned long __must_check copy_from_user(void *to,
+                                         const void __user *from,
+                                         unsigned long n)
+ {
+-      int sz = __compiletime_object_size(to);
+-
+       might_fault();
+-      if (likely(sz == -1 || sz >= n))
+-              n = _copy_from_user(to, from, n);
+-#ifdef CONFIG_DEBUG_VM
+-      else
+-              WARN(1, "Buffer overflow detected!\n");
+-#endif
++
++      check_object_size(to, n, false);
++
++      if (access_ok(VERIFY_READ, from, n))
++              n = __copy_from_user(to, from, n);
++      else if (n < INT_MAX)
++              memset(to, 0, n);
+       return n;
+ }
+ static __always_inline __must_check
+-int copy_to_user(void __user *dst, const void *src, unsigned size)
++int copy_to_user(void __user *dst, const void *src, unsigned long size)
+ {
+       might_fault();
+-      return _copy_to_user(dst, src, size);
++      if (access_ok(VERIFY_WRITE, dst, size))
++              size = __copy_to_user(dst, src, size);
++      return size;
+ }
+ static __always_inline __must_check
+-int __copy_from_user(void *dst, const void __user *src, unsigned size)
++unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
+ {
+-      int ret = 0;
++      size_t sz = __compiletime_object_size(dst);
++      unsigned ret = 0;
+       might_fault();
++
++      if (size > INT_MAX)
++              return size;
++
++      check_object_size(dst, size, false);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (!__access_ok(VERIFY_READ, src, size))
++              return size;
++#endif
++
++      if (unlikely(sz != (size_t)-1 && sz < size)) {
++              copy_from_user_overflow();
++              return size;
++      }
++
+       if (!__builtin_constant_p(size))
+-              return copy_user_generic(dst, (__force void *)src, size);
++              return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
+       switch (size) {
+-      case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
++      case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
+                             ret, "b", "b", "=q", 1);
+               return ret;
+-      case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
++      case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
+                             ret, "w", "w", "=r", 2);
+               return ret;
+-      case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
++      case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
+                             ret, "l", "k", "=r", 4);
+               return ret;
+-      case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
++      case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+                             ret, "q", "", "=r", 8);
+               return ret;
+       case 10:
+-              __get_user_asm(*(u64 *)dst, (u64 __user *)src,
++              __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+                              ret, "q", "", "=r", 10);
+               if (unlikely(ret))
+                       return ret;
+               __get_user_asm(*(u16 *)(8 + (char *)dst),
+-                             (u16 __user *)(8 + (char __user *)src),
++                             (const u16 __user *)(8 + (const char __user *)src),
+                              ret, "w", "w", "=r", 2);
+               return ret;
+       case 16:
+-              __get_user_asm(*(u64 *)dst, (u64 __user *)src,
++              __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+                              ret, "q", "", "=r", 16);
+               if (unlikely(ret))
+                       return ret;
+               __get_user_asm(*(u64 *)(8 + (char *)dst),
+-                             (u64 __user *)(8 + (char __user *)src),
++                             (const u64 __user *)(8 + (const char __user *)src),
+                              ret, "q", "", "=r", 8);
+               return ret;
+       default:
+-              return copy_user_generic(dst, (__force void *)src, size);
++              return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
+       }
+ }
+ static __always_inline __must_check
+-int __copy_to_user(void __user *dst, const void *src, unsigned size)
++unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
+ {
+-      int ret = 0;
++      size_t sz = __compiletime_object_size(src);
++      unsigned ret = 0;
+       might_fault();
++
++      if (size > INT_MAX)
++              return size;
++
++      check_object_size(src, size, true);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (!__access_ok(VERIFY_WRITE, dst, size))
++              return size;
++#endif
++
++      if (unlikely(sz != (size_t)-1 && sz < size)) {
++              copy_to_user_overflow();
++              return size;
++      }
++
+       if (!__builtin_constant_p(size))
+-              return copy_user_generic((__force void *)dst, src, size);
++              return copy_user_generic((__force_kernel void *)____m(dst), src, size);
+       switch (size) {
+-      case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
++      case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
+                             ret, "b", "b", "iq", 1);
+               return ret;
+-      case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
++      case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
+                             ret, "w", "w", "ir", 2);
+               return ret;
+-      case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
++      case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
+                             ret, "l", "k", "ir", 4);
+               return ret;
+-      case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
++      case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+                             ret, "q", "", "er", 8);
+               return ret;
+       case 10:
+-              __put_user_asm(*(u64 *)src, (u64 __user *)dst,
++              __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+                              ret, "q", "", "er", 10);
+               if (unlikely(ret))
+                       return ret;
+               asm("":::"memory");
+-              __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
++              __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
+                              ret, "w", "w", "ir", 2);
+               return ret;
+       case 16:
+-              __put_user_asm(*(u64 *)src, (u64 __user *)dst,
++              __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+                              ret, "q", "", "er", 16);
+               if (unlikely(ret))
+                       return ret;
+               asm("":::"memory");
+-              __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
++              __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
+                              ret, "q", "", "er", 8);
+               return ret;
+       default:
+-              return copy_user_generic((__force void *)dst, src, size);
++              return copy_user_generic((__force_kernel void *)____m(dst), src, size);
+       }
+ }
+ static __always_inline __must_check
+-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
++unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
+ {
+-      int ret = 0;
++      unsigned ret = 0;
+       might_fault();
++
++      if (size > INT_MAX)
++              return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (!__access_ok(VERIFY_READ, src, size))
++              return size;
++      if (!__access_ok(VERIFY_WRITE, dst, size))
++              return size;
++#endif
++
+       if (!__builtin_constant_p(size))
+-              return copy_user_generic((__force void *)dst,
+-                                       (__force void *)src, size);
++              return copy_user_generic((__force_kernel void *)____m(dst),
++                                       (__force_kernel const void *)____m(src), size);
+       switch (size) {
+       case 1: {
+               u8 tmp;
+-              __get_user_asm(tmp, (u8 __user *)src,
++              __get_user_asm(tmp, (const u8 __user *)src,
+                              ret, "b", "b", "=q", 1);
+               if (likely(!ret))
+                       __put_user_asm(tmp, (u8 __user *)dst,
+@@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+       }
+       case 2: {
+               u16 tmp;
+-              __get_user_asm(tmp, (u16 __user *)src,
++              __get_user_asm(tmp, (const u16 __user *)src,
+                              ret, "w", "w", "=r", 2);
+               if (likely(!ret))
+                       __put_user_asm(tmp, (u16 __user *)dst,
+@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+       case 4: {
+               u32 tmp;
+-              __get_user_asm(tmp, (u32 __user *)src,
++              __get_user_asm(tmp, (const u32 __user *)src,
+                              ret, "l", "k", "=r", 4);
+               if (likely(!ret))
+                       __put_user_asm(tmp, (u32 __user *)dst,
+@@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+       }
+       case 8: {
+               u64 tmp;
+-              __get_user_asm(tmp, (u64 __user *)src,
++              __get_user_asm(tmp, (const u64 __user *)src,
+                              ret, "q", "", "=r", 8);
+               if (likely(!ret))
+                       __put_user_asm(tmp, (u64 __user *)dst,
+@@ -212,41 +277,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+               return ret;
+       }
+       default:
+-              return copy_user_generic((__force void *)dst,
+-                                       (__force void *)src, size);
++              return copy_user_generic((__force_kernel void *)____m(dst),
++                                       (__force_kernel const void *)____m(src), size);
+       }
+ }
+ static __must_check __always_inline int
+-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
++__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
+ {
+-      return copy_user_generic(dst, (__force const void *)src, size);
++      if (size > INT_MAX)
++              return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (!__access_ok(VERIFY_READ, src, size))
++              return size;
++#endif
++
++      return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
+ }
+-static __must_check __always_inline int
+-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
++static __must_check __always_inline unsigned long
++__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
+ {
+-      return copy_user_generic((__force void *)dst, src, size);
++      if (size > INT_MAX)
++              return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (!__access_ok(VERIFY_WRITE, dst, size))
++              return size;
++#endif
++
++      return copy_user_generic((__force_kernel void *)____m(dst), src, size);
+ }
+-extern long __copy_user_nocache(void *dst, const void __user *src,
+-                              unsigned size, int zerorest);
++extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
++                              unsigned long size, int zerorest) __size_overflow(3);
+-static inline int
+-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
++static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
+ {
+       might_sleep();
++
++      if (size > INT_MAX)
++              return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (!__access_ok(VERIFY_READ, src, size))
++              return size;
++#endif
++
+       return __copy_user_nocache(dst, src, size, 1);
+ }
+-static inline int
+-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
+-                                unsigned size)
++static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
++                                unsigned long size)
+ {
++      if (size > INT_MAX)
++              return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (!__access_ok(VERIFY_READ, src, size))
++              return size;
++#endif
++
+       return __copy_user_nocache(dst, src, size, 0);
+ }
+-unsigned long
+-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
++extern unsigned long
++copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
+ #endif /* _ASM_X86_UACCESS_64_H */
+diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
+index 5b238981..77fdd78 100644
+--- a/arch/x86/include/asm/word-at-a-time.h
++++ b/arch/x86/include/asm/word-at-a-time.h
+@@ -11,7 +11,7 @@
+  * and shift, for example.
+  */
+ struct word_at_a_time {
+-      const unsigned long one_bits, high_bits;
++      unsigned long one_bits, high_bits;
+ };
+ #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
+index d8d9922..bf6cecb 100644
+--- a/arch/x86/include/asm/x86_init.h
++++ b/arch/x86/include/asm/x86_init.h
+@@ -129,7 +129,7 @@ struct x86_init_ops {
+       struct x86_init_timers          timers;
+       struct x86_init_iommu           iommu;
+       struct x86_init_pci             pci;
+-};
++} __no_const;
+ /**
+  * struct x86_cpuinit_ops - platform specific cpu hotplug setups
+@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
+       void (*setup_percpu_clockev)(void);
+       void (*early_percpu_clock_init)(void);
+       void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
+-};
++} __no_const;
+ /**
+  * struct x86_platform_ops - platform specific runtime functions
+@@ -166,7 +166,7 @@ struct x86_platform_ops {
+       void (*save_sched_clock_state)(void);
+       void (*restore_sched_clock_state)(void);
+       void (*apic_post_init)(void);
+-};
++} __no_const;
+ struct pci_dev;
+ struct msi_msg;
+@@ -180,7 +180,7 @@ struct x86_msi_ops {
+       void (*teardown_msi_irqs)(struct pci_dev *dev);
+       void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
+       int  (*setup_hpet_msi)(unsigned int irq, unsigned int id);
+-};
++} __no_const;
+ struct IO_APIC_route_entry;
+ struct io_apic_irq_attr;
+@@ -201,7 +201,7 @@ struct x86_io_apic_ops {
+                                      unsigned int destination, int vector,
+                                      struct io_apic_irq_attr *attr);
+       void            (*eoi_ioapic_pin)(int apic, int pin, int vector);
+-};
++} __no_const;
+ extern struct x86_init_ops x86_init;
+ extern struct x86_cpuinit_ops x86_cpuinit;
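
`__no_const` is the PaX marker consumed by the constify gcc plugin: it exempts these ops structures from being forced read-only, because the kernel legitimately rewrites their function pointers at runtime. Where no runtime patching is needed, an ops table can simply be declared `const`, as in this illustrative snippet (names invented for the example):

#include <stdio.h>

struct ops {
	void (*setup)(void);
	void (*teardown)(void);
};

static void do_setup(void)    { puts("setup"); }
static void do_teardown(void) { puts("teardown"); }

/* a const ops table cannot have its pointers redirected after init;
 * structures that must stay writable are tagged __no_const instead */
static const struct ops default_ops = {
	.setup    = do_setup,
	.teardown = do_teardown,
};

int main(void)
{
	default_ops.setup();
	default_ops.teardown();
	return 0;
}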
+diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
+index 0415cda..3b22adc 100644
+--- a/arch/x86/include/asm/xsave.h
++++ b/arch/x86/include/asm/xsave.h
+@@ -70,8 +70,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
+       if (unlikely(err))
+               return -EFAULT;
++      pax_open_userland();
+       __asm__ __volatile__(ASM_STAC "\n"
+-                           "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
++                           "1:"
++                           __copyuser_seg
++                           ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
+                            "2: " ASM_CLAC "\n"
+                            ".section .fixup,\"ax\"\n"
+                            "3:  movl $-1,%[err]\n"
+@@ -81,18 +84,22 @@ static inline int xsave_user(struct xsave_struct __user *buf)
+                            : [err] "=r" (err)
+                            : "D" (buf), "a" (-1), "d" (-1), "0" (0)
+                            : "memory");
++      pax_close_userland();
+       return err;
+ }
+ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
+ {
+       int err;
+-      struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
++      struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
+       u32 lmask = mask;
+       u32 hmask = mask >> 32;
++      pax_open_userland();
+       __asm__ __volatile__(ASM_STAC "\n"
+-                           "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
++                           "1:"
++                           __copyuser_seg
++                           ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+                            "2: " ASM_CLAC "\n"
+                            ".section .fixup,\"ax\"\n"
+                            "3:  movl $-1,%[err]\n"
+@@ -102,6 +109,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
+                            : [err] "=r" (err)
+                            : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
+                            : "memory");       /* memory required? */
++      pax_close_userland();
+       return err;
+ }
+diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
+index bbae024..e1528f9 100644
+--- a/arch/x86/include/uapi/asm/e820.h
++++ b/arch/x86/include/uapi/asm/e820.h
+@@ -63,7 +63,7 @@ struct e820map {
+ #define ISA_START_ADDRESS     0xa0000
+ #define ISA_END_ADDRESS               0x100000
+-#define BIOS_BEGIN            0x000a0000
++#define BIOS_BEGIN            0x000c0000
+ #define BIOS_END              0x00100000
+ #define BIOS_ROM_BASE         0xffe00000
+diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
+index 7bd3bd3..5dac791 100644
+--- a/arch/x86/kernel/Makefile
++++ b/arch/x86/kernel/Makefile
+@@ -22,7 +22,7 @@ obj-y                        += time.o ioport.o ldt.o dumpstack.o nmi.o
+ obj-y                 += setup.o x86_init.o i8259.o irqinit.o jump_label.o
+ obj-$(CONFIG_IRQ_WORK)  += irq_work.o
+ obj-y                 += probe_roms.o
+-obj-$(CONFIG_X86_32)  += i386_ksyms_32.o
++obj-$(CONFIG_X86_32)  += sys_i386_32.o i386_ksyms_32.o
+ obj-$(CONFIG_X86_64)  += sys_x86_64.o x8664_ksyms_64.o
+ obj-y                 += syscall_$(BITS).o
+ obj-$(CONFIG_X86_64)  += vsyscall_64.o
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 230c8ea..f915130 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -1361,7 +1361,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
+  * If your system is blacklisted here, but you find that acpi=force
+  * works for you, please contact linux-acpi@vger.kernel.org
+  */
+-static struct dmi_system_id __initdata acpi_dmi_table[] = {
++static const struct dmi_system_id __initconst acpi_dmi_table[] = {
+       /*
+        * Boxes that need ACPI disabled
+        */
+@@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
+ };
+ /* second table for DMI checks that should run after early-quirks */
+-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
++static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
+       /*
+        * HP laptops which use a DSDT reporting as HP/SB400/10000,
+        * which includes some code which overrides all temperature
+diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
+index ec94e11..7fbbec0 100644
+--- a/arch/x86/kernel/acpi/sleep.c
++++ b/arch/x86/kernel/acpi/sleep.c
+@@ -88,8 +88,12 @@ int acpi_suspend_lowlevel(void)
+ #else /* CONFIG_64BIT */
+ #ifdef CONFIG_SMP
+       stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
++
++      pax_open_kernel();
+       early_gdt_descr.address =
+                       (unsigned long)get_cpu_gdt_table(smp_processor_id());
++      pax_close_kernel();
++
+       initial_gs = per_cpu_offset(smp_processor_id());
+ #endif
+       initial_code = (unsigned long)wakeup_long64;
+diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
+index d1daa66..59fecba 100644
+--- a/arch/x86/kernel/acpi/wakeup_32.S
++++ b/arch/x86/kernel/acpi/wakeup_32.S
+@@ -29,13 +29,11 @@ wakeup_pmode_return:
+       # and restore the stack ... but you need gdt for this to work
+       movl    saved_context_esp, %esp
+-      movl    %cs:saved_magic, %eax
+-      cmpl    $0x12345678, %eax
++      cmpl    $0x12345678, saved_magic
+       jne     bogus_magic
+       # jump to place where we left off
+-      movl    saved_eip, %eax
+-      jmp     *%eax
++      jmp     *(saved_eip)
+ bogus_magic:
+       jmp     bogus_magic
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index c15cf9a..0e63558 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
+        */
+       for (a = start; a < end; a++) {
+               instr = (u8 *)&a->instr_offset + a->instr_offset;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++              instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++              if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
++                      instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++#endif
++
+               replacement = (u8 *)&a->repl_offset + a->repl_offset;
+               BUG_ON(a->replacementlen > a->instrlen);
+               BUG_ON(a->instrlen > sizeof(insnbuf));
+@@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
+       for (poff = start; poff < end; poff++) {
+               u8 *ptr = (u8 *)poff + *poff;
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++              ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++              if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
++                      ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++#endif
++
+               if (!*poff || ptr < text || ptr >= text_end)
+                       continue;
+               /* turn DS segment override prefix into lock prefix */
+-              if (*ptr == 0x3e)
++              if (*ktla_ktva(ptr) == 0x3e)
+                       text_poke(ptr, ((unsigned char []){0xf0}), 1);
+       }
+       mutex_unlock(&text_mutex);
+@@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
+       for (poff = start; poff < end; poff++) {
+               u8 *ptr = (u8 *)poff + *poff;
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++              ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++              if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
++                      ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++#endif
++
+               if (!*poff || ptr < text || ptr >= text_end)
+                       continue;
+               /* turn lock prefix into DS segment override prefix */
+-              if (*ptr == 0xf0)
++              if (*ktla_ktva(ptr) == 0xf0)
+                       text_poke(ptr, ((unsigned char []){0x3E}), 1);
+       }
+       mutex_unlock(&text_mutex);
+@@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
+               BUG_ON(p->len > MAX_PATCH_LEN);
+               /* prep the buffer with the original instructions */
+-              memcpy(insnbuf, p->instr, p->len);
++              memcpy(insnbuf, ktla_ktva(p->instr), p->len);
+               used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
+                                        (unsigned long)p->instr, p->len);
+@@ -515,7 +534,7 @@ void __init alternative_instructions(void)
+       if (!uniproc_patched || num_possible_cpus() == 1)
+               free_init_pages("SMP alternatives",
+                               (unsigned long)__smp_locks,
+-                              (unsigned long)__smp_locks_end);
++                              PAGE_ALIGN((unsigned long)__smp_locks_end));
+ #endif
+       apply_paravirt(__parainstructions, __parainstructions_end);
+@@ -535,13 +554,17 @@ void __init alternative_instructions(void)
+  * instructions. And on the local CPU you need to be protected against NMI or MCE
+  * handlers seeing an inconsistent instruction while you patch.
+  */
+-void *__init_or_module text_poke_early(void *addr, const void *opcode,
++void *__kprobes text_poke_early(void *addr, const void *opcode,
+                                             size_t len)
+ {
+       unsigned long flags;
+       local_irq_save(flags);
+-      memcpy(addr, opcode, len);
++
++      pax_open_kernel();
++      memcpy(ktla_ktva(addr), opcode, len);
+       sync_core();
++      pax_close_kernel();
++
+       local_irq_restore(flags);
+       /* Could also do a CLFLUSH here to speed up CPU recovery; but
+          that causes hangs on some VIA CPUs. */
+@@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
+  */
+ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
+ {
+-      unsigned long flags;
+-      char *vaddr;
++      unsigned char *vaddr = ktla_ktva(addr);
+       struct page *pages[2];
+-      int i;
++      size_t i;
+       if (!core_kernel_text((unsigned long)addr)) {
+-              pages[0] = vmalloc_to_page(addr);
+-              pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
++              pages[0] = vmalloc_to_page(vaddr);
++              pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
+       } else {
+-              pages[0] = virt_to_page(addr);
++              pages[0] = virt_to_page(vaddr);
+               WARN_ON(!PageReserved(pages[0]));
+-              pages[1] = virt_to_page(addr + PAGE_SIZE);
++              pages[1] = virt_to_page(vaddr + PAGE_SIZE);
+       }
+       BUG_ON(!pages[0]);
+-      local_irq_save(flags);
+-      set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
+-      if (pages[1])
+-              set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
+-      vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
+-      memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
+-      clear_fixmap(FIX_TEXT_POKE0);
+-      if (pages[1])
+-              clear_fixmap(FIX_TEXT_POKE1);
+-      local_flush_tlb();
+-      sync_core();
+-      /* Could also do a CLFLUSH here to speed up CPU recovery; but
+-         that causes hangs on some VIA CPUs. */
++      text_poke_early(addr, opcode, len);
+       for (i = 0; i < len; i++)
+-              BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
+-      local_irq_restore(flags);
++              BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
+       return addr;
+ }
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 904611b..004dde6 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -189,7 +189,7 @@ int first_system_vector = 0xfe;
+ /*
+  * Debug level, exported for io_apic.c
+  */
+-unsigned int apic_verbosity;
++int apic_verbosity;
+ int pic_mode;
+@@ -1955,7 +1955,7 @@ void smp_error_interrupt(struct pt_regs *regs)
+       apic_write(APIC_ESR, 0);
+       v1 = apic_read(APIC_ESR);
+       ack_APIC_irq();
+-      atomic_inc(&irq_err_count);
++      atomic_inc_unchecked(&irq_err_count);
+       apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
+                   smp_processor_id(), v0 , v1);
+diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
+index 00c77cf..2dc6a2d 100644
+--- a/arch/x86/kernel/apic/apic_flat_64.c
++++ b/arch/x86/kernel/apic/apic_flat_64.c
+@@ -157,7 +157,7 @@ static int flat_probe(void)
+       return 1;
+ }
+-static struct apic apic_flat =  {
++static struct apic apic_flat __read_only =  {
+       .name                           = "flat",
+       .probe                          = flat_probe,
+       .acpi_madt_oem_check            = flat_acpi_madt_oem_check,
+@@ -271,7 +271,7 @@ static int physflat_probe(void)
+       return 0;
+ }
+-static struct apic apic_physflat =  {
++static struct apic apic_physflat __read_only =  {
+       .name                           = "physical flat",
+       .probe                          = physflat_probe,
+diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
+index e145f28..2752888 100644
+--- a/arch/x86/kernel/apic/apic_noop.c
++++ b/arch/x86/kernel/apic/apic_noop.c
+@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
+       WARN_ON_ONCE(cpu_has_apic && !disable_apic);
+ }
+-struct apic apic_noop = {
++struct apic apic_noop __read_only = {
+       .name                           = "noop",
+       .probe                          = noop_probe,
+       .acpi_madt_oem_check            = NULL,
+diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
+index d50e364..543bee3 100644
+--- a/arch/x86/kernel/apic/bigsmp_32.c
++++ b/arch/x86/kernel/apic/bigsmp_32.c
+@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
+       return dmi_bigsmp;
+ }
+-static struct apic apic_bigsmp = {
++static struct apic apic_bigsmp __read_only = {
+       .name                           = "bigsmp",
+       .probe                          = probe_bigsmp,
+diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
+index 0874799..a7a7892 100644
+--- a/arch/x86/kernel/apic/es7000_32.c
++++ b/arch/x86/kernel/apic/es7000_32.c
+@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
+       return ret && es7000_apic_is_cluster();
+ }
+-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
+-static struct apic __refdata apic_es7000_cluster = {
++static struct apic apic_es7000_cluster __read_only = {
+       .name                           = "es7000",
+       .probe                          = probe_es7000,
+@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
+       .x86_32_early_logical_apicid    = es7000_early_logical_apicid,
+ };
+-static struct apic __refdata apic_es7000 = {
++static struct apic apic_es7000 __read_only = {
+       .name                           = "es7000",
+       .probe                          = probe_es7000,
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 9ed796c..e930fe4 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1060,7 +1060,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
+ }
+ EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
+-void lock_vector_lock(void)
++void lock_vector_lock(void) __acquires(vector_lock)
+ {
+       /* Used so that the online set of cpus does not change
+        * during assign_irq_vector.
+@@ -1068,7 +1068,7 @@ void lock_vector_lock(void)
+       raw_spin_lock(&vector_lock);
+ }
+-void unlock_vector_lock(void)
++void unlock_vector_lock(void) __releases(vector_lock)
+ {
+       raw_spin_unlock(&vector_lock);
+ }
+@@ -2362,7 +2362,7 @@ static void ack_apic_edge(struct irq_data *data)
+       ack_APIC_irq();
+ }
+-atomic_t irq_mis_count;
++atomic_unchecked_t irq_mis_count;
+ #ifdef CONFIG_GENERIC_PENDING_IRQ
+ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
+@@ -2503,7 +2503,7 @@ static void ack_apic_level(struct irq_data *data)
+        * at the cpu.
+        */
+       if (!(v & (1 << (i & 0x1f)))) {
+-              atomic_inc(&irq_mis_count);
++              atomic_inc_unchecked(&irq_mis_count);
+               eoi_ioapic_irq(irq, cfg);
+       }
+diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
+index d661ee9..791fd33 100644
+--- a/arch/x86/kernel/apic/numaq_32.c
++++ b/arch/x86/kernel/apic/numaq_32.c
+@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
+               (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
+ }
+-/* Use __refdata to keep false positive warning calm.  */
+-static struct apic __refdata apic_numaq = {
++static struct apic apic_numaq __read_only = {
+       .name                           = "NUMAQ",
+       .probe                          = probe_numaq,
+diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
+index eb35ef9..f184a21 100644
+--- a/arch/x86/kernel/apic/probe_32.c
++++ b/arch/x86/kernel/apic/probe_32.c
+@@ -72,7 +72,7 @@ static int probe_default(void)
+       return 1;
+ }
+-static struct apic apic_default = {
++static struct apic apic_default __read_only = {
+       .name                           = "default",
+       .probe                          = probe_default,
+diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
+index 77c95c0..434f8a4 100644
+--- a/arch/x86/kernel/apic/summit_32.c
++++ b/arch/x86/kernel/apic/summit_32.c
+@@ -486,7 +486,7 @@ void setup_summit(void)
+ }
+ #endif
+-static struct apic apic_summit = {
++static struct apic apic_summit __read_only = {
+       .name                           = "summit",
+       .probe                          = probe_summit,
+diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
+index c88baa4..757aee1 100644
+--- a/arch/x86/kernel/apic/x2apic_cluster.c
++++ b/arch/x86/kernel/apic/x2apic_cluster.c
+@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
+       return notifier_from_errno(err);
+ }
+-static struct notifier_block __refdata x2apic_cpu_notifier = {
++static struct notifier_block x2apic_cpu_notifier = {
+       .notifier_call = update_clusterinfo,
+ };
+@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
+               cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
+ }
+-static struct apic apic_x2apic_cluster = {
++static struct apic apic_x2apic_cluster __read_only = {
+       .name                           = "cluster x2apic",
+       .probe                          = x2apic_cluster_probe,
+diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
+index 562a76d..a003c0f 100644
+--- a/arch/x86/kernel/apic/x2apic_phys.c
++++ b/arch/x86/kernel/apic/x2apic_phys.c
+@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
+       return apic == &apic_x2apic_phys;
+ }
+-static struct apic apic_x2apic_phys = {
++static struct apic apic_x2apic_phys __read_only = {
+       .name                           = "physical x2apic",
+       .probe                          = x2apic_phys_probe,
+diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
+index 794f6eb..67e1db2 100644
+--- a/arch/x86/kernel/apic/x2apic_uv_x.c
++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
+@@ -342,7 +342,7 @@ static int uv_probe(void)
+       return apic == &apic_x2apic_uv_x;
+ }
+-static struct apic __refdata apic_x2apic_uv_x = {
++static struct apic apic_x2apic_uv_x __read_only = {
+       .name                           = "UV large system",
+       .probe                          = uv_probe,
+diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
+index 53a4e27..038760a 100644
+--- a/arch/x86/kernel/apm_32.c
++++ b/arch/x86/kernel/apm_32.c
+@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
+  * This is for buggy BIOS's that refer to (real mode) segment 0x40
+  * even though they are called in protected mode.
+  */
+-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
+                       (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
+ static const char driver_version[] = "1.16ac";        /* no spaces */
+@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
+       BUG_ON(cpu != 0);
+       gdt = get_cpu_gdt_table(cpu);
+       save_desc_40 = gdt[0x40 / 8];
++
++      pax_open_kernel();
+       gdt[0x40 / 8] = bad_bios_desc;
++      pax_close_kernel();
+       apm_irq_save(flags);
+       APM_DO_SAVE_SEGS;
+@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
+                         &call->esi);
+       APM_DO_RESTORE_SEGS;
+       apm_irq_restore(flags);
++
++      pax_open_kernel();
+       gdt[0x40 / 8] = save_desc_40;
++      pax_close_kernel();
++
+       put_cpu();
+       return call->eax & 0xff;
+@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
+       BUG_ON(cpu != 0);
+       gdt = get_cpu_gdt_table(cpu);
+       save_desc_40 = gdt[0x40 / 8];
++
++      pax_open_kernel();
+       gdt[0x40 / 8] = bad_bios_desc;
++      pax_close_kernel();
+       apm_irq_save(flags);
+       APM_DO_SAVE_SEGS;
+@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
+                                        &call->eax);
+       APM_DO_RESTORE_SEGS;
+       apm_irq_restore(flags);
++
++      pax_open_kernel();
+       gdt[0x40 / 8] = save_desc_40;
++      pax_close_kernel();
++
+       put_cpu();
+       return error;
+ }
+@@ -2362,12 +2376,15 @@ static int __init apm_init(void)
+        * code to that CPU.
+        */
+       gdt = get_cpu_gdt_table(0);
++
++      pax_open_kernel();
+       set_desc_base(&gdt[APM_CS >> 3],
+                (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
+       set_desc_base(&gdt[APM_CS_16 >> 3],
+                (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
+       set_desc_base(&gdt[APM_DS >> 3],
+                (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
++      pax_close_kernel();
+       proc_create("apm", 0, NULL, &apm_file_ops);
+diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
+index 2861082..6d4718e 100644
+--- a/arch/x86/kernel/asm-offsets.c
++++ b/arch/x86/kernel/asm-offsets.c
+@@ -33,6 +33,8 @@ void common(void) {
+       OFFSET(TI_status, thread_info, status);
+       OFFSET(TI_addr_limit, thread_info, addr_limit);
+       OFFSET(TI_preempt_count, thread_info, preempt_count);
++      OFFSET(TI_lowest_stack, thread_info, lowest_stack);
++      DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
+       BLANK();
+       OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
+@@ -53,8 +55,26 @@ void common(void) {
+       OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+       OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
+       OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
++
++#ifdef CONFIG_PAX_KERNEXEC
++      OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
+ #endif
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
++      OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
++#ifdef CONFIG_X86_64
++      OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
++#endif
++#endif
++
++#endif
++
++      BLANK();
++      DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
++      DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
++      DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
++
+ #ifdef CONFIG_XEN
+       BLANK();
+       OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
+diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
+index e7c798b..2b2019b 100644
+--- a/arch/x86/kernel/asm-offsets_64.c
++++ b/arch/x86/kernel/asm-offsets_64.c
+@@ -77,6 +77,7 @@ int main(void)
+       BLANK();
+ #undef ENTRY
++      DEFINE(TSS_size, sizeof(struct tss_struct));
+       OFFSET(TSS_ist, tss_struct, x86_tss.ist);
+       BLANK();
+diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
+index b0684e4..22ccfd7 100644
+--- a/arch/x86/kernel/cpu/Makefile
++++ b/arch/x86/kernel/cpu/Makefile
+@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
+ CFLAGS_REMOVE_perf_event.o = -pg
+ endif
+-# Make sure load_percpu_segment has no stackprotector
+-nostackp := $(call cc-option, -fno-stack-protector)
+-CFLAGS_common.o               := $(nostackp)
+-
+ obj-y                 := intel_cacheinfo.o scattered.o topology.o
+ obj-y                 += proc.o capflags.o powerflags.o common.o
+ obj-y                 += rdrand.o
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 5013a48..0782c53 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -744,7 +744,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
+                                                       unsigned int size)
+ {
+       /* AMD errata T13 (order #21922) */
+-      if ((c->x86 == 6)) {
++      if (c->x86 == 6) {
+               /* Duron Rev A0 */
+               if (c->x86_model == 3 && c->x86_mask == 0)
+                       size = 64;
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 22018f7..df77e23 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -88,60 +88,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
+ static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
+-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
+-#ifdef CONFIG_X86_64
+-      /*
+-       * We need valid kernel segments for data and code in long mode too
+-       * IRET will check the segment types  kkeil 2000/10/28
+-       * Also sysret mandates a special GDT layout
+-       *
+-       * TLS descriptors are currently at a different place compared to i386.
+-       * Hopefully nobody expects them at a fixed place (Wine?)
+-       */
+-      [GDT_ENTRY_KERNEL32_CS]         = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
+-      [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
+-      [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
+-      [GDT_ENTRY_DEFAULT_USER32_CS]   = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
+-      [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
+-      [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
+-#else
+-      [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
+-      [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+-      [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
+-      [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
+-      /*
+-       * Segments used for calling PnP BIOS have byte granularity.
+-       * They code segments and data segments have fixed 64k limits,
+-       * the transfer segment sizes are set at run time.
+-       */
+-      /* 32-bit code */
+-      [GDT_ENTRY_PNPBIOS_CS32]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
+-      /* 16-bit code */
+-      [GDT_ENTRY_PNPBIOS_CS16]        = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
+-      /* 16-bit data */
+-      [GDT_ENTRY_PNPBIOS_DS]          = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
+-      /* 16-bit data */
+-      [GDT_ENTRY_PNPBIOS_TS1]         = GDT_ENTRY_INIT(0x0092, 0, 0),
+-      /* 16-bit data */
+-      [GDT_ENTRY_PNPBIOS_TS2]         = GDT_ENTRY_INIT(0x0092, 0, 0),
+-      /*
+-       * The APM segments have byte granularity and their bases
+-       * are set at run time.  All have 64k limits.
+-       */
+-      /* 32-bit code */
+-      [GDT_ENTRY_APMBIOS_BASE]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
+-      /* 16-bit code */
+-      [GDT_ENTRY_APMBIOS_BASE+1]      = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
+-      /* data */
+-      [GDT_ENTRY_APMBIOS_BASE+2]      = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
+-
+-      [GDT_ENTRY_ESPFIX_SS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+-      [GDT_ENTRY_PERCPU]              = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+-      GDT_STACK_CANARY_INIT
+-#endif
+-} };
+-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
+-
+ static int __init x86_xsave_setup(char *s)
+ {
+       setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+@@ -288,6 +234,57 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+               set_in_cr4(X86_CR4_SMAP);
+ }
++#ifdef CONFIG_X86_64
++static __init int setup_disable_pcid(char *arg)
++{
++      setup_clear_cpu_cap(X86_FEATURE_PCID);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (clone_pgd_mask != ~(pgdval_t)0UL)
++              pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
++#endif
++
++      return 1;
++}
++__setup("nopcid", setup_disable_pcid);
++
++static void setup_pcid(struct cpuinfo_x86 *c)
++{
++      if (!cpu_has(c, X86_FEATURE_PCID)) {
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++              if (clone_pgd_mask != ~(pgdval_t)0UL) {
++                      pax_open_kernel();
++                      pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
++                      pax_close_kernel();
++                      printk("PAX: slow and weak UDEREF enabled\n");
++              } else
++                      printk("PAX: UDEREF disabled\n");
++#endif
++
++              return;
++      }
++
++      printk("PAX: PCID detected\n");
++      set_in_cr4(X86_CR4_PCIDE);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      pax_open_kernel();
++      clone_pgd_mask = ~(pgdval_t)0UL;
++      pax_close_kernel();
++      if (pax_user_shadow_base)
++              printk("PAX: weak UDEREF enabled\n");
++      else {
++              set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
++              printk("PAX: strong UDEREF enabled\n");
++      }
++#endif
++
++      if (cpu_has(c, X86_FEATURE_INVPCID))
++              printk("PAX: INVPCID detected\n");
++}
++#endif
++
+ /*
+  * Some CPU features depend on higher CPUID levels, which may not always
+  * be available due to CPUID level capping or broken virtualization
+@@ -386,7 +383,7 @@ void switch_to_new_gdt(int cpu)
+ {
+       struct desc_ptr gdt_descr;
+-      gdt_descr.address = (long)get_cpu_gdt_table(cpu);
++      gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
+       gdt_descr.size = GDT_SIZE - 1;
+       load_gdt(&gdt_descr);
+       /* Reload the per-cpu base */
+@@ -874,6 +871,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+       setup_smep(c);
+       setup_smap(c);
++#ifdef CONFIG_X86_64
++      setup_pcid(c);
++#endif
++
+       /*
+        * The vendor-specific functions might have changed features.
+        * Now we do "generic changes."
+@@ -882,6 +883,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+       /* Filter out anything that depends on CPUID levels we don't have */
+       filter_cpuid_features(c, true);
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
++      setup_clear_cpu_cap(X86_FEATURE_SEP);
++#endif
++
+       /* If the model name is still unset, do table lookup. */
+       if (!c->x86_model_id[0]) {
+               const char *p;
+@@ -1069,10 +1074,12 @@ static __init int setup_disablecpuid(char *arg)
+ }
+ __setup("clearcpuid=", setup_disablecpuid);
++DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
++EXPORT_PER_CPU_SYMBOL(current_tinfo);
++
+ #ifdef CONFIG_X86_64
+ struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
+-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
+-                                  (unsigned long) nmi_idt_table };
++struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
+ DEFINE_PER_CPU_FIRST(union irq_stack_union,
+                    irq_stack_union) __aligned(PAGE_SIZE);
+@@ -1086,7 +1093,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
+ EXPORT_PER_CPU_SYMBOL(current_task);
+ DEFINE_PER_CPU(unsigned long, kernel_stack) =
+-      (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
++      (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
+ EXPORT_PER_CPU_SYMBOL(kernel_stack);
+ DEFINE_PER_CPU(char *, irq_stack_ptr) =
+@@ -1231,7 +1238,7 @@ void __cpuinit cpu_init(void)
+       load_ucode_ap();
+       cpu = stack_smp_processor_id();
+-      t = &per_cpu(init_tss, cpu);
++      t = init_tss + cpu;
+       oist = &per_cpu(orig_ist, cpu);
+ #ifdef CONFIG_NUMA
+@@ -1257,7 +1264,7 @@ void __cpuinit cpu_init(void)
+       switch_to_new_gdt(cpu);
+       loadsegment(fs, 0);
+-      load_idt((const struct desc_ptr *)&idt_descr);
++      load_idt(&idt_descr);
+       memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
+       syscall_init();
+@@ -1266,7 +1273,6 @@ void __cpuinit cpu_init(void)
+       wrmsrl(MSR_KERNEL_GS_BASE, 0);
+       barrier();
+-      x86_configure_nx();
+       enable_x2apic();
+       /*
+@@ -1318,7 +1324,7 @@ void __cpuinit cpu_init(void)
+ {
+       int cpu = smp_processor_id();
+       struct task_struct *curr = current;
+-      struct tss_struct *t = &per_cpu(init_tss, cpu);
++      struct tss_struct *t = init_tss + cpu;
+       struct thread_struct *thread = &curr->thread;
+       show_ucode_info_early();
+diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
+index 7c6f7d5..8cac382 100644
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -1017,6 +1017,22 @@ static struct attribute *default_attrs[] = {
+ };
+ #ifdef CONFIG_AMD_NB
++static struct attribute *default_attrs_amd_nb[] = {
++      &type.attr,
++      &level.attr,
++      &coherency_line_size.attr,
++      &physical_line_partition.attr,
++      &ways_of_associativity.attr,
++      &number_of_sets.attr,
++      &size.attr,
++      &shared_cpu_map.attr,
++      &shared_cpu_list.attr,
++      NULL,
++      NULL,
++      NULL,
++      NULL
++};
++
+ static struct attribute ** __cpuinit amd_l3_attrs(void)
+ {
+       static struct attribute **attrs;
+@@ -1027,18 +1043,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
+       n = ARRAY_SIZE(default_attrs);
+-      if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+-              n += 2;
+-
+-      if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+-              n += 1;
+-
+-      attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
+-      if (attrs == NULL)
+-              return attrs = default_attrs;
+-
+-      for (n = 0; default_attrs[n]; n++)
+-              attrs[n] = default_attrs[n];
++      attrs = default_attrs_amd_nb;
+       if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
+               attrs[n++] = &cache_disable_0.attr;
+@@ -1089,6 +1094,13 @@ static struct kobj_type ktype_cache = {
+       .default_attrs  = default_attrs,
+ };
++#ifdef CONFIG_AMD_NB
++static struct kobj_type ktype_cache_amd_nb = {
++      .sysfs_ops      = &sysfs_ops,
++      .default_attrs  = default_attrs_amd_nb,
++};
++#endif
++
+ static struct kobj_type ktype_percpu_entry = {
+       .sysfs_ops      = &sysfs_ops,
+ };
+@@ -1154,20 +1166,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
+               return retval;
+       }
++#ifdef CONFIG_AMD_NB
++      amd_l3_attrs();
++#endif
++
+       for (i = 0; i < num_cache_leaves; i++) {
++              struct kobj_type *ktype;
++
+               this_object = INDEX_KOBJECT_PTR(cpu, i);
+               this_object->cpu = cpu;
+               this_object->index = i;
+               this_leaf = CPUID4_INFO_IDX(cpu, i);
+-              ktype_cache.default_attrs = default_attrs;
++              ktype = &ktype_cache;
+ #ifdef CONFIG_AMD_NB
+               if (this_leaf->base.nb)
+-                      ktype_cache.default_attrs = amd_l3_attrs();
++                      ktype = &ktype_cache_amd_nb;
+ #endif
+               retval = kobject_init_and_add(&(this_object->kobj),
+-                                            &ktype_cache,
++                                            ktype,
+                                             per_cpu(ici_cache_kobject, cpu),
+                                             "index%1lu", i);
+               if (unlikely(retval)) {
+@@ -1222,7 +1240,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
++static struct notifier_block cacheinfo_cpu_notifier = {
+       .notifier_call = cacheinfo_cpu_callback,
+ };
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index 9239504..b2471ce 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -45,6 +45,7 @@
+ #include <asm/processor.h>
+ #include <asm/mce.h>
+ #include <asm/msr.h>
++#include <asm/local.h>
+ #include "mce-internal.h"
+@@ -246,7 +247,7 @@ static void print_mce(struct mce *m)
+                       !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
+                               m->cs, m->ip);
+-              if (m->cs == __KERNEL_CS)
++              if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
+                       print_symbol("{%s}", m->ip);
+               pr_cont("\n");
+       }
+@@ -279,10 +280,10 @@ static void print_mce(struct mce *m)
+ #define PANIC_TIMEOUT 5 /* 5 seconds */
+-static atomic_t mce_paniced;
++static atomic_unchecked_t mce_paniced;
+ static int fake_panic;
+-static atomic_t mce_fake_paniced;
++static atomic_unchecked_t mce_fake_paniced;
+ /* Panic in progress. Enable interrupts and wait for final IPI */
+ static void wait_for_panic(void)
+@@ -306,7 +307,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
+               /*
+                * Make sure only one CPU runs in machine check panic
+                */
+-              if (atomic_inc_return(&mce_paniced) > 1)
++              if (atomic_inc_return_unchecked(&mce_paniced) > 1)
+                       wait_for_panic();
+               barrier();
+@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
+               console_verbose();
+       } else {
+               /* Don't log too much for fake panic */
+-              if (atomic_inc_return(&mce_fake_paniced) > 1)
++              if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
+                       return;
+       }
+       /* First print corrected ones that are still unlogged */
+@@ -353,7 +354,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
+       if (!fake_panic) {
+               if (panic_timeout == 0)
+                       panic_timeout = mca_cfg.panic_timeout;
+-              panic(msg);
++              panic("%s", msg);
+       } else
+               pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
+ }
+@@ -683,7 +684,7 @@ static int mce_timed_out(u64 *t)
+        * might have been modified by someone else.
+        */
+       rmb();
+-      if (atomic_read(&mce_paniced))
++      if (atomic_read_unchecked(&mce_paniced))
+               wait_for_panic();
+       if (!mca_cfg.monarch_timeout)
+               goto out;
+@@ -1654,7 +1655,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
+ }
+ /* Call the installed machine check handler for this CPU setup. */
+-void (*machine_check_vector)(struct pt_regs *, long error_code) =
++void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
+                                               unexpected_machine_check;
+ /*
+@@ -1677,7 +1678,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
+               return;
+       }
++      pax_open_kernel();
+       machine_check_vector = do_machine_check;
++      pax_close_kernel();
+       __mcheck_cpu_init_generic();
+       __mcheck_cpu_init_vendor(c);
+@@ -1691,7 +1694,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
+  */
+ static DEFINE_SPINLOCK(mce_chrdev_state_lock);
+-static int mce_chrdev_open_count;     /* #times opened */
++static local_t mce_chrdev_open_count; /* #times opened */
+ static int mce_chrdev_open_exclu;     /* already open exclusive? */
+ static int mce_chrdev_open(struct inode *inode, struct file *file)
+@@ -1699,7 +1702,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
+       spin_lock(&mce_chrdev_state_lock);
+       if (mce_chrdev_open_exclu ||
+-          (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
++          (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
+               spin_unlock(&mce_chrdev_state_lock);
+               return -EBUSY;
+@@ -1707,7 +1710,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
+       if (file->f_flags & O_EXCL)
+               mce_chrdev_open_exclu = 1;
+-      mce_chrdev_open_count++;
++      local_inc(&mce_chrdev_open_count);
+       spin_unlock(&mce_chrdev_state_lock);
+@@ -1718,7 +1721,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
+ {
+       spin_lock(&mce_chrdev_state_lock);
+-      mce_chrdev_open_count--;
++      local_dec(&mce_chrdev_open_count);
+       mce_chrdev_open_exclu = 0;
+       spin_unlock(&mce_chrdev_state_lock);
+@@ -2364,7 +2367,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+       return NOTIFY_OK;
+ }
+-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
++static struct notifier_block mce_cpu_notifier = {
+       .notifier_call = mce_cpu_callback,
+ };
+@@ -2374,7 +2377,7 @@ static __init void mce_init_banks(void)
+       for (i = 0; i < mca_cfg.banks; i++) {
+               struct mce_bank *b = &mce_banks[i];
+-              struct device_attribute *a = &b->attr;
++              device_attribute_no_const *a = &b->attr;
+               sysfs_attr_init(&a->attr);
+               a->attr.name    = b->attrname;
+@@ -2442,7 +2445,7 @@ struct dentry *mce_get_debugfs_dir(void)
+ static void mce_reset(void)
+ {
+       cpu_missing = 0;
+-      atomic_set(&mce_fake_paniced, 0);
++      atomic_set_unchecked(&mce_fake_paniced, 0);
+       atomic_set(&mce_executing, 0);
+       atomic_set(&mce_callin, 0);
+       atomic_set(&global_nwo, 0);
+diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
+index 1c044b1..37a2a43 100644
+--- a/arch/x86/kernel/cpu/mcheck/p5.c
++++ b/arch/x86/kernel/cpu/mcheck/p5.c
+@@ -11,6 +11,7 @@
+ #include <asm/processor.h>
+ #include <asm/mce.h>
+ #include <asm/msr.h>
++#include <asm/pgtable.h>
+ /* By default disabled */
+ int mce_p5_enabled __read_mostly;
+@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
+       if (!cpu_has(c, X86_FEATURE_MCE))
+               return;
++      pax_open_kernel();
+       machine_check_vector = pentium_machine_check;
++      pax_close_kernel();
+       /* Make sure the vector pointer is visible before we enable MCEs: */
+       wmb();
+diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
+index 47a1870..8c019a7 100644
+--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
+@@ -288,7 +288,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
+       return notifier_from_errno(err);
+ }
+-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
++static struct notifier_block thermal_throttle_cpu_notifier =
+ {
+       .notifier_call = thermal_throttle_cpu_callback,
+ };
+diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
+index e9a701a..35317d6 100644
+--- a/arch/x86/kernel/cpu/mcheck/winchip.c
++++ b/arch/x86/kernel/cpu/mcheck/winchip.c
+@@ -10,6 +10,7 @@
+ #include <asm/processor.h>
+ #include <asm/mce.h>
+ #include <asm/msr.h>
++#include <asm/pgtable.h>
+ /* Machine check handler for WinChip C6: */
+ static void winchip_machine_check(struct pt_regs *regs, long error_code)
+@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
+ {
+       u32 lo, hi;
++      pax_open_kernel();
+       machine_check_vector = winchip_machine_check;
++      pax_close_kernel();
+       /* Make sure the vector pointer is visible before we enable MCEs: */
+       wmb();
+diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
+index ca22b73..9987afe 100644
+--- a/arch/x86/kernel/cpu/mtrr/main.c
++++ b/arch/x86/kernel/cpu/mtrr/main.c
+@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
+ u64 size_or_mask, size_and_mask;
+ static bool mtrr_aps_delayed_init;
+-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
++static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
+ const struct mtrr_ops *mtrr_if;
+diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
+index df5e41f..816c719 100644
+--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
++++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
+@@ -25,7 +25,7 @@ struct mtrr_ops {
+       int     (*validate_add_page)(unsigned long base, unsigned long size,
+                                    unsigned int type);
+       int     (*have_wrcomb)(void);
+-};
++} __do_const;
+ extern int generic_get_free_region(unsigned long base, unsigned long size,
+                                  int replace_reg);
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index 1025f3c..824f677 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -1311,7 +1311,7 @@ static void __init pmu_check_apic(void)
+       pr_info("no hardware sampling interrupt available.\n");
+ }
+-static struct attribute_group x86_pmu_format_group = {
++static attribute_group_no_const x86_pmu_format_group = {
+       .name = "format",
+       .attrs = NULL,
+ };
+@@ -1410,7 +1410,7 @@ static struct attribute *events_attr[] = {
+       NULL,
+ };
+-static struct attribute_group x86_pmu_events_group = {
++static attribute_group_no_const x86_pmu_events_group = {
+       .name = "events",
+       .attrs = events_attr,
+ };
+@@ -1920,7 +1920,7 @@ static unsigned long get_segment_base(unsigned int segment)
+               if (idx > GDT_ENTRIES)
+                       return 0;
+-              desc = __this_cpu_ptr(&gdt_page.gdt[0]);
++              desc = get_cpu_gdt_table(smp_processor_id());
+       }
+       return get_desc_base(desc + idx);
+@@ -2010,7 +2010,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+                       break;
+               perf_callchain_store(entry, frame.return_address);
+-              fp = frame.next_frame;
++              fp = (const void __force_user *)frame.next_frame;
+       }
+ }
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index a9e2207..d70c83a 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -2022,10 +2022,10 @@ __init int intel_pmu_init(void)
+        * v2 and above have a perf capabilities MSR
+        */
+       if (version > 1) {
+-              u64 capabilities;
++              u64 capabilities = x86_pmu.intel_cap.capabilities;
+-              rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
+-              x86_pmu.intel_cap.capabilities = capabilities;
++              if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
++                      x86_pmu.intel_cap.capabilities = capabilities;
+       }
+       intel_ds_init();
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+index 8aac56b..588fb13 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+@@ -3093,7 +3093,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
+ static int __init uncore_type_init(struct intel_uncore_type *type)
+ {
+       struct intel_uncore_pmu *pmus;
+-      struct attribute_group *attr_group;
++      attribute_group_no_const *attr_group;
+       struct attribute **attrs;
+       int i, j;
+@@ -3518,7 +3518,7 @@ static int
+       return NOTIFY_OK;
+ }
+-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
++static struct notifier_block uncore_cpu_nb = {
+       .notifier_call  = uncore_cpu_notifier,
+       /*
+        * to migrate uncore events, our notifier should be executed
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+index f952891..4722ad4 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
++++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+@@ -488,7 +488,7 @@ struct intel_uncore_box {
+ struct uncore_event_desc {
+       struct kobj_attribute attr;
+       const char *config;
+-};
++} __do_const;
+ #define INTEL_UNCORE_EVENT_DESC(_name, _config)                       \
+ {                                                             \
+diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
+index 1e4dbcf..b9a34c2 100644
+--- a/arch/x86/kernel/cpuid.c
++++ b/arch/x86/kernel/cpuid.c
+@@ -171,7 +171,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
+       return notifier_from_errno(err);
+ }
+-static struct notifier_block __refdata cpuid_class_cpu_notifier =
++static struct notifier_block cpuid_class_cpu_notifier =
+ {
+       .notifier_call = cpuid_class_cpu_callback,
+ };
+diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
+index 74467fe..18793d5 100644
+--- a/arch/x86/kernel/crash.c
++++ b/arch/x86/kernel/crash.c
+@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+       struct pt_regs fixed_regs;
+-#endif
+-#ifdef CONFIG_X86_32
+-      if (!user_mode_vm(regs)) {
++      if (!user_mode(regs)) {
+               crash_fixup_ss_esp(&fixed_regs, regs);
+               regs = &fixed_regs;
+       }
+diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
+index afa64ad..dce67dd 100644
+--- a/arch/x86/kernel/crash_dump_64.c
++++ b/arch/x86/kernel/crash_dump_64.c
+@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+               return -ENOMEM;
+       if (userbuf) {
+-              if (copy_to_user(buf, vaddr + offset, csize)) {
++              if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
+                       iounmap(vaddr);
+                       return -EFAULT;
+               }
+diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
+index 155a13f..1672b9b 100644
+--- a/arch/x86/kernel/doublefault_32.c
++++ b/arch/x86/kernel/doublefault_32.c
+@@ -11,7 +11,7 @@
+ #define DOUBLEFAULT_STACKSIZE (1024)
+ static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
+-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
++#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
+ #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
+@@ -21,7 +21,7 @@ static void doublefault_fn(void)
+       unsigned long gdt, tss;
+       native_store_gdt(&gdt_desc);
+-      gdt = gdt_desc.address;
++      gdt = (unsigned long)gdt_desc.address;
+       printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
+@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
+               /* 0x2 bit is always set */
+               .flags          = X86_EFLAGS_SF | 0x2,
+               .sp             = STACK_START,
+-              .es             = __USER_DS,
++              .es             = __KERNEL_DS,
+               .cs             = __KERNEL_CS,
+               .ss             = __KERNEL_DS,
+-              .ds             = __USER_DS,
++              .ds             = __KERNEL_DS,
+               .fs             = __KERNEL_PERCPU,
+               .__cr3          = __pa_nodebug(swapper_pg_dir),
+diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
+index deb6421..76bbc12 100644
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -2,6 +2,9 @@
+  *  Copyright (C) 1991, 1992  Linus Torvalds
+  *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+  */
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++#define __INCLUDED_BY_HIDESYM 1
++#endif
+ #include <linux/kallsyms.h>
+ #include <linux/kprobes.h>
+ #include <linux/uaccess.h>
+@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
+ static void
+ print_ftrace_graph_addr(unsigned long addr, void *data,
+                       const struct stacktrace_ops *ops,
+-                      struct thread_info *tinfo, int *graph)
++                      struct task_struct *task, int *graph)
+ {
+-      struct task_struct *task;
+       unsigned long ret_addr;
+       int index;
+       if (addr != (unsigned long)return_to_handler)
+               return;
+-      task = tinfo->task;
+       index = task->curr_ret_stack;
+       if (!task->ret_stack || index < *graph)
+@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
+ static inline void
+ print_ftrace_graph_addr(unsigned long addr, void *data,
+                       const struct stacktrace_ops *ops,
+-                      struct thread_info *tinfo, int *graph)
++                      struct task_struct *task, int *graph)
+ { }
+ #endif
+@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
+  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
+  */
+-static inline int valid_stack_ptr(struct thread_info *tinfo,
+-                      void *p, unsigned int size, void *end)
++static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
+ {
+-      void *t = tinfo;
+       if (end) {
+               if (p < end && p >= (end-THREAD_SIZE))
+                       return 1;
+@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
+ }
+ unsigned long
+-print_context_stack(struct thread_info *tinfo,
++print_context_stack(struct task_struct *task, void *stack_start,
+               unsigned long *stack, unsigned long bp,
+               const struct stacktrace_ops *ops, void *data,
+               unsigned long *end, int *graph)
+ {
+       struct stack_frame *frame = (struct stack_frame *)bp;
+-      while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
++      while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
+               unsigned long addr;
+               addr = *stack;
+@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
+                       } else {
+                               ops->address(data, addr, 0);
+                       }
+-                      print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
++                      print_ftrace_graph_addr(addr, data, ops, task, graph);
+               }
+               stack++;
+       }
+@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
+ EXPORT_SYMBOL_GPL(print_context_stack);
+ unsigned long
+-print_context_stack_bp(struct thread_info *tinfo,
++print_context_stack_bp(struct task_struct *task, void *stack_start,
+                      unsigned long *stack, unsigned long bp,
+                      const struct stacktrace_ops *ops, void *data,
+                      unsigned long *end, int *graph)
+@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
+       struct stack_frame *frame = (struct stack_frame *)bp;
+       unsigned long *ret_addr = &frame->return_address;
+-      while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
++      while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
+               unsigned long addr = *ret_addr;
+               if (!__kernel_text_address(addr))
+@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
+               ops->address(data, addr, 1);
+               frame = frame->next_frame;
+               ret_addr = &frame->return_address;
+-              print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
++              print_ftrace_graph_addr(addr, data, ops, task, graph);
+       }
+       return (unsigned long)frame;
+@@ -150,7 +149,7 @@ static int print_trace_stack(void *data, char *name)
+ static void print_trace_address(void *data, unsigned long addr, int reliable)
+ {
+       touch_nmi_watchdog();
+-      printk(data);
++      printk("%s", (char *)data);
+       printk_address(addr, reliable);
+ }
+@@ -219,6 +218,8 @@ unsigned __kprobes long oops_begin(void)
+ }
+ EXPORT_SYMBOL_GPL(oops_begin);
++extern void gr_handle_kernel_exploit(void);
++
+ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+ {
+       if (regs && kexec_should_crash(current))
+@@ -240,7 +241,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+               panic("Fatal exception in interrupt");
+       if (panic_on_oops)
+               panic("Fatal exception");
+-      do_exit(signr);
++
++      gr_handle_kernel_exploit();
++
++      do_group_exit(signr);
+ }
+ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+@@ -268,7 +272,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+       print_modules();
+       show_regs(regs);
+ #ifdef CONFIG_X86_32
+-      if (user_mode_vm(regs)) {
++      if (user_mode(regs)) {
+               sp = regs->sp;
+               ss = regs->ss & 0xffff;
+       } else {
+@@ -296,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
+       unsigned long flags = oops_begin();
+       int sig = SIGSEGV;
+-      if (!user_mode_vm(regs))
++      if (!user_mode(regs))
+               report_bug(regs->ip, regs);
+       if (__die(str, regs, err))
+diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
+index f2a1770..540657f 100644
+--- a/arch/x86/kernel/dumpstack_32.c
++++ b/arch/x86/kernel/dumpstack_32.c
+@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+               bp = stack_frame(task, regs);
+       for (;;) {
+-              struct thread_info *context;
++              void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
+-              context = (struct thread_info *)
+-                      ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+-              bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
++              bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
+-              stack = (unsigned long *)context->previous_esp;
+-              if (!stack)
++              if (stack_start == task_stack_page(task))
+                       break;
++              stack = *(unsigned long **)stack_start;
+               if (ops->stack(data, "IRQ") < 0)
+                       break;
+               touch_nmi_watchdog();
+@@ -87,27 +85,28 @@ void show_regs(struct pt_regs *regs)
+       int i;
+       show_regs_print_info(KERN_EMERG);
+-      __show_regs(regs, !user_mode_vm(regs));
++      __show_regs(regs, !user_mode(regs));
+       /*
+        * When in-kernel, we also print out the stack and code at the
+        * time of the fault..
+        */
+-      if (!user_mode_vm(regs)) {
++      if (!user_mode(regs)) {
+               unsigned int code_prologue = code_bytes * 43 / 64;
+               unsigned int code_len = code_bytes;
+               unsigned char c;
+               u8 *ip;
++              unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
+               pr_emerg("Stack:\n");
+               show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
+               pr_emerg("Code:");
+-              ip = (u8 *)regs->ip - code_prologue;
++              ip = (u8 *)regs->ip - code_prologue + cs_base;
+               if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
+                       /* try starting at IP */
+-                      ip = (u8 *)regs->ip;
++                      ip = (u8 *)regs->ip + cs_base;
+                       code_len = code_len - code_prologue + 1;
+               }
+               for (i = 0; i < code_len; i++, ip++) {
+@@ -116,7 +115,7 @@ void show_regs(struct pt_regs *regs)
+                               pr_cont("  Bad EIP value.");
+                               break;
+                       }
+-                      if (ip == (u8 *)regs->ip)
++                      if (ip == (u8 *)regs->ip + cs_base)
+                               pr_cont(" <%02x>", c);
+                       else
+                               pr_cont(" %02x", c);
+@@ -129,6 +128,7 @@ int is_valid_bugaddr(unsigned long ip)
+ {
+       unsigned short ud2;
++      ip = ktla_ktva(ip);
+       if (ip < PAGE_OFFSET)
+               return 0;
+       if (probe_kernel_address((unsigned short *)ip, ud2))
+@@ -136,3 +136,15 @@ int is_valid_bugaddr(unsigned long ip)
+       return ud2 == 0x0b0f;
+ }
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++void pax_check_alloca(unsigned long size)
++{
++      unsigned long sp = (unsigned long)&sp, stack_left;
++
++      /* all kernel stacks are of the same size */
++      stack_left = sp & (THREAD_SIZE - 1);
++      BUG_ON(stack_left < 256 || size >= stack_left - 256);
++}
++EXPORT_SYMBOL(pax_check_alloca);
++#endif
+diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
+index addb207..99635fa 100644
+--- a/arch/x86/kernel/dumpstack_64.c
++++ b/arch/x86/kernel/dumpstack_64.c
+@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+       unsigned long *irq_stack_end =
+               (unsigned long *)per_cpu(irq_stack_ptr, cpu);
+       unsigned used = 0;
+-      struct thread_info *tinfo;
+       int graph = 0;
+       unsigned long dummy;
++      void *stack_start;
+       if (!task)
+               task = current;
+@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+        * current stack address. If the stacks consist of nested
+        * exceptions
+        */
+-      tinfo = task_thread_info(task);
+       for (;;) {
+               char *id;
+               unsigned long *estack_end;
++
+               estack_end = in_exception_stack(cpu, (unsigned long)stack,
+                                               &used, &id);
+@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+                       if (ops->stack(data, id) < 0)
+                               break;
+-                      bp = ops->walk_stack(tinfo, stack, bp, ops,
++                      bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
+                                            data, estack_end, &graph);
+                       ops->stack(data, "<EOE>");
+                       /*
+@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+                        * second-to-last pointer (index -2 to end) in the
+                        * exception stack:
+                        */
++                      if ((u16)estack_end[-1] != __KERNEL_DS)
++                              goto out;
+                       stack = (unsigned long *) estack_end[-2];
+                       continue;
+               }
+@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+                       if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
+                               if (ops->stack(data, "IRQ") < 0)
+                                       break;
+-                              bp = ops->walk_stack(tinfo, stack, bp,
++                              bp = ops->walk_stack(task, irq_stack, stack, bp,
+                                       ops, data, irq_stack_end, &graph);
+                               /*
+                                * We link to the next stack (which would be
+@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+       /*
+        * This handles the process stack:
+        */
+-      bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
++      stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
++      bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
++out:
+       put_cpu();
+ }
+ EXPORT_SYMBOL(dump_trace);
+@@ -300,3 +304,50 @@ int is_valid_bugaddr(unsigned long ip)
+       return ud2 == 0x0b0f;
+ }
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++void pax_check_alloca(unsigned long size)
++{
++      unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
++      unsigned cpu, used;
++      char *id;
++
++      /* check the process stack first */
++      stack_start = (unsigned long)task_stack_page(current);
++      stack_end = stack_start + THREAD_SIZE;
++      if (likely(stack_start <= sp && sp < stack_end)) {
++              unsigned long stack_left = sp & (THREAD_SIZE - 1);
++              BUG_ON(stack_left < 256 || size >= stack_left - 256);
++              return;
++      }
++
++      cpu = get_cpu();
++
++      /* check the irq stacks */
++      stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
++      stack_start = stack_end - IRQ_STACK_SIZE;
++      if (stack_start <= sp && sp < stack_end) {
++              unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
++              put_cpu();
++              BUG_ON(stack_left < 256 || size >= stack_left - 256);
++              return;
++      }
++
++      /* check the exception stacks */
++      used = 0;
++      stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
++      stack_start = stack_end - EXCEPTION_STKSZ;
++      if (stack_end && stack_start <= sp && sp < stack_end) {
++              unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
++              put_cpu();
++              BUG_ON(stack_left < 256 || size >= stack_left - 256);
++              return;
++      }
++
++      put_cpu();
++
++      /* unknown stack */
++      BUG();
++}
++EXPORT_SYMBOL(pax_check_alloca);
++#endif
+diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
+index d32abea..74daf4f 100644
+--- a/arch/x86/kernel/e820.c
++++ b/arch/x86/kernel/e820.c
+@@ -800,8 +800,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
+ static void early_panic(char *msg)
+ {
+-      early_printk(msg);
+-      panic(msg);
++      early_printk("%s", msg);
++      panic("%s", msg);
+ }
+ static int userdef __initdata;
+diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
+index d15f575..d692043 100644
+--- a/arch/x86/kernel/early_printk.c
++++ b/arch/x86/kernel/early_printk.c
+@@ -7,6 +7,7 @@
+ #include <linux/pci_regs.h>
+ #include <linux/pci_ids.h>
+ #include <linux/errno.h>
++#include <linux/sched.h>
+ #include <asm/io.h>
+ #include <asm/processor.h>
+ #include <asm/fcntl.h>
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index 8f3e2de..6b71e39 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -177,13 +177,153 @@
+       /*CFI_REL_OFFSET gs, PT_GS*/
+ .endm
+ .macro SET_KERNEL_GS reg
++
++#ifdef CONFIG_CC_STACKPROTECTOR
+       movl $(__KERNEL_STACK_CANARY), \reg
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++      movl $(__USER_DS), \reg
++#else
++      xorl \reg, \reg
++#endif
++
+       movl \reg, %gs
+ .endm
+ #endif        /* CONFIG_X86_32_LAZY_GS */
+-.macro SAVE_ALL
++.macro pax_enter_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++      call pax_enter_kernel
++#endif
++.endm
++
++.macro pax_exit_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++      call pax_exit_kernel
++#endif
++.endm
++
++#ifdef CONFIG_PAX_KERNEXEC
++ENTRY(pax_enter_kernel)
++#ifdef CONFIG_PARAVIRT
++      pushl %eax
++      pushl %ecx
++      call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
++      mov %eax, %esi
++#else
++      mov %cr0, %esi
++#endif
++      bts $16, %esi
++      jnc 1f
++      mov %cs, %esi
++      cmp $__KERNEL_CS, %esi
++      jz 3f
++      ljmp $__KERNEL_CS, $3f
++1:    ljmp $__KERNEXEC_KERNEL_CS, $2f
++2:
++#ifdef CONFIG_PARAVIRT
++      mov %esi, %eax
++      call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++#else
++      mov %esi, %cr0
++#endif
++3:
++#ifdef CONFIG_PARAVIRT
++      popl %ecx
++      popl %eax
++#endif
++      ret
++ENDPROC(pax_enter_kernel)
++
++ENTRY(pax_exit_kernel)
++#ifdef CONFIG_PARAVIRT
++      pushl %eax
++      pushl %ecx
++#endif
++      mov %cs, %esi
++      cmp $__KERNEXEC_KERNEL_CS, %esi
++      jnz 2f
++#ifdef CONFIG_PARAVIRT
++      call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
++      mov %eax, %esi
++#else
++      mov %cr0, %esi
++#endif
++      btr $16, %esi
++      ljmp $__KERNEL_CS, $1f
++1:
++#ifdef CONFIG_PARAVIRT
++      mov %esi, %eax
++      call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
++#else
++      mov %esi, %cr0
++#endif
++2:
++#ifdef CONFIG_PARAVIRT
++      popl %ecx
++      popl %eax
++#endif
++      ret
++ENDPROC(pax_exit_kernel)
++#endif
++
++      .macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++      call pax_erase_kstack
++#endif
++      .endm
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++/*
++ * ebp: thread_info
++ */
++ENTRY(pax_erase_kstack)
++      pushl %edi
++      pushl %ecx
++      pushl %eax
++
++      mov TI_lowest_stack(%ebp), %edi
++      mov $-0xBEEF, %eax
++      std
++
++1:    mov %edi, %ecx
++      and $THREAD_SIZE_asm - 1, %ecx
++      shr $2, %ecx
++      repne scasl
++      jecxz 2f
++
++      cmp $2*16, %ecx
++      jc 2f
++
++      mov $2*16, %ecx
++      repe scasl
++      jecxz 2f
++      jne 1b
++
++2:    cld
++      mov %esp, %ecx
++      sub %edi, %ecx
++
++      cmp $THREAD_SIZE_asm, %ecx
++      jb 3f
++      ud2
++3:
++
++      shr $2, %ecx
++      rep stosl
++
++      mov TI_task_thread_sp0(%ebp), %edi
++      sub $128, %edi
++      mov %edi, TI_lowest_stack(%ebp)
++
++      popl %eax
++      popl %ecx
++      popl %edi
++      ret
++ENDPROC(pax_erase_kstack)
++#endif
++
++.macro __SAVE_ALL _DS
+       cld
+       PUSH_GS
+       pushl_cfi %fs
+@@ -206,7 +346,7 @@
+       CFI_REL_OFFSET ecx, 0
+       pushl_cfi %ebx
+       CFI_REL_OFFSET ebx, 0
+-      movl $(__USER_DS), %edx
++      movl $\_DS, %edx
+       movl %edx, %ds
+       movl %edx, %es
+       movl $(__KERNEL_PERCPU), %edx
+@@ -214,6 +354,15 @@
+       SET_KERNEL_GS %edx
+ .endm
++.macro SAVE_ALL
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      __SAVE_ALL __KERNEL_DS
++      pax_enter_kernel
++#else
++      __SAVE_ALL __USER_DS
++#endif
++.endm
++
+ .macro RESTORE_INT_REGS
+       popl_cfi %ebx
+       CFI_RESTORE ebx
+@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
+       popfl_cfi
+       jmp syscall_exit
+       CFI_ENDPROC
+-END(ret_from_fork)
++ENDPROC(ret_from_fork)
+ ENTRY(ret_from_kernel_thread)
+       CFI_STARTPROC
+@@ -344,7 +493,15 @@ ret_from_intr:
+       andl $SEGMENT_RPL_MASK, %eax
+ #endif
+       cmpl $USER_RPL, %eax
++
++#ifdef CONFIG_PAX_KERNEXEC
++      jae resume_userspace
++
++      pax_exit_kernel
++      jmp resume_kernel
++#else
+       jb resume_kernel                # not returning to v8086 or userspace
++#endif
+ ENTRY(resume_userspace)
+       LOCKDEP_SYS_EXIT
+@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
+       andl $_TIF_WORK_MASK, %ecx      # is there any work to be done on
+                                       # int/exception return?
+       jne work_pending
+-      jmp restore_all
+-END(ret_from_exception)
++      jmp restore_all_pax
++ENDPROC(ret_from_exception)
+ #ifdef CONFIG_PREEMPT
+ ENTRY(resume_kernel)
+@@ -372,7 +529,7 @@ need_resched:
+       jz restore_all
+       call preempt_schedule_irq
+       jmp need_resched
+-END(resume_kernel)
++ENDPROC(resume_kernel)
+ #endif
+       CFI_ENDPROC
+ /*
+@@ -406,30 +563,45 @@ sysenter_past_esp:
+       /*CFI_REL_OFFSET cs, 0*/
+       /*
+        * Push current_thread_info()->sysenter_return to the stack.
+-       * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
+-       * pushed above; +8 corresponds to copy_thread's esp0 setting.
+        */
+-      pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
++      pushl_cfi $0
+       CFI_REL_OFFSET eip, 0
+       pushl_cfi %eax
+       SAVE_ALL
++      GET_THREAD_INFO(%ebp)
++      movl TI_sysenter_return(%ebp),%ebp
++      movl %ebp,PT_EIP(%esp)
+       ENABLE_INTERRUPTS(CLBR_NONE)
+ /*
+  * Load the potential sixth argument from user stack.
+  * Careful about security.
+  */
++      movl PT_OLDESP(%esp),%ebp
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      mov PT_OLDSS(%esp),%ds
++1:    movl %ds:(%ebp),%ebp
++      push %ss
++      pop %ds
++#else
+       cmpl $__PAGE_OFFSET-3,%ebp
+       jae syscall_fault
+       ASM_STAC
+ 1:    movl (%ebp),%ebp
+       ASM_CLAC
++#endif
++
+       movl %ebp,PT_EBP(%esp)
+       _ASM_EXTABLE(1b,syscall_fault)
+       GET_THREAD_INFO(%ebp)
++#ifdef CONFIG_PAX_RANDKSTACK
++      pax_erase_kstack
++#endif
++
+       testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
+       jnz sysenter_audit
+ sysenter_do_call:
+@@ -444,12 +616,24 @@ sysenter_do_call:
+       testl $_TIF_ALLWORK_MASK, %ecx
+       jne sysexit_audit
+ sysenter_exit:
++
++#ifdef CONFIG_PAX_RANDKSTACK
++      pushl_cfi %eax
++      movl %esp, %eax
++      call pax_randomize_kstack
++      popl_cfi %eax
++#endif
++
++      pax_erase_kstack
++
+ /* if something modifies registers it must also disable sysexit */
+       movl PT_EIP(%esp), %edx
+       movl PT_OLDESP(%esp), %ecx
+       xorl %ebp,%ebp
+       TRACE_IRQS_ON
+ 1:    mov  PT_FS(%esp), %fs
++2:    mov  PT_DS(%esp), %ds
++3:    mov  PT_ES(%esp), %es
+       PTGS_TO_GS
+       ENABLE_INTERRUPTS_SYSEXIT
+@@ -466,6 +650,9 @@ sysenter_audit:
+       movl %eax,%edx                  /* 2nd arg: syscall number */
+       movl $AUDIT_ARCH_I386,%eax      /* 1st arg: audit arch */
+       call __audit_syscall_entry
++
++      pax_erase_kstack
++
+       pushl_cfi %ebx
+       movl PT_EAX(%esp),%eax          /* reload syscall number */
+       jmp sysenter_do_call
+@@ -491,10 +678,16 @@ sysexit_audit:
+       CFI_ENDPROC
+ .pushsection .fixup,"ax"
+-2:    movl $0,PT_FS(%esp)
++4:    movl $0,PT_FS(%esp)
++      jmp 1b
++5:    movl $0,PT_DS(%esp)
++      jmp 1b
++6:    movl $0,PT_ES(%esp)
+       jmp 1b
+ .popsection
+-      _ASM_EXTABLE(1b,2b)
++      _ASM_EXTABLE(1b,4b)
++      _ASM_EXTABLE(2b,5b)
++      _ASM_EXTABLE(3b,6b)
+       PTGS_TO_GS_EX
+ ENDPROC(ia32_sysenter_target)
+@@ -509,6 +702,11 @@ ENTRY(system_call)
+       pushl_cfi %eax                  # save orig_eax
+       SAVE_ALL
+       GET_THREAD_INFO(%ebp)
++
++#ifdef CONFIG_PAX_RANDKSTACK
++      pax_erase_kstack
++#endif
++
+                                       # system call tracing in operation / emulation
+       testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
+       jnz syscall_trace_entry
+@@ -527,6 +725,15 @@ syscall_exit:
+       testl $_TIF_ALLWORK_MASK, %ecx  # current->work
+       jne syscall_exit_work
++restore_all_pax:
++
++#ifdef CONFIG_PAX_RANDKSTACK
++      movl %esp, %eax
++      call pax_randomize_kstack
++#endif
++
++      pax_erase_kstack
++
+ restore_all:
+       TRACE_IRQS_IRET
+ restore_all_notrace:
+@@ -583,14 +790,34 @@ ldt_ss:
+  * compensating for the offset by changing to the ESPFIX segment with
+  * a base address that matches for the difference.
+  */
+-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
++#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
+       mov %esp, %edx                  /* load kernel esp */
+       mov PT_OLDESP(%esp), %eax       /* load userspace esp */
+       mov %dx, %ax                    /* eax: new kernel esp */
+       sub %eax, %edx                  /* offset (low word is 0) */
++#ifdef CONFIG_SMP
++      movl PER_CPU_VAR(cpu_number), %ebx
++      shll $PAGE_SHIFT_asm, %ebx
++      addl $cpu_gdt_table, %ebx
++#else
++      movl $cpu_gdt_table, %ebx
++#endif
+       shr $16, %edx
+-      mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
+-      mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
++
++#ifdef CONFIG_PAX_KERNEXEC
++      mov %cr0, %esi
++      btr $16, %esi
++      mov %esi, %cr0
++#endif
++
++      mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
++      mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
++
++#ifdef CONFIG_PAX_KERNEXEC
++      bts $16, %esi
++      mov %esi, %cr0
++#endif
++
+       pushl_cfi $__ESPFIX_SS
+       pushl_cfi %eax                  /* new kernel esp */
+       /* Disable interrupts, but do not irqtrace this section: we
+@@ -619,20 +846,18 @@ work_resched:
+       movl TI_flags(%ebp), %ecx
+       andl $_TIF_WORK_MASK, %ecx      # is there any work to be done other
+                                       # than syscall tracing?
+-      jz restore_all
++      jz restore_all_pax
+       testb $_TIF_NEED_RESCHED, %cl
+       jnz work_resched
+ work_notifysig:                               # deal with pending signals and
+                                       # notify-resume requests
++      movl %esp, %eax
+ #ifdef CONFIG_VM86
+       testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
+-      movl %esp, %eax
+       jne work_notifysig_v86          # returning to kernel-space or
+                                       # vm86-space
+ 1:
+-#else
+-      movl %esp, %eax
+ #endif
+       TRACE_IRQS_ON
+       ENABLE_INTERRUPTS(CLBR_NONE)
+@@ -653,7 +878,7 @@ work_notifysig_v86:
+       movl %eax, %esp
+       jmp 1b
+ #endif
+-END(work_pending)
++ENDPROC(work_pending)
+       # perform syscall exit tracing
+       ALIGN
+@@ -661,11 +886,14 @@ syscall_trace_entry:
+       movl $-ENOSYS,PT_EAX(%esp)
+       movl %esp, %eax
+       call syscall_trace_enter
++
++      pax_erase_kstack
++
+       /* What it returned is what we'll actually use.  */
+       cmpl $(NR_syscalls), %eax
+       jnae syscall_call
+       jmp syscall_exit
+-END(syscall_trace_entry)
++ENDPROC(syscall_trace_entry)
+       # perform syscall exit tracing
+       ALIGN
+@@ -678,21 +906,25 @@ syscall_exit_work:
+       movl %esp, %eax
+       call syscall_trace_leave
+       jmp resume_userspace
+-END(syscall_exit_work)
++ENDPROC(syscall_exit_work)
+       CFI_ENDPROC
+       RING0_INT_FRAME                 # can't unwind into user space anyway
+ syscall_fault:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      push %ss
++      pop %ds
++#endif
+       ASM_CLAC
+       GET_THREAD_INFO(%ebp)
+       movl $-EFAULT,PT_EAX(%esp)
+       jmp resume_userspace
+-END(syscall_fault)
++ENDPROC(syscall_fault)
+ syscall_badsys:
+       movl $-ENOSYS,PT_EAX(%esp)
+       jmp resume_userspace
+-END(syscall_badsys)
++ENDPROC(syscall_badsys)
+       CFI_ENDPROC
+ /*
+  * End of kprobes section
+@@ -708,8 +940,15 @@ END(syscall_badsys)
+  * normal stack and adjusts ESP with the matching offset.
+  */
+       /* fixup the stack */
+-      mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
+-      mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
++#ifdef CONFIG_SMP
++      movl PER_CPU_VAR(cpu_number), %ebx
++      shll $PAGE_SHIFT_asm, %ebx
++      addl $cpu_gdt_table, %ebx
++#else
++      movl $cpu_gdt_table, %ebx
++#endif
++      mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
++      mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
+       shl $16, %eax
+       addl %esp, %eax                 /* the adjusted stack pointer */
+       pushl_cfi $__KERNEL_DS
+@@ -762,7 +1001,7 @@ vector=vector+1
+   .endr
+ 2:    jmp common_interrupt
+ .endr
+-END(irq_entries_start)
++ENDPROC(irq_entries_start)
+ .previous
+ END(interrupt)
+@@ -813,7 +1052,7 @@ ENTRY(coprocessor_error)
+       pushl_cfi $do_coprocessor_error
+       jmp error_code
+       CFI_ENDPROC
+-END(coprocessor_error)
++ENDPROC(coprocessor_error)
+ ENTRY(simd_coprocessor_error)
+       RING0_INT_FRAME
+@@ -826,7 +1065,7 @@ ENTRY(simd_coprocessor_error)
+ .section .altinstructions,"a"
+       altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
+ .previous
+-.section .altinstr_replacement,"ax"
++.section .altinstr_replacement,"a"
+ 663:  pushl $do_simd_coprocessor_error
+ 664:
+ .previous
+@@ -835,7 +1074,7 @@ ENTRY(simd_coprocessor_error)
+ #endif
+       jmp error_code
+       CFI_ENDPROC
+-END(simd_coprocessor_error)
++ENDPROC(simd_coprocessor_error)
+ ENTRY(device_not_available)
+       RING0_INT_FRAME
+@@ -844,18 +1083,18 @@ ENTRY(device_not_available)
+       pushl_cfi $do_device_not_available
+       jmp error_code
+       CFI_ENDPROC
+-END(device_not_available)
++ENDPROC(device_not_available)
+ #ifdef CONFIG_PARAVIRT
+ ENTRY(native_iret)
+       iret
+       _ASM_EXTABLE(native_iret, iret_exc)
+-END(native_iret)
++ENDPROC(native_iret)
+ ENTRY(native_irq_enable_sysexit)
+       sti
+       sysexit
+-END(native_irq_enable_sysexit)
++ENDPROC(native_irq_enable_sysexit)
+ #endif
+ ENTRY(overflow)
+@@ -865,7 +1104,7 @@ ENTRY(overflow)
+       pushl_cfi $do_overflow
+       jmp error_code
+       CFI_ENDPROC
+-END(overflow)
++ENDPROC(overflow)
+ ENTRY(bounds)
+       RING0_INT_FRAME
+@@ -874,7 +1113,7 @@ ENTRY(bounds)
+       pushl_cfi $do_bounds
+       jmp error_code
+       CFI_ENDPROC
+-END(bounds)
++ENDPROC(bounds)
+ ENTRY(invalid_op)
+       RING0_INT_FRAME
+@@ -883,7 +1122,7 @@ ENTRY(invalid_op)
+       pushl_cfi $do_invalid_op
+       jmp error_code
+       CFI_ENDPROC
+-END(invalid_op)
++ENDPROC(invalid_op)
+ ENTRY(coprocessor_segment_overrun)
+       RING0_INT_FRAME
+@@ -892,7 +1131,7 @@ ENTRY(coprocessor_segment_overrun)
+       pushl_cfi $do_coprocessor_segment_overrun
+       jmp error_code
+       CFI_ENDPROC
+-END(coprocessor_segment_overrun)
++ENDPROC(coprocessor_segment_overrun)
+ ENTRY(invalid_TSS)
+       RING0_EC_FRAME
+@@ -900,7 +1139,7 @@ ENTRY(invalid_TSS)
+       pushl_cfi $do_invalid_TSS
+       jmp error_code
+       CFI_ENDPROC
+-END(invalid_TSS)
++ENDPROC(invalid_TSS)
+ ENTRY(segment_not_present)
+       RING0_EC_FRAME
+@@ -908,7 +1147,7 @@ ENTRY(segment_not_present)
+       pushl_cfi $do_segment_not_present
+       jmp error_code
+       CFI_ENDPROC
+-END(segment_not_present)
++ENDPROC(segment_not_present)
+ ENTRY(stack_segment)
+       RING0_EC_FRAME
+@@ -916,7 +1155,7 @@ ENTRY(stack_segment)
+       pushl_cfi $do_stack_segment
+       jmp error_code
+       CFI_ENDPROC
+-END(stack_segment)
++ENDPROC(stack_segment)
+ ENTRY(alignment_check)
+       RING0_EC_FRAME
+@@ -924,7 +1163,7 @@ ENTRY(alignment_check)
+       pushl_cfi $do_alignment_check
+       jmp error_code
+       CFI_ENDPROC
+-END(alignment_check)
++ENDPROC(alignment_check)
+ ENTRY(divide_error)
+       RING0_INT_FRAME
+@@ -933,7 +1172,7 @@ ENTRY(divide_error)
+       pushl_cfi $do_divide_error
+       jmp error_code
+       CFI_ENDPROC
+-END(divide_error)
++ENDPROC(divide_error)
+ #ifdef CONFIG_X86_MCE
+ ENTRY(machine_check)
+@@ -943,7 +1182,7 @@ ENTRY(machine_check)
+       pushl_cfi machine_check_vector
+       jmp error_code
+       CFI_ENDPROC
+-END(machine_check)
++ENDPROC(machine_check)
+ #endif
+ ENTRY(spurious_interrupt_bug)
+@@ -953,7 +1192,7 @@ ENTRY(spurious_interrupt_bug)
+       pushl_cfi $do_spurious_interrupt_bug
+       jmp error_code
+       CFI_ENDPROC
+-END(spurious_interrupt_bug)
++ENDPROC(spurious_interrupt_bug)
+ /*
+  * End of kprobes section
+  */
+@@ -1063,7 +1302,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
+ ENTRY(mcount)
+       ret
+-END(mcount)
++ENDPROC(mcount)
+ ENTRY(ftrace_caller)
+       cmpl $0, function_trace_stop
+@@ -1096,7 +1335,7 @@ ftrace_graph_call:
+ .globl ftrace_stub
+ ftrace_stub:
+       ret
+-END(ftrace_caller)
++ENDPROC(ftrace_caller)
+ ENTRY(ftrace_regs_caller)
+       pushf   /* push flags before compare (in cs location) */
+@@ -1197,7 +1436,7 @@ trace:
+       popl %ecx
+       popl %eax
+       jmp ftrace_stub
+-END(mcount)
++ENDPROC(mcount)
+ #endif /* CONFIG_DYNAMIC_FTRACE */
+ #endif /* CONFIG_FUNCTION_TRACER */
+@@ -1215,7 +1454,7 @@ ENTRY(ftrace_graph_caller)
+       popl %ecx
+       popl %eax
+       ret
+-END(ftrace_graph_caller)
++ENDPROC(ftrace_graph_caller)
+ .globl return_to_handler
+ return_to_handler:
+@@ -1271,15 +1510,18 @@ error_code:
+       movl $-1, PT_ORIG_EAX(%esp)     # no syscall to restart
+       REG_TO_PTGS %ecx
+       SET_KERNEL_GS %ecx
+-      movl $(__USER_DS), %ecx
++      movl $(__KERNEL_DS), %ecx
+       movl %ecx, %ds
+       movl %ecx, %es
++
++      pax_enter_kernel
++
+       TRACE_IRQS_OFF
+       movl %esp,%eax                  # pt_regs pointer
+       call *%edi
+       jmp ret_from_exception
+       CFI_ENDPROC
+-END(page_fault)
++ENDPROC(page_fault)
+ /*
+  * Debug traps and NMI can happen at the one SYSENTER instruction
+@@ -1322,7 +1564,7 @@ debug_stack_correct:
+       call do_debug
+       jmp ret_from_exception
+       CFI_ENDPROC
+-END(debug)
++ENDPROC(debug)
+ /*
+  * NMI is doubly nasty. It can happen _while_ we're handling
+@@ -1360,6 +1602,9 @@ nmi_stack_correct:
+       xorl %edx,%edx          # zero error code
+       movl %esp,%eax          # pt_regs pointer
+       call do_nmi
++
++      pax_exit_kernel
++
+       jmp restore_all_notrace
+       CFI_ENDPROC
+@@ -1396,12 +1641,15 @@ nmi_espfix_stack:
+       FIXUP_ESPFIX_STACK              # %eax == %esp
+       xorl %edx,%edx                  # zero error code
+       call do_nmi
++
++      pax_exit_kernel
++
+       RESTORE_REGS
+       lss 12+4(%esp), %esp            # back to espfix stack
+       CFI_ADJUST_CFA_OFFSET -24
+       jmp irq_return
+       CFI_ENDPROC
+-END(nmi)
++ENDPROC(nmi)
+ ENTRY(int3)
+       RING0_INT_FRAME
+@@ -1414,14 +1662,14 @@ ENTRY(int3)
+       call do_int3
+       jmp ret_from_exception
+       CFI_ENDPROC
+-END(int3)
++ENDPROC(int3)
+ ENTRY(general_protection)
+       RING0_EC_FRAME
+       pushl_cfi $do_general_protection
+       jmp error_code
+       CFI_ENDPROC
+-END(general_protection)
++ENDPROC(general_protection)
+ #ifdef CONFIG_KVM_GUEST
+ ENTRY(async_page_fault)
+@@ -1430,7 +1678,7 @@ ENTRY(async_page_fault)
+       pushl_cfi $do_async_page_fault
+       jmp error_code
+       CFI_ENDPROC
+-END(async_page_fault)
++ENDPROC(async_page_fault)
+ #endif
+ /*
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index 7272089..0b74104 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -59,6 +59,8 @@
+ #include <asm/context_tracking.h>
+ #include <asm/smap.h>
+ #include <linux/err.h>
++#include <asm/pgtable.h>
++#include <asm/alternative-asm.h>
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
+ #include <linux/elf-em.h>
+@@ -80,8 +82,9 @@
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ ENTRY(function_hook)
++      pax_force_retaddr
+       retq
+-END(function_hook)
++ENDPROC(function_hook)
+ /* skip is set if stack has been adjusted */
+ .macro ftrace_caller_setup skip=0
+@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
+ #endif
+ GLOBAL(ftrace_stub)
++      pax_force_retaddr
+       retq
+-END(ftrace_caller)
++ENDPROC(ftrace_caller)
+ ENTRY(ftrace_regs_caller)
+       /* Save the current flags before compare (in SS location)*/
+@@ -191,7 +195,7 @@ ftrace_restore_flags:
+       popfq
+       jmp  ftrace_stub
+-END(ftrace_regs_caller)
++ENDPROC(ftrace_regs_caller)
+ #else /* ! CONFIG_DYNAMIC_FTRACE */
+@@ -212,6 +216,7 @@ ENTRY(function_hook)
+ #endif
+ GLOBAL(ftrace_stub)
++      pax_force_retaddr
+       retq
+ trace:
+@@ -225,12 +230,13 @@ trace:
+ #endif
+       subq $MCOUNT_INSN_SIZE, %rdi
++      pax_force_fptr ftrace_trace_function
+       call   *ftrace_trace_function
+       MCOUNT_RESTORE_FRAME
+       jmp ftrace_stub
+-END(function_hook)
++ENDPROC(function_hook)
+ #endif /* CONFIG_DYNAMIC_FTRACE */
+ #endif /* CONFIG_FUNCTION_TRACER */
+@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
+       MCOUNT_RESTORE_FRAME
++      pax_force_retaddr
+       retq
+-END(ftrace_graph_caller)
++ENDPROC(ftrace_graph_caller)
+ GLOBAL(return_to_handler)
+       subq  $24, %rsp
+@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
+       movq 8(%rsp), %rdx
+       movq (%rsp), %rax
+       addq $24, %rsp
++      pax_force_fptr %rdi
+       jmp *%rdi
++ENDPROC(return_to_handler)
+ #endif
+@@ -284,6 +293,430 @@ ENTRY(native_usergs_sysret64)
+ ENDPROC(native_usergs_sysret64)
+ #endif /* CONFIG_PARAVIRT */
++      .macro ljmpq sel, off
++#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
++      .byte 0x48; ljmp *1234f(%rip)
++      .pushsection .rodata
++      .align 16
++      1234: .quad \off; .word \sel
++      .popsection
++#else
++      pushq $\sel
++      pushq $\off
++      lretq
++#endif
++      .endm
++
++      .macro pax_enter_kernel
++      pax_set_fptr_mask
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      call pax_enter_kernel
++#endif
++      .endm
++
++      .macro pax_exit_kernel
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      call pax_exit_kernel
++#endif
++
++      .endm
++
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ENTRY(pax_enter_kernel)
++      pushq %rdi
++
++#ifdef CONFIG_PARAVIRT
++      PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++      GET_CR0_INTO_RDI
++      bts $16,%rdi
++      jnc 3f
++      mov %cs,%edi
++      cmp $__KERNEL_CS,%edi
++      jnz 2f
++1:
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      661: jmp 111f
++      .pushsection .altinstr_replacement, "a"
++      662: ASM_NOP2
++      .popsection
++      .pushsection .altinstructions, "a"
++      altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
++      .popsection
++      GET_CR3_INTO_RDI
++      cmp $0,%dil
++      jnz 112f
++      mov $__KERNEL_DS,%edi
++      mov %edi,%ss
++      jmp 111f
++112:  cmp $1,%dil
++      jz 113f
++      ud2
++113:  sub $4097,%rdi
++      bts $63,%rdi
++      SET_RDI_INTO_CR3
++      mov $__UDEREF_KERNEL_DS,%edi
++      mov %edi,%ss
++111:
++#endif
++
++#ifdef CONFIG_PARAVIRT
++      PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++      popq %rdi
++      pax_force_retaddr
++      retq
++
++#ifdef CONFIG_PAX_KERNEXEC
++2:    ljmpq __KERNEL_CS,1b
++3:    ljmpq __KERNEXEC_KERNEL_CS,4f
++4:    SET_RDI_INTO_CR0
++      jmp 1b
++#endif
++ENDPROC(pax_enter_kernel)
++
++ENTRY(pax_exit_kernel)
++      pushq %rdi
++
++#ifdef CONFIG_PARAVIRT
++      PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++      mov %cs,%rdi
++      cmp $__KERNEXEC_KERNEL_CS,%edi
++      jz 2f
++      GET_CR0_INTO_RDI
++      bts $16,%rdi
++      jnc 4f
++1:
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      661: jmp 111f
++      .pushsection .altinstr_replacement, "a"
++      662: ASM_NOP2
++      .popsection
++      .pushsection .altinstructions, "a"
++      altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
++      .popsection
++      mov %ss,%edi
++      cmp $__UDEREF_KERNEL_DS,%edi
++      jnz 111f
++      GET_CR3_INTO_RDI
++      cmp $0,%dil
++      jz 112f
++      ud2
++112:  add $4097,%rdi
++      bts $63,%rdi
++      SET_RDI_INTO_CR3
++      mov $__KERNEL_DS,%edi
++      mov %edi,%ss
++111:
++#endif
++
++#ifdef CONFIG_PARAVIRT
++      PV_RESTORE_REGS(CLBR_RDI);
++#endif
++
++      popq %rdi
++      pax_force_retaddr
++      retq
++
++#ifdef CONFIG_PAX_KERNEXEC
++2:    GET_CR0_INTO_RDI
++      btr $16,%rdi
++      jnc 4f
++      ljmpq __KERNEL_CS,3f
++3:    SET_RDI_INTO_CR0
++      jmp 1b
++4:    ud2
++      jmp 4b
++#endif
++ENDPROC(pax_exit_kernel)
++#endif
++
++      .macro pax_enter_kernel_user
++      pax_set_fptr_mask
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      call pax_enter_kernel_user
++#endif
++      .endm
++
++      .macro pax_exit_kernel_user
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      call pax_exit_kernel_user
++#endif
++#ifdef CONFIG_PAX_RANDKSTACK
++      pushq %rax
++      pushq %r11
++      call pax_randomize_kstack
++      popq %r11
++      popq %rax
++#endif
++      .endm
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ENTRY(pax_enter_kernel_user)
++      pushq %rdi
++      pushq %rbx
++
++#ifdef CONFIG_PARAVIRT
++      PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++      661: jmp 111f
++      .pushsection .altinstr_replacement, "a"
++      662: ASM_NOP2
++      .popsection
++      .pushsection .altinstructions, "a"
++      altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
++      .popsection
++      GET_CR3_INTO_RDI
++      cmp $1,%dil
++      jnz 4f
++      sub $4097,%rdi
++      bts $63,%rdi
++      SET_RDI_INTO_CR3
++      jmp 3f
++111:
++
++      GET_CR3_INTO_RDI
++      mov %rdi,%rbx
++      add $__START_KERNEL_map,%rbx
++      sub phys_base(%rip),%rbx
++
++#ifdef CONFIG_PARAVIRT
++      cmpl $0, pv_info+PARAVIRT_enabled
++      jz 1f
++      pushq %rdi
++      i = 0
++      .rept USER_PGD_PTRS
++      mov i*8(%rbx),%rsi
++      mov $0,%sil
++      lea i*8(%rbx),%rdi
++      call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
++      i = i + 1
++      .endr
++      popq %rdi
++      jmp 2f
++1:
++#endif
++
++      i = 0
++      .rept USER_PGD_PTRS
++      movb $0,i*8(%rbx)
++      i = i + 1
++      .endr
++
++2:    SET_RDI_INTO_CR3
++
++#ifdef CONFIG_PAX_KERNEXEC
++      GET_CR0_INTO_RDI
++      bts $16,%rdi
++      SET_RDI_INTO_CR0
++#endif
++
++3:
++
++#ifdef CONFIG_PARAVIRT
++      PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++      popq %rbx
++      popq %rdi
++      pax_force_retaddr
++      retq
++4:    ud2
++ENDPROC(pax_enter_kernel_user)
++
++ENTRY(pax_exit_kernel_user)
++      pushq %rdi
++      pushq %rbx
++
++#ifdef CONFIG_PARAVIRT
++      PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++      GET_CR3_INTO_RDI
++      661: jmp 1f
++      .pushsection .altinstr_replacement, "a"
++      662: ASM_NOP2
++      .popsection
++      .pushsection .altinstructions, "a"
++      altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
++      .popsection
++      cmp $0,%dil
++      jnz 3f
++      add $4097,%rdi
++      bts $63,%rdi
++      SET_RDI_INTO_CR3
++      jmp 2f
++1:
++
++      mov %rdi,%rbx
++
++#ifdef CONFIG_PAX_KERNEXEC
++      GET_CR0_INTO_RDI
++      btr $16,%rdi
++      jnc 3f
++      SET_RDI_INTO_CR0
++#endif
++
++      add $__START_KERNEL_map,%rbx
++      sub phys_base(%rip),%rbx
++
++#ifdef CONFIG_PARAVIRT
++      cmpl $0, pv_info+PARAVIRT_enabled
++      jz 1f
++      i = 0
++      .rept USER_PGD_PTRS
++      mov i*8(%rbx),%rsi
++      mov $0x67,%sil
++      lea i*8(%rbx),%rdi
++      call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
++      i = i + 1
++      .endr
++      jmp 2f
++1:
++#endif
++
++      i = 0
++      .rept USER_PGD_PTRS
++      movb $0x67,i*8(%rbx)
++      i = i + 1
++      .endr
++2:
++
++#ifdef CONFIG_PARAVIRT
++      PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++      popq %rbx
++      popq %rdi
++      pax_force_retaddr
++      retq
++3:    ud2
++ENDPROC(pax_exit_kernel_user)
++#endif
++
++      .macro pax_enter_kernel_nmi
++      pax_set_fptr_mask
++
++#ifdef CONFIG_PAX_KERNEXEC
++      GET_CR0_INTO_RDI
++      bts $16,%rdi
++      jc 110f
++      SET_RDI_INTO_CR0
++      or $2,%ebx
++110:
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      661: jmp 111f
++      .pushsection .altinstr_replacement, "a"
++      662: ASM_NOP2
++      .popsection
++      .pushsection .altinstructions, "a"
++      altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
++      .popsection
++      GET_CR3_INTO_RDI
++      cmp $0,%dil
++      jz 111f
++      sub $4097,%rdi
++      or $4,%ebx
++      bts $63,%rdi
++      SET_RDI_INTO_CR3
++      mov $__UDEREF_KERNEL_DS,%edi
++      mov %edi,%ss
++111:
++#endif
++      .endm
++
++      .macro pax_exit_kernel_nmi
++#ifdef CONFIG_PAX_KERNEXEC
++      btr $1,%ebx
++      jnc 110f
++      GET_CR0_INTO_RDI
++      btr $16,%rdi
++      SET_RDI_INTO_CR0
++110:
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      btr $2,%ebx
++      jnc 111f
++      GET_CR3_INTO_RDI
++      add $4097,%rdi
++      bts $63,%rdi
++      SET_RDI_INTO_CR3
++      mov $__KERNEL_DS,%edi
++      mov %edi,%ss
++111:
++#endif
++      .endm
++
++      .macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++      call pax_erase_kstack
++#endif
++      .endm
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ENTRY(pax_erase_kstack)
++      pushq %rdi
++      pushq %rcx
++      pushq %rax
++      pushq %r11
++
++      GET_THREAD_INFO(%r11)
++      mov TI_lowest_stack(%r11), %rdi
++      mov $-0xBEEF, %rax
++      std
++
++1:    mov %edi, %ecx
++      and $THREAD_SIZE_asm - 1, %ecx
++      shr $3, %ecx
++      repne scasq
++      jecxz 2f
++
++      cmp $2*8, %ecx
++      jc 2f
++
++      mov $2*8, %ecx
++      repe scasq
++      jecxz 2f
++      jne 1b
++
++2:    cld
++      mov %esp, %ecx
++      sub %edi, %ecx
++
++      cmp $THREAD_SIZE_asm, %rcx
++      jb 3f
++      ud2
++3:
++
++      shr $3, %ecx
++      rep stosq
++
++      mov TI_task_thread_sp0(%r11), %rdi
++      sub $256, %rdi
++      mov %rdi, TI_lowest_stack(%r11)
++
++      popq %r11
++      popq %rax
++      popq %rcx
++      popq %rdi
++      pax_force_retaddr
++      ret
++ENDPROC(pax_erase_kstack)
++#endif
+ .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
+ #ifdef CONFIG_TRACE_IRQFLAGS
+@@ -375,8 +808,8 @@ ENDPROC(native_usergs_sysret64)
+       .endm
+       .macro UNFAKE_STACK_FRAME
+-      addq $8*6, %rsp
+-      CFI_ADJUST_CFA_OFFSET   -(6*8)
++      addq $8*6 + ARG_SKIP, %rsp
++      CFI_ADJUST_CFA_OFFSET   -(6*8 + ARG_SKIP)
+       .endm
+ /*
+@@ -463,7 +896,7 @@ ENDPROC(native_usergs_sysret64)
+       movq %rsp, %rsi
+       leaq -RBP(%rsp),%rdi    /* arg1 for handler */
+-      testl $3, CS-RBP(%rsi)
++      testb $3, CS-RBP(%rsi)
+       je 1f
+       SWAPGS
+       /*
+@@ -498,9 +931,10 @@ ENTRY(save_rest)
+       movq_cfi r15, R15+16
+       movq %r11, 8(%rsp)      /* return address */
+       FIXUP_TOP_OF_STACK %r11, 16
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+-END(save_rest)
++ENDPROC(save_rest)
+ /* save complete stack frame */
+       .pushsection .kprobes.text, "ax"
+@@ -529,9 +963,10 @@ ENTRY(save_paranoid)
+       js 1f   /* negative -> in kernel */
+       SWAPGS
+       xorl %ebx,%ebx
+-1:    ret
++1:    pax_force_retaddr_bts
++      ret
+       CFI_ENDPROC
+-END(save_paranoid)
++ENDPROC(save_paranoid)
+       .popsection
+ /*
+@@ -553,7 +988,7 @@ ENTRY(ret_from_fork)
+       RESTORE_REST
+-      testl $3, CS-ARGOFFSET(%rsp)            # from kernel_thread?
++      testb $3, CS-ARGOFFSET(%rsp)            # from kernel_thread?
+       jz   1f
+       testl $_TIF_IA32, TI_flags(%rcx)        # 32-bit compat task needs IRET
+@@ -571,7 +1006,7 @@ ENTRY(ret_from_fork)
+       RESTORE_REST
+       jmp int_ret_from_sys_call
+       CFI_ENDPROC
+-END(ret_from_fork)
++ENDPROC(ret_from_fork)
+ /*
+  * System call entry. Up to 6 arguments in registers are supported.
+@@ -608,7 +1043,7 @@ END(ret_from_fork)
+ ENTRY(system_call)
+       CFI_STARTPROC   simple
+       CFI_SIGNAL_FRAME
+-      CFI_DEF_CFA     rsp,KERNEL_STACK_OFFSET
++      CFI_DEF_CFA     rsp,0
+       CFI_REGISTER    rip,rcx
+       /*CFI_REGISTER  rflags,r11*/
+       SWAPGS_UNSAFE_STACK
+@@ -621,16 +1056,23 @@ GLOBAL(system_call_after_swapgs)
+       movq    %rsp,PER_CPU_VAR(old_rsp)
+       movq    PER_CPU_VAR(kernel_stack),%rsp
++      SAVE_ARGS 8*6,0
++      pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++      pax_erase_kstack
++#endif
++
+       /*
+        * No need to follow this irqs off/on section - it's straight
+        * and short:
+        */
+       ENABLE_INTERRUPTS(CLBR_NONE)
+-      SAVE_ARGS 8,0
+       movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
+       movq  %rcx,RIP-ARGOFFSET(%rsp)
+       CFI_REL_OFFSET rip,RIP-ARGOFFSET
+-      testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++      GET_THREAD_INFO(%rcx)
++      testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
+       jnz tracesys
+ system_call_fastpath:
+ #if __SYSCALL_MASK == ~0
+@@ -640,7 +1082,7 @@ system_call_fastpath:
+       cmpl $__NR_syscall_max,%eax
+ #endif
+       ja badsys
+-      movq %r10,%rcx
++      movq R10-ARGOFFSET(%rsp),%rcx
+       call *sys_call_table(,%rax,8)  # XXX:    rip relative
+       movq %rax,RAX-ARGOFFSET(%rsp)
+ /*
+@@ -654,10 +1096,13 @@ sysret_check:
+       LOCKDEP_SYS_EXIT
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF
+-      movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
++      GET_THREAD_INFO(%rcx)
++      movl TI_flags(%rcx),%edx
+       andl %edi,%edx
+       jnz  sysret_careful
+       CFI_REMEMBER_STATE
++      pax_exit_kernel_user
++      pax_erase_kstack
+       /*
+        * sysretq will re-enable interrupts:
+        */
+@@ -709,14 +1154,18 @@ badsys:
+        * jump back to the normal fast path.
+        */
+ auditsys:
+-      movq %r10,%r9                   /* 6th arg: 4th syscall arg */
++      movq R10-ARGOFFSET(%rsp),%r9    /* 6th arg: 4th syscall arg */
+       movq %rdx,%r8                   /* 5th arg: 3rd syscall arg */
+       movq %rsi,%rcx                  /* 4th arg: 2nd syscall arg */
+       movq %rdi,%rdx                  /* 3rd arg: 1st syscall arg */
+       movq %rax,%rsi                  /* 2nd arg: syscall number */
+       movl $AUDIT_ARCH_X86_64,%edi    /* 1st arg: audit arch */
+       call __audit_syscall_entry
++
++      pax_erase_kstack
++
+       LOAD_ARGS 0             /* reload call-clobbered registers */
++      pax_set_fptr_mask
+       jmp system_call_fastpath
+       /*
+@@ -737,7 +1186,7 @@ sysret_audit:
+       /* Do syscall tracing */
+ tracesys:
+ #ifdef CONFIG_AUDITSYSCALL
+-      testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++      testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
+       jz auditsys
+ #endif
+       SAVE_REST
+@@ -745,12 +1194,16 @@ tracesys:
+       FIXUP_TOP_OF_STACK %rdi
+       movq %rsp,%rdi
+       call syscall_trace_enter
++
++      pax_erase_kstack
++
+       /*
+        * Reload arg registers from stack in case ptrace changed them.
+        * We don't reload %rax because syscall_trace_enter() returned
+        * the value it wants us to use in the table lookup.
+        */
+       LOAD_ARGS ARGOFFSET, 1
++      pax_set_fptr_mask
+       RESTORE_REST
+ #if __SYSCALL_MASK == ~0
+       cmpq $__NR_syscall_max,%rax
+@@ -759,7 +1212,7 @@ tracesys:
+       cmpl $__NR_syscall_max,%eax
+ #endif
+       ja   int_ret_from_sys_call      /* RAX(%rsp) set to -ENOSYS above */
+-      movq %r10,%rcx  /* fixup for C */
++      movq R10-ARGOFFSET(%rsp),%rcx   /* fixup for C */
+       call *sys_call_table(,%rax,8)
+       movq %rax,RAX-ARGOFFSET(%rsp)
+       /* Use IRET because user could have changed frame */
+@@ -780,7 +1233,9 @@ GLOBAL(int_with_check)
+       andl %edi,%edx
+       jnz   int_careful
+       andl    $~TS_COMPAT,TI_status(%rcx)
+-      jmp   retint_swapgs
++      pax_exit_kernel_user
++      pax_erase_kstack
++      jmp   retint_swapgs_pax
+       /* Either reschedule or signal or syscall exit tracking needed. */
+       /* First do a reschedule test. */
+@@ -826,7 +1281,7 @@ int_restore_rest:
+       TRACE_IRQS_OFF
+       jmp int_with_check
+       CFI_ENDPROC
+-END(system_call)
++ENDPROC(system_call)
+       .macro FORK_LIKE func
+ ENTRY(stub_\func)
+@@ -839,9 +1294,10 @@ ENTRY(stub_\func)
+       DEFAULT_FRAME 0 8               /* offset 8: return address */
+       call sys_\func
+       RESTORE_TOP_OF_STACK %r11, 8
++      pax_force_retaddr
+       ret $REST_SKIP          /* pop extended registers */
+       CFI_ENDPROC
+-END(stub_\func)
++ENDPROC(stub_\func)
+       .endm
+       .macro FIXED_FRAME label,func
+@@ -851,9 +1307,10 @@ ENTRY(\label)
+       FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
+       call \func
+       RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+-END(\label)
++ENDPROC(\label)
+       .endm
+       FORK_LIKE  clone
+@@ -870,9 +1327,10 @@ ENTRY(ptregscall_common)
+       movq_cfi_restore R12+8, r12
+       movq_cfi_restore RBP+8, rbp
+       movq_cfi_restore RBX+8, rbx
++      pax_force_retaddr
+       ret $REST_SKIP          /* pop extended registers */
+       CFI_ENDPROC
+-END(ptregscall_common)
++ENDPROC(ptregscall_common)
+ ENTRY(stub_execve)
+       CFI_STARTPROC
+@@ -885,7 +1343,7 @@ ENTRY(stub_execve)
+       RESTORE_REST
+       jmp int_ret_from_sys_call
+       CFI_ENDPROC
+-END(stub_execve)
++ENDPROC(stub_execve)
+ /*
+  * sigreturn is special because it needs to restore all registers on return.
+@@ -902,7 +1360,7 @@ ENTRY(stub_rt_sigreturn)
+       RESTORE_REST
+       jmp int_ret_from_sys_call
+       CFI_ENDPROC
+-END(stub_rt_sigreturn)
++ENDPROC(stub_rt_sigreturn)
+ #ifdef CONFIG_X86_X32_ABI
+ ENTRY(stub_x32_rt_sigreturn)
+@@ -916,7 +1374,7 @@ ENTRY(stub_x32_rt_sigreturn)
+       RESTORE_REST
+       jmp int_ret_from_sys_call
+       CFI_ENDPROC
+-END(stub_x32_rt_sigreturn)
++ENDPROC(stub_x32_rt_sigreturn)
+ ENTRY(stub_x32_execve)
+       CFI_STARTPROC
+@@ -930,7 +1388,7 @@ ENTRY(stub_x32_execve)
+       RESTORE_REST
+       jmp int_ret_from_sys_call
+       CFI_ENDPROC
+-END(stub_x32_execve)
++ENDPROC(stub_x32_execve)
+ #endif
+@@ -967,7 +1425,7 @@ vector=vector+1
+ 2:    jmp common_interrupt
+ .endr
+       CFI_ENDPROC
+-END(irq_entries_start)
++ENDPROC(irq_entries_start)
+ .previous
+ END(interrupt)
+@@ -987,6 +1445,16 @@ END(interrupt)
+       subq $ORIG_RAX-RBP, %rsp
+       CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
+       SAVE_ARGS_IRQ
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      testb $3, CS(%rdi)
++      jnz 1f
++      pax_enter_kernel
++      jmp 2f
++1:    pax_enter_kernel_user
++2:
++#else
++      pax_enter_kernel
++#endif
+       call \func
+       .endm
+@@ -1019,7 +1487,7 @@ ret_from_intr:
+ exit_intr:
+       GET_THREAD_INFO(%rcx)
+-      testl $3,CS-ARGOFFSET(%rsp)
++      testb $3,CS-ARGOFFSET(%rsp)
+       je retint_kernel
+       /* Interrupt came from user space */
+@@ -1041,12 +1509,16 @@ retint_swapgs:         /* return to user-space */
+        * The iretq could re-enable interrupts:
+        */
+       DISABLE_INTERRUPTS(CLBR_ANY)
++      pax_exit_kernel_user
++retint_swapgs_pax:
+       TRACE_IRQS_IRETQ
+       SWAPGS
+       jmp restore_args
+ retint_restore_args:  /* return to kernel space */
+       DISABLE_INTERRUPTS(CLBR_ANY)
++      pax_exit_kernel
++      pax_force_retaddr (RIP-ARGOFFSET)
+       /*
+        * The iretq could re-enable interrupts:
+        */
+@@ -1129,7 +1601,7 @@ ENTRY(retint_kernel)
+ #endif
+       CFI_ENDPROC
+-END(common_interrupt)
++ENDPROC(common_interrupt)
+ /*
+  * End of kprobes section
+  */
+@@ -1147,7 +1619,7 @@ ENTRY(\sym)
+       interrupt \do_sym
+       jmp ret_from_intr
+       CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+ #ifdef CONFIG_SMP
+@@ -1208,12 +1680,22 @@ ENTRY(\sym)
+       CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+       call error_entry
+       DEFAULT_FRAME 0
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      testb $3, CS(%rsp)
++      jnz 1f
++      pax_enter_kernel
++      jmp 2f
++1:    pax_enter_kernel_user
++2:
++#else
++      pax_enter_kernel
++#endif
+       movq %rsp,%rdi          /* pt_regs pointer */
+       xorl %esi,%esi          /* no error code */
+       call \do_sym
+       jmp error_exit          /* %ebx: no swapgs flag */
+       CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+ .macro paranoidzeroentry sym do_sym
+@@ -1226,15 +1708,25 @@ ENTRY(\sym)
+       CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+       call save_paranoid
+       TRACE_IRQS_OFF
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      testb $3, CS(%rsp)
++      jnz 1f
++      pax_enter_kernel
++      jmp 2f
++1:    pax_enter_kernel_user
++2:
++#else
++      pax_enter_kernel
++#endif
+       movq %rsp,%rdi          /* pt_regs pointer */
+       xorl %esi,%esi          /* no error code */
+       call \do_sym
+       jmp paranoid_exit       /* %ebx: no swapgs flag */
+       CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
++#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
+ .macro paranoidzeroentry_ist sym do_sym ist
+ ENTRY(\sym)
+       INTR_FRAME
+@@ -1245,14 +1737,30 @@ ENTRY(\sym)
+       CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+       call save_paranoid
+       TRACE_IRQS_OFF_DEBUG
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      testb $3, CS(%rsp)
++      jnz 1f
++      pax_enter_kernel
++      jmp 2f
++1:    pax_enter_kernel_user
++2:
++#else
++      pax_enter_kernel
++#endif
+       movq %rsp,%rdi          /* pt_regs pointer */
+       xorl %esi,%esi          /* no error code */
++#ifdef CONFIG_SMP
++      imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
++      lea init_tss(%r12), %r12
++#else
++      lea init_tss(%rip), %r12
++#endif
+       subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
+       call \do_sym
+       addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
+       jmp paranoid_exit       /* %ebx: no swapgs flag */
+       CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+ .macro errorentry sym do_sym
+@@ -1264,13 +1772,23 @@ ENTRY(\sym)
+       CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+       call error_entry
+       DEFAULT_FRAME 0
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      testb $3, CS(%rsp)
++      jnz 1f
++      pax_enter_kernel
++      jmp 2f
++1:    pax_enter_kernel_user
++2:
++#else
++      pax_enter_kernel
++#endif
+       movq %rsp,%rdi                  /* pt_regs pointer */
+       movq ORIG_RAX(%rsp),%rsi        /* get error code */
+       movq $-1,ORIG_RAX(%rsp)         /* no syscall to restart */
+       call \do_sym
+       jmp error_exit                  /* %ebx: no swapgs flag */
+       CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+       /* error code is on the stack already */
+@@ -1284,13 +1802,23 @@ ENTRY(\sym)
+       call save_paranoid
+       DEFAULT_FRAME 0
+       TRACE_IRQS_OFF
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      testb $3, CS(%rsp)
++      jnz 1f
++      pax_enter_kernel
++      jmp 2f
++1:    pax_enter_kernel_user
++2:
++#else
++      pax_enter_kernel
++#endif
+       movq %rsp,%rdi                  /* pt_regs pointer */
+       movq ORIG_RAX(%rsp),%rsi        /* get error code */
+       movq $-1,ORIG_RAX(%rsp)         /* no syscall to restart */
+       call \do_sym
+       jmp paranoid_exit               /* %ebx: no swapgs flag */
+       CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+ zeroentry divide_error do_divide_error
+@@ -1320,9 +1848,10 @@ gs_change:
+ 2:    mfence          /* workaround */
+       SWAPGS
+       popfq_cfi
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+-END(native_load_gs_index)
++ENDPROC(native_load_gs_index)
+       _ASM_EXTABLE(gs_change,bad_gs)
+       .section .fixup,"ax"
+@@ -1350,9 +1879,10 @@ ENTRY(call_softirq)
+       CFI_DEF_CFA_REGISTER    rsp
+       CFI_ADJUST_CFA_OFFSET   -8
+       decl PER_CPU_VAR(irq_count)
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+-END(call_softirq)
++ENDPROC(call_softirq)
+ #ifdef CONFIG_XEN
+ zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
+@@ -1390,7 +1920,7 @@ ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
+       decl PER_CPU_VAR(irq_count)
+       jmp  error_exit
+       CFI_ENDPROC
+-END(xen_do_hypervisor_callback)
++ENDPROC(xen_do_hypervisor_callback)
+ /*
+  * Hypervisor uses this for application faults while it executes.
+@@ -1449,7 +1979,7 @@ ENTRY(xen_failsafe_callback)
+       SAVE_ALL
+       jmp error_exit
+       CFI_ENDPROC
+-END(xen_failsafe_callback)
++ENDPROC(xen_failsafe_callback)
+ apicinterrupt HYPERVISOR_CALLBACK_VECTOR \
+       xen_hvm_callback_vector xen_evtchn_do_upcall
+@@ -1501,18 +2031,33 @@ ENTRY(paranoid_exit)
+       DEFAULT_FRAME
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF_DEBUG
+-      testl %ebx,%ebx                         /* swapgs needed? */
++      testl $1,%ebx                           /* swapgs needed? */
+       jnz paranoid_restore
+-      testl $3,CS(%rsp)
++      testb $3,CS(%rsp)
+       jnz   paranoid_userspace
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      pax_exit_kernel
++      TRACE_IRQS_IRETQ 0
++      SWAPGS_UNSAFE_STACK
++      RESTORE_ALL 8
++      pax_force_retaddr_bts
++      jmp irq_return
++#endif
+ paranoid_swapgs:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      pax_exit_kernel_user
++#else
++      pax_exit_kernel
++#endif
+       TRACE_IRQS_IRETQ 0
+       SWAPGS_UNSAFE_STACK
+       RESTORE_ALL 8
+       jmp irq_return
+ paranoid_restore:
++      pax_exit_kernel
+       TRACE_IRQS_IRETQ_DEBUG 0
+       RESTORE_ALL 8
++      pax_force_retaddr_bts
+       jmp irq_return
+ paranoid_userspace:
+       GET_THREAD_INFO(%rcx)
+@@ -1541,7 +2086,7 @@ paranoid_schedule:
+       TRACE_IRQS_OFF
+       jmp paranoid_userspace
+       CFI_ENDPROC
+-END(paranoid_exit)
++ENDPROC(paranoid_exit)
+ /*
+  * Exception entry point. This expects an error code/orig_rax on the stack.
+@@ -1568,12 +2113,13 @@ ENTRY(error_entry)
+       movq_cfi r14, R14+8
+       movq_cfi r15, R15+8
+       xorl %ebx,%ebx
+-      testl $3,CS+8(%rsp)
++      testb $3,CS+8(%rsp)
+       je error_kernelspace
+ error_swapgs:
+       SWAPGS
+ error_sti:
+       TRACE_IRQS_OFF
++      pax_force_retaddr_bts
+       ret
+ /*
+@@ -1600,7 +2146,7 @@ bstep_iret:
+       movq %rcx,RIP+8(%rsp)
+       jmp error_swapgs
+       CFI_ENDPROC
+-END(error_entry)
++ENDPROC(error_entry)
+ /* ebx:       no swapgs flag (1: don't need swapgs, 0: need it) */
+@@ -1611,7 +2157,7 @@ ENTRY(error_exit)
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF
+       GET_THREAD_INFO(%rcx)
+-      testl %eax,%eax
++      testl $1,%eax
+       jne retint_kernel
+       LOCKDEP_SYS_EXIT_IRQ
+       movl TI_flags(%rcx),%edx
+@@ -1620,7 +2166,7 @@ ENTRY(error_exit)
+       jnz retint_careful
+       jmp retint_swapgs
+       CFI_ENDPROC
+-END(error_exit)
++ENDPROC(error_exit)
+ /*
+  * Test if a given stack is an NMI stack or not.
+@@ -1678,9 +2224,11 @@ ENTRY(nmi)
+        * If %cs was not the kernel segment, then the NMI triggered in user
+        * space, which means it is definitely not nested.
+        */
++      cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
++      je 1f
+       cmpl $__KERNEL_CS, 16(%rsp)
+       jne first_nmi
+-
++1:
+       /*
+        * Check the special variable on the stack to see if NMIs are
+        * executing.
+@@ -1714,8 +2262,7 @@ nested_nmi:
+ 1:
+       /* Set up the interrupted NMIs stack to jump to repeat_nmi */
+-      leaq -1*8(%rsp), %rdx
+-      movq %rdx, %rsp
++      subq $8, %rsp
+       CFI_ADJUST_CFA_OFFSET 1*8
+       leaq -10*8(%rsp), %rdx
+       pushq_cfi $__KERNEL_DS
+@@ -1733,6 +2280,7 @@ nested_nmi_out:
+       CFI_RESTORE rdx
+       /* No need to check faults here */
++#     pax_force_retaddr_bts
+       INTERRUPT_RETURN
+       CFI_RESTORE_STATE
+@@ -1849,6 +2397,8 @@ end_repeat_nmi:
+        */
+       movq %cr2, %r12
++      pax_enter_kernel_nmi
++
+       /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
+       movq %rsp,%rdi
+       movq $-1,%rsi
+@@ -1861,26 +2411,31 @@ end_repeat_nmi:
+       movq %r12, %cr2
+ 1:
+       
+-      testl %ebx,%ebx                         /* swapgs needed? */
++      testl $1,%ebx                           /* swapgs needed? */
+       jnz nmi_restore
+ nmi_swapgs:
+       SWAPGS_UNSAFE_STACK
+ nmi_restore:
++      pax_exit_kernel_nmi
+       /* Pop the extra iret frame at once */
+       RESTORE_ALL 6*8
++      testb $3, 8(%rsp)
++      jnz 1f
++      pax_force_retaddr_bts
++1:
+       /* Clear the NMI executing stack variable */
+       movq $0, 5*8(%rsp)
+       jmp irq_return
+       CFI_ENDPROC
+-END(nmi)
++ENDPROC(nmi)
+ ENTRY(ignore_sysret)
+       CFI_STARTPROC
+       mov $-ENOSYS,%eax
+       sysret
+       CFI_ENDPROC
+-END(ignore_sysret)
++ENDPROC(ignore_sysret)
+ /*
+  * End of kprobes section
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index 42a392a..fbbd930 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
+ {
+       unsigned char replaced[MCOUNT_INSN_SIZE];
++      ip = ktla_ktva(ip);
++
+       /*
+        * Note: Due to modules and __init, code can
+        *  disappear and change, we need to protect against faulting
+@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
+       unsigned char old[MCOUNT_INSN_SIZE], *new;
+       int ret;
+-      memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
++      memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
+       new = ftrace_call_replace(ip, (unsigned long)func);
+       /* See comment above by declaration of modifying_ftrace_code */
+@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
+       /* Also update the regs callback function */
+       if (!ret) {
+               ip = (unsigned long)(&ftrace_regs_call);
+-              memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
++              memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
+               new = ftrace_call_replace(ip, (unsigned long)func);
+               ret = ftrace_modify_code(ip, old, new);
+       }
+@@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
+        * kernel identity mapping to modify code.
+        */
+       if (within(ip, (unsigned long)_text, (unsigned long)_etext))
+-              ip = (unsigned long)__va(__pa_symbol(ip));
++              ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
+       return probe_kernel_write((void *)ip, val, size);
+ }
+@@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
+       unsigned char replaced[MCOUNT_INSN_SIZE];
+       unsigned char brk = BREAKPOINT_INSTRUCTION;
+-      if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
++      if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
+               return -EFAULT;
+       /* Make sure it is what we expect it to be */
+@@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
+       return ret;
+  fail_update:
+-      probe_kernel_write((void *)ip, &old_code[0], 1);
++      probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
+       goto out;
+ }
+@@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
+ {
+       unsigned char code[MCOUNT_INSN_SIZE];
++      ip = ktla_ktva(ip);
++
+       if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+               return -EFAULT;
+diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
+index 55b6761..a6456fc 100644
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -67,12 +67,12 @@ again:
+       pgd = *pgd_p;
+       /*
+-       * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
+-       * critical -- __PAGE_OFFSET would point us back into the dynamic
++       * The use of __early_va rather than __va here is critical:
++       * __va would point us back into the dynamic
+        * range and we might end up looping forever...
+        */
+       if (pgd)
+-              pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
++              pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
+       else {
+               if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
+                       reset_early_page_tables();
+@@ -82,13 +82,13 @@ again:
+               pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
+               for (i = 0; i < PTRS_PER_PUD; i++)
+                       pud_p[i] = 0;
+-              *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
++              *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
+       }
+       pud_p += pud_index(address);
+       pud = *pud_p;
+       if (pud)
+-              pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
++              pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
+       else {
+               if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
+                       reset_early_page_tables();
+@@ -98,7 +98,7 @@ again:
+               pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
+               for (i = 0; i < PTRS_PER_PMD; i++)
+                       pmd_p[i] = 0;
+-              *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
++              *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
+       }
+       pmd = (physaddr & PMD_MASK) + early_pmd_flags;
+       pmd_p[pmd_index(address)] = pmd;
+@@ -175,7 +175,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
+       if (console_loglevel == 10)
+               early_printk("Kernel alive\n");
+-      clear_page(init_level4_pgt);
+       /* set init_level4_pgt kernel high mapping*/
+       init_level4_pgt[511] = early_level4_pgt[511];
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index 73afd11..0ef46f2 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -26,6 +26,12 @@
+ /* Physical address */
+ #define pa(X) ((X) - __PAGE_OFFSET)
++#ifdef CONFIG_PAX_KERNEXEC
++#define ta(X) (X)
++#else
++#define ta(X) ((X) - __PAGE_OFFSET)
++#endif
++
+ /*
+  * References to members of the new_cpu_data structure.
+  */
+@@ -55,11 +61,7 @@
+  * and small than max_low_pfn, otherwise will waste some page table entries
+  */
+-#if PTRS_PER_PMD > 1
+-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
+-#else
+-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
+-#endif
++#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
+ /* Number of possible pages in the lowmem region */
+ LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
+@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
+ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+ /*
++ * Real beginning of normal "text" segment
++ */
++ENTRY(stext)
++ENTRY(_stext)
++
++/*
+  * 32-bit kernel entrypoint; only used by the boot CPU.  On entry,
+  * %esi points to the real-mode code as a 32-bit pointer.
+  * CS and DS must be 4 GB flat segments, but we don't depend on
+@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+  * can.
+  */
+ __HEAD
++
++#ifdef CONFIG_PAX_KERNEXEC
++      jmp startup_32
++/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
++.fill PAGE_SIZE-5,1,0xcc
++#endif
++
+ ENTRY(startup_32)
+       movl pa(stack_start),%ecx
+       
+@@ -106,6 +121,59 @@ ENTRY(startup_32)
+ 2:
+       leal -__PAGE_OFFSET(%ecx),%esp
++#ifdef CONFIG_SMP
++      movl $pa(cpu_gdt_table),%edi
++      movl $__per_cpu_load,%eax
++      movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
++      rorl $16,%eax
++      movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
++      movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
++      movl $__per_cpu_end - 1,%eax
++      subl $__per_cpu_start,%eax
++      movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      movl $NR_CPUS,%ecx
++      movl $pa(cpu_gdt_table),%edi
++1:
++      movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
++      movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
++      movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
++      addl $PAGE_SIZE_asm,%edi
++      loop 1b
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++      movl $pa(boot_gdt),%edi
++      movl $__LOAD_PHYSICAL_ADDR,%eax
++      movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
++      rorl $16,%eax
++      movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
++      movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
++      rorl $16,%eax
++
++      ljmp $(__BOOT_CS),$1f
++1:
++
++      movl $NR_CPUS,%ecx
++      movl $pa(cpu_gdt_table),%edi
++      addl $__PAGE_OFFSET,%eax
++1:
++      movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
++      movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
++      movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
++      movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
++      rorl $16,%eax
++      movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
++      movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
++      movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
++      movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
++      rorl $16,%eax
++      addl $PAGE_SIZE_asm,%edi
++      loop 1b
++#endif
++
+ /*
+  * Clear BSS first so that there are no surprises...
+  */
+@@ -201,8 +269,11 @@ ENTRY(startup_32)
+       movl %eax, pa(max_pfn_mapped)
+       /* Do early initialization of the fixmap area */
+-      movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+-      movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
++#ifdef CONFIG_COMPAT_VDSO
++      movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
++#else
++      movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
++#endif
+ #else /* Not PAE */
+ page_pde_offset = (__PAGE_OFFSET >> 20);
+@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
+       movl %eax, pa(max_pfn_mapped)
+       /* Do early initialization of the fixmap area */
+-      movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+-      movl %eax,pa(initial_page_table+0xffc)
++#ifdef CONFIG_COMPAT_VDSO
++      movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
++#else
++      movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
++#endif
+ #endif
+ #ifdef CONFIG_PARAVIRT
+@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
+       cmpl $num_subarch_entries, %eax
+       jae bad_subarch
+-      movl pa(subarch_entries)(,%eax,4), %eax
+-      subl $__PAGE_OFFSET, %eax
+-      jmp *%eax
++      jmp *pa(subarch_entries)(,%eax,4)
+ bad_subarch:
+ WEAK(lguest_entry)
+@@ -261,10 +333,10 @@ WEAK(xen_entry)
+       __INITDATA
+ subarch_entries:
+-      .long default_entry             /* normal x86/PC */
+-      .long lguest_entry              /* lguest hypervisor */
+-      .long xen_entry                 /* Xen hypervisor */
+-      .long default_entry             /* Moorestown MID */
++      .long ta(default_entry)         /* normal x86/PC */
++      .long ta(lguest_entry)          /* lguest hypervisor */
++      .long ta(xen_entry)             /* Xen hypervisor */
++      .long ta(default_entry)         /* Moorestown MID */
+ num_subarch_entries = (. - subarch_entries) / 4
+ .previous
+ #else
+@@ -355,6 +427,7 @@ default_entry:
+       movl pa(mmu_cr4_features),%eax
+       movl %eax,%cr4
++#ifdef CONFIG_X86_PAE
+       testb $X86_CR4_PAE, %al         # check if PAE is enabled
+       jz enable_paging
+@@ -383,6 +456,9 @@ default_entry:
+       /* Make changes effective */
+       wrmsr
++      btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
++#endif
++
+ enable_paging:
+ /*
+@@ -451,14 +527,20 @@ is486:
+ 1:    movl $(__KERNEL_DS),%eax        # reload all the segment registers
+       movl %eax,%ss                   # after changing gdt.
+-      movl $(__USER_DS),%eax          # DS/ES contains default USER segment
++#     movl $(__KERNEL_DS),%eax        # DS/ES contains default KERNEL segment
+       movl %eax,%ds
+       movl %eax,%es
+       movl $(__KERNEL_PERCPU), %eax
+       movl %eax,%fs                   # set this cpu's percpu
++#ifdef CONFIG_CC_STACKPROTECTOR
+       movl $(__KERNEL_STACK_CANARY),%eax
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++      movl $(__USER_DS),%eax
++#else
++      xorl %eax,%eax
++#endif
+       movl %eax,%gs
+       xorl %eax,%eax                  # Clear LDT
+@@ -534,8 +616,11 @@ setup_once:
+        * relocation.  Manually set base address in stack canary
+        * segment descriptor.
+        */
+-      movl $gdt_page,%eax
++      movl $cpu_gdt_table,%eax
+       movl $stack_canary,%ecx
++#ifdef CONFIG_SMP
++      addl $__per_cpu_load,%ecx
++#endif
+       movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
+       shrl $16, %ecx
+       movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
+@@ -566,7 +651,7 @@ ENDPROC(early_idt_handlers)
+       /* This is global to keep gas from relaxing the jumps */
+ ENTRY(early_idt_handler)
+       cld
+-      cmpl $2,%ss:early_recursion_flag
++      cmpl $1,%ss:early_recursion_flag
+       je hlt_loop
+       incl %ss:early_recursion_flag
+@@ -604,8 +689,8 @@ ENTRY(early_idt_handler)
+       pushl (20+6*4)(%esp)    /* trapno */
+       pushl $fault_msg
+       call printk
+-#endif
+       call dump_stack
++#endif
+ hlt_loop:
+       hlt
+       jmp hlt_loop
+@@ -624,8 +709,11 @@ ENDPROC(early_idt_handler)
+ /* This is the default interrupt "handler" :-) */
+       ALIGN
+ ignore_int:
+-      cld
+ #ifdef CONFIG_PRINTK
++      cmpl $2,%ss:early_recursion_flag
++      je hlt_loop
++      incl %ss:early_recursion_flag
++      cld
+       pushl %eax
+       pushl %ecx
+       pushl %edx
+@@ -634,9 +722,6 @@ ignore_int:
+       movl $(__KERNEL_DS),%eax
+       movl %eax,%ds
+       movl %eax,%es
+-      cmpl $2,early_recursion_flag
+-      je hlt_loop
+-      incl early_recursion_flag
+       pushl 16(%esp)
+       pushl 24(%esp)
+       pushl 32(%esp)
+@@ -670,29 +755,43 @@ ENTRY(setup_once_ref)
+ /*
+  * BSS section
+  */
+-__PAGE_ALIGNED_BSS
+-      .align PAGE_SIZE
+ #ifdef CONFIG_X86_PAE
++.section .initial_pg_pmd,"a",@progbits
+ initial_pg_pmd:
+       .fill 1024*KPMDS,4,0
+ #else
++.section .initial_page_table,"a",@progbits
+ ENTRY(initial_page_table)
+       .fill 1024,4,0
+ #endif
++.section .initial_pg_fixmap,"a",@progbits
+ initial_pg_fixmap:
+       .fill 1024,4,0
++.section .empty_zero_page,"a",@progbits
+ ENTRY(empty_zero_page)
+       .fill 4096,1,0
++.section .swapper_pg_dir,"a",@progbits
+ ENTRY(swapper_pg_dir)
++#ifdef CONFIG_X86_PAE
++      .fill 4,8,0
++#else
+       .fill 1024,4,0
++#endif
++
++/*
++ * The IDT has to be page-aligned to simplify the Pentium
++ * F0 0F bug workaround.. We have a special link segment
++ * for this.
++ */
++.section .idt,"a",@progbits
++ENTRY(idt_table)
++      .fill 256,8,0
+ /*
+  * This starts the data section.
+  */
+ #ifdef CONFIG_X86_PAE
+-__PAGE_ALIGNED_DATA
+-      /* Page-aligned for the benefit of paravirt? */
+-      .align PAGE_SIZE
++.section .initial_page_table,"a",@progbits
+ ENTRY(initial_page_table)
+       .long   pa(initial_pg_pmd+PGD_IDENT_ATTR),0     /* low identity map */
+ # if KPMDS == 3
+@@ -711,12 +810,20 @@ ENTRY(initial_page_table)
+ #  error "Kernel PMDs should be 1, 2 or 3"
+ # endif
+       .align PAGE_SIZE                /* needs to be page-sized too */
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ENTRY(cpu_pgd)
++      .rept 2*NR_CPUS
++      .fill   4,8,0
++      .endr
++#endif
++
+ #endif
+ .data
+ .balign 4
+ ENTRY(stack_start)
+-      .long init_thread_union+THREAD_SIZE
++      .long init_thread_union+THREAD_SIZE-8
+ __INITRODATA
+ int_msg:
+@@ -744,7 +851,7 @@ fault_msg:
+  * segment size, and 32-bit linear address value:
+  */
+-      .data
++.section .rodata,"a",@progbits
+ .globl boot_gdt_descr
+ .globl idt_descr
+@@ -753,7 +860,7 @@ fault_msg:
+       .word 0                         # 32 bit align gdt_desc.address
+ boot_gdt_descr:
+       .word __BOOT_DS+7
+-      .long boot_gdt - __PAGE_OFFSET
++      .long pa(boot_gdt)
+       .word 0                         # 32-bit align idt_desc.address
+ idt_descr:
+@@ -764,7 +871,7 @@ idt_descr:
+       .word 0                         # 32 bit align gdt_desc.address
+ ENTRY(early_gdt_descr)
+       .word GDT_ENTRIES*8-1
+-      .long gdt_page                  /* Overwritten for secondary CPUs */
++      .long cpu_gdt_table             /* Overwritten for secondary CPUs */
+ /*
+  * The boot_gdt must mirror the equivalent in setup.S and is
+@@ -773,5 +880,65 @@ ENTRY(early_gdt_descr)
+       .align L1_CACHE_BYTES
+ ENTRY(boot_gdt)
+       .fill GDT_ENTRY_BOOT_CS,8,0
+-      .quad 0x00cf9a000000ffff        /* kernel 4GB code at 0x00000000 */
+-      .quad 0x00cf92000000ffff        /* kernel 4GB data at 0x00000000 */
++      .quad 0x00cf9b000000ffff        /* kernel 4GB code at 0x00000000 */
++      .quad 0x00cf93000000ffff        /* kernel 4GB data at 0x00000000 */
++
++      .align PAGE_SIZE_asm
++ENTRY(cpu_gdt_table)
++      .rept NR_CPUS
++      .quad 0x0000000000000000        /* NULL descriptor */
++      .quad 0x0000000000000000        /* 0x0b reserved */
++      .quad 0x0000000000000000        /* 0x13 reserved */
++      .quad 0x0000000000000000        /* 0x1b reserved */
++
++#ifdef CONFIG_PAX_KERNEXEC
++      .quad 0x00cf9b000000ffff        /* 0x20 alternate kernel 4GB code at 0x00000000 */
++#else
++      .quad 0x0000000000000000        /* 0x20 unused */
++#endif
++
++      .quad 0x0000000000000000        /* 0x28 unused */
++      .quad 0x0000000000000000        /* 0x33 TLS entry 1 */
++      .quad 0x0000000000000000        /* 0x3b TLS entry 2 */
++      .quad 0x0000000000000000        /* 0x43 TLS entry 3 */
++      .quad 0x0000000000000000        /* 0x4b reserved */
++      .quad 0x0000000000000000        /* 0x53 reserved */
++      .quad 0x0000000000000000        /* 0x5b reserved */
++
++      .quad 0x00cf9b000000ffff        /* 0x60 kernel 4GB code at 0x00000000 */
++      .quad 0x00cf93000000ffff        /* 0x68 kernel 4GB data at 0x00000000 */
++      .quad 0x00cffb000000ffff        /* 0x73 user 4GB code at 0x00000000 */
++      .quad 0x00cff3000000ffff        /* 0x7b user 4GB data at 0x00000000 */
++
++      .quad 0x0000000000000000        /* 0x80 TSS descriptor */
++      .quad 0x0000000000000000        /* 0x88 LDT descriptor */
++
++      /*
++       * Segments used for calling PnP BIOS have byte granularity.
++       * The code segments and data segments have fixed 64k limits,
++       * the transfer segment sizes are set at run time.
++       */
++      .quad 0x00409b000000ffff        /* 0x90 32-bit code */
++      .quad 0x00009b000000ffff        /* 0x98 16-bit code */
++      .quad 0x000093000000ffff        /* 0xa0 16-bit data */
++      .quad 0x0000930000000000        /* 0xa8 16-bit data */
++      .quad 0x0000930000000000        /* 0xb0 16-bit data */
++
++      /*
++       * The APM segments have byte granularity and their bases
++       * are set at run time.  All have 64k limits.
++       */
++      .quad 0x00409b000000ffff        /* 0xb8 APM CS    code */
++      .quad 0x00009b000000ffff        /* 0xc0 APM CS 16 code (16 bit) */
++      .quad 0x004093000000ffff        /* 0xc8 APM DS    data */
++
++      .quad 0x00c0930000000000        /* 0xd0 - ESPFIX SS */
++      .quad 0x0040930000000000        /* 0xd8 - PERCPU */
++      .quad 0x0040910000000017        /* 0xe0 - STACK_CANARY */
++      .quad 0x0000000000000000        /* 0xe8 - PCIBIOS_CS */
++      .quad 0x0000000000000000        /* 0xf0 - PCIBIOS_DS */
++      .quad 0x0000000000000000        /* 0xf8 - GDT entry 31: double-fault TSS */
++
++      /* Be sure this is zeroed to avoid false validations in Xen */
++      .fill PAGE_SIZE_asm - GDT_SIZE,1,0
++      .endr
+diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
+index a836860..1b5c665 100644
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -20,6 +20,8 @@
+ #include <asm/processor-flags.h>
+ #include <asm/percpu.h>
+ #include <asm/nops.h>
++#include <asm/cpufeature.h>
++#include <asm/alternative-asm.h>
+ #ifdef CONFIG_PARAVIRT
+ #include <asm/asm-offsets.h>
+@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
+ L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
+ L4_START_KERNEL = pgd_index(__START_KERNEL_map)
+ L3_START_KERNEL = pud_index(__START_KERNEL_map)
++L4_VMALLOC_START = pgd_index(VMALLOC_START)
++L3_VMALLOC_START = pud_index(VMALLOC_START)
++L4_VMALLOC_END = pgd_index(VMALLOC_END)
++L3_VMALLOC_END = pud_index(VMALLOC_END)
++L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
++L3_VMEMMAP_START = pud_index(VMEMMAP_START)
+       .text
+       __HEAD
+@@ -89,11 +97,23 @@ startup_64:
+        * Fixup the physical addresses in the page table
+        */
+       addq    %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
++      addq    %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
++      addq    %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
++      addq    %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
++      addq    %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
++      addq    %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
+-      addq    %rbp, level3_kernel_pgt + (510*8)(%rip)
+-      addq    %rbp, level3_kernel_pgt + (511*8)(%rip)
++      addq    %rbp, level3_ident_pgt + (0*8)(%rip)
++#ifndef CONFIG_XEN
++      addq    %rbp, level3_ident_pgt + (1*8)(%rip)
++#endif
+-      addq    %rbp, level2_fixmap_pgt + (506*8)(%rip)
++      addq    %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
++
++      addq    %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
++      addq    %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
++
++      addq    %rbp, level2_fixmap_pgt + (507*8)(%rip)
+       /*
+        * Set up the identity mapping for the switchover.  These
+@@ -177,8 +197,8 @@ ENTRY(secondary_startup_64)
+       movq    $(init_level4_pgt - __START_KERNEL_map), %rax
+ 1:
+-      /* Enable PAE mode and PGE */
+-      movl    $(X86_CR4_PAE | X86_CR4_PGE), %ecx
++      /* Enable PAE mode and PSE/PGE */
++      movl    $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
+       movq    %rcx, %cr4
+       /* Setup early boot stage 4 level pagetables. */
+@@ -199,10 +219,18 @@ ENTRY(secondary_startup_64)
+       movl    $MSR_EFER, %ecx
+       rdmsr
+       btsl    $_EFER_SCE, %eax        /* Enable System Call */
+-      btl     $20,%edi                /* No Execute supported? */
++      btl     $(X86_FEATURE_NX & 31),%edi     /* No Execute supported? */
+       jnc     1f
+       btsl    $_EFER_NX, %eax
+       btsq    $_PAGE_BIT_NX,early_pmd_flags(%rip)
++      leaq    init_level4_pgt(%rip), %rdi
++#ifndef CONFIG_EFI
++      btsq    $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
++#endif
++      btsq    $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
++      btsq    $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
++      btsq    $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
++      btsq    $_PAGE_BIT_NX, __supported_pte_mask(%rip)
+ 1:    wrmsr                           /* Make changes effective */
+       /* Setup cr0 */
+@@ -282,6 +310,7 @@ ENTRY(secondary_startup_64)
+        *      REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
+        *              address given in m16:64.
+        */
++      pax_set_fptr_mask
+       movq    initial_code(%rip),%rax
+       pushq   $0              # fake return address to stop unwinder
+       pushq   $__KERNEL_CS    # set correct cs
+@@ -388,7 +417,7 @@ ENTRY(early_idt_handler)
+       call dump_stack
+ #ifdef CONFIG_KALLSYMS        
+       leaq early_idt_ripmsg(%rip),%rdi
+-      movq 40(%rsp),%rsi      # %rip again
++      movq 88(%rsp),%rsi      # %rip again
+       call __print_symbol
+ #endif
+ #endif /* EARLY_PRINTK */
+@@ -416,6 +445,7 @@ ENDPROC(early_idt_handler)
+ early_recursion_flag:
+       .long 0
++      .section .rodata,"a",@progbits
+ #ifdef CONFIG_EARLY_PRINTK
+ early_idt_msg:
+       .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
+@@ -443,29 +473,52 @@ NEXT_PAGE(early_level4_pgt)
+ NEXT_PAGE(early_dynamic_pgts)
+       .fill   512*EARLY_DYNAMIC_PAGE_TABLES,8,0
+-      .data
++      .section .rodata,"a",@progbits
+-#ifndef CONFIG_XEN
+ NEXT_PAGE(init_level4_pgt)
+-      .fill   512,8,0
+-#else
+-NEXT_PAGE(init_level4_pgt)
+-      .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+       .org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
+       .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
++      .org    init_level4_pgt + L4_VMALLOC_START*8, 0
++      .quad   level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
++      .org    init_level4_pgt + L4_VMALLOC_END*8, 0
++      .quad   level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
++      .org    init_level4_pgt + L4_VMEMMAP_START*8, 0
++      .quad   level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
+       .org    init_level4_pgt + L4_START_KERNEL*8, 0
+       /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
+       .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
++#ifdef CONFIG_PAX_PER_CPU_PGD
++NEXT_PAGE(cpu_pgd)
++      .rept 2*NR_CPUS
++      .fill   512,8,0
++      .endr
++#endif
++
+ NEXT_PAGE(level3_ident_pgt)
+       .quad   level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
++#ifdef CONFIG_XEN
+       .fill   511, 8, 0
++#else
++      .quad   level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
++      .fill   510,8,0
++#endif
++
++NEXT_PAGE(level3_vmalloc_start_pgt)
++      .fill   512,8,0
++
++NEXT_PAGE(level3_vmalloc_end_pgt)
++      .fill   512,8,0
++
++NEXT_PAGE(level3_vmemmap_pgt)
++      .fill   L3_VMEMMAP_START,8,0
++      .quad   level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
++
+ NEXT_PAGE(level2_ident_pgt)
+-      /* Since I easily can, map the first 1G.
++      /* Since I easily can, map the first 2G.
+        * Don't set NX because code runs from these pages.
+        */
+-      PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
+-#endif
++      PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
+ NEXT_PAGE(level3_kernel_pgt)
+       .fill   L3_START_KERNEL,8,0
+@@ -473,6 +526,9 @@ NEXT_PAGE(level3_kernel_pgt)
+       .quad   level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
+       .quad   level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
++NEXT_PAGE(level2_vmemmap_pgt)
++      .fill   512,8,0
++
+ NEXT_PAGE(level2_kernel_pgt)
+       /*
+        * 512 MB kernel mapping. We spend a full page on this pagetable
+@@ -488,39 +544,70 @@ NEXT_PAGE(level2_kernel_pgt)
+               KERNEL_IMAGE_SIZE/PMD_SIZE)
+ NEXT_PAGE(level2_fixmap_pgt)
+-      .fill   506,8,0
+-      .quad   level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+-      /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
+-      .fill   5,8,0
++      .fill   507,8,0
++      .quad   level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
++      /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
++      .fill   4,8,0
+-NEXT_PAGE(level1_fixmap_pgt)
++NEXT_PAGE(level1_vsyscall_pgt)
+       .fill   512,8,0
+ #undef PMDS
+-      .data
++      .align PAGE_SIZE
++ENTRY(cpu_gdt_table)
++      .rept NR_CPUS
++      .quad   0x0000000000000000      /* NULL descriptor */
++      .quad   0x00cf9b000000ffff      /* __KERNEL32_CS */
++      .quad   0x00af9b000000ffff      /* __KERNEL_CS */
++      .quad   0x00cf93000000ffff      /* __KERNEL_DS */
++      .quad   0x00cffb000000ffff      /* __USER32_CS */
++      .quad   0x00cff3000000ffff      /* __USER_DS, __USER32_DS  */
++      .quad   0x00affb000000ffff      /* __USER_CS */
++
++#ifdef CONFIG_PAX_KERNEXEC
++      .quad   0x00af9b000000ffff      /* __KERNEXEC_KERNEL_CS */
++#else
++      .quad   0x0                     /* unused */
++#endif
++
++      .quad   0,0                     /* TSS */
++      .quad   0,0                     /* LDT */
++      .quad   0,0,0                   /* three TLS descriptors */
++      .quad   0x0000f40000000000      /* node/CPU stored in limit */
++      /* asm/segment.h:GDT_ENTRIES must match this */
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      .quad   0x00cf93000000ffff      /* __UDEREF_KERNEL_DS */
++#else
++      .quad   0x0                     /* unused */
++#endif
++
++      /* zero the remaining page */
++      .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
++      .endr
++
+       .align 16
+       .globl early_gdt_descr
+ early_gdt_descr:
+       .word   GDT_ENTRIES*8-1
+ early_gdt_descr_base:
+-      .quad   INIT_PER_CPU_VAR(gdt_page)
++      .quad   cpu_gdt_table
+ ENTRY(phys_base)
+       /* This must match the first entry in level2_kernel_pgt */
+       .quad   0x0000000000000000
+ #include "../../x86/xen/xen-head.S"
+-      
+-      .section .bss, "aw", @nobits
++
++      .section .rodata,"a",@progbits
++NEXT_PAGE(empty_zero_page)
++      .skip PAGE_SIZE
++
+       .align PAGE_SIZE
+ ENTRY(idt_table)
+-      .skip IDT_ENTRIES * 16
++      .fill 512,8,0
+       .align L1_CACHE_BYTES
+ ENTRY(nmi_idt_table)
+-      .skip IDT_ENTRIES * 16
+-
+-      __PAGE_ALIGNED_BSS
+-NEXT_PAGE(empty_zero_page)
+-      .skip PAGE_SIZE
++      .fill 512,8,0
+diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
+index 0fa6912..b37438b 100644
+--- a/arch/x86/kernel/i386_ksyms_32.c
++++ b/arch/x86/kernel/i386_ksyms_32.c
+@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
+ EXPORT_SYMBOL(cmpxchg8b_emu);
+ #endif
++EXPORT_SYMBOL_GPL(cpu_gdt_table);
++
+ /* Networking helper routines. */
+ EXPORT_SYMBOL(csum_partial_copy_generic);
++EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
++EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
+ EXPORT_SYMBOL(__get_user_1);
+ EXPORT_SYMBOL(__get_user_2);
+@@ -37,3 +41,11 @@ EXPORT_SYMBOL(strstr);
+ EXPORT_SYMBOL(csum_partial);
+ EXPORT_SYMBOL(empty_zero_page);
++
++#ifdef CONFIG_PAX_KERNEXEC
++EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
++#endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++EXPORT_SYMBOL(cpu_pgd);
++#endif
+diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
+index f7ea30d..6318acc 100644
+--- a/arch/x86/kernel/i387.c
++++ b/arch/x86/kernel/i387.c
+@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
+ static inline bool interrupted_user_mode(void)
+ {
+       struct pt_regs *regs = get_irq_regs();
+-      return regs && user_mode_vm(regs);
++      return regs && user_mode(regs);
+ }
+ /*
+diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
+index 9a5c460..84868423 100644
+--- a/arch/x86/kernel/i8259.c
++++ b/arch/x86/kernel/i8259.c
+@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
+ static void make_8259A_irq(unsigned int irq)
+ {
+       disable_irq_nosync(irq);
+-      io_apic_irqs &= ~(1<<irq);
++      io_apic_irqs &= ~(1UL<<irq);
+       irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
+                                     i8259A_chip.name);
+       enable_irq(irq);
+@@ -209,7 +209,7 @@ spurious_8259A_irq:
+                              "spurious 8259A interrupt: IRQ%d.\n", irq);
+                       spurious_irq_mask |= irqmask;
+               }
+-              atomic_inc(&irq_err_count);
++              atomic_inc_unchecked(&irq_err_count);
+               /*
+                * Theoretically we do not have to handle this IRQ,
+                * but in Linux this does not cause problems and is
+@@ -333,14 +333,16 @@ static void init_8259A(int auto_eoi)
+       /* (slave's support for AEOI in flat mode is to be investigated) */
+       outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
++      pax_open_kernel();
+       if (auto_eoi)
+               /*
+                * In AEOI mode we just have to mask the interrupt
+                * when acking.
+                */
+-              i8259A_chip.irq_mask_ack = disable_8259A_irq;
++              *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
+       else
+-              i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
++              *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
++      pax_close_kernel();
+       udelay(100);            /* wait for 8259A to initialize */
+diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
+index a979b5b..1d6db75 100644
+--- a/arch/x86/kernel/io_delay.c
++++ b/arch/x86/kernel/io_delay.c
+@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
+  * Quirk table for systems that misbehave (lock up, etc.) if port
+  * 0x80 is used:
+  */
+-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
++static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
+       {
+               .callback       = dmi_io_delay_0xed_port,
+               .ident          = "Compaq Presario V6000",
+diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
+index 4ddaf66..6292f4e 100644
+--- a/arch/x86/kernel/ioport.c
++++ b/arch/x86/kernel/ioport.c
+@@ -6,6 +6,7 @@
+ #include <linux/sched.h>
+ #include <linux/kernel.h>
+ #include <linux/capability.h>
++#include <linux/security.h>
+ #include <linux/errno.h>
+ #include <linux/types.h>
+ #include <linux/ioport.h>
+@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+       if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
+               return -EINVAL;
++#ifdef CONFIG_GRKERNSEC_IO
++      if (turn_on && grsec_disable_privio) {
++              gr_handle_ioperm();
++              return -EPERM;
++      }
++#endif
+       if (turn_on && !capable(CAP_SYS_RAWIO))
+               return -EPERM;
+@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+        * because the ->io_bitmap_max value must match the bitmap
+        * contents:
+        */
+-      tss = &per_cpu(init_tss, get_cpu());
++      tss = init_tss + get_cpu();
+       if (turn_on)
+               bitmap_clear(t->io_bitmap_ptr, from, num);
+@@ -103,6 +110,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
+               return -EINVAL;
+       /* Trying to gain more privileges? */
+       if (level > old) {
++#ifdef CONFIG_GRKERNSEC_IO
++              if (grsec_disable_privio) {
++                      gr_handle_iopl();
++                      return -EPERM;
++              }
++#endif
+               if (!capable(CAP_SYS_RAWIO))
+                       return -EPERM;
+       }
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index ac0631d..ff7cb62 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -18,7 +18,7 @@
+ #include <asm/mce.h>
+ #include <asm/hw_irq.h>
+-atomic_t irq_err_count;
++atomic_unchecked_t irq_err_count;
+ /* Function pointer for generic interrupt vector handling */
+ void (*x86_platform_ipi_callback)(void) = NULL;
+@@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
+               seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
+       seq_printf(p, "  Machine check polls\n");
+ #endif
+-      seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
++      seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
+ #if defined(CONFIG_X86_IO_APIC)
+-      seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
++      seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
+ #endif
+       return 0;
+ }
+@@ -164,7 +164,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
+ u64 arch_irq_stat(void)
+ {
+-      u64 sum = atomic_read(&irq_err_count);
++      u64 sum = atomic_read_unchecked(&irq_err_count);
+       return sum;
+ }
+diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
+index 344faf8..355f60d 100644
+--- a/arch/x86/kernel/irq_32.c
++++ b/arch/x86/kernel/irq_32.c
+@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
+       __asm__ __volatile__("andl %%esp,%0" :
+                            "=r" (sp) : "0" (THREAD_SIZE - 1));
+-      return sp < (sizeof(struct thread_info) + STACK_WARN);
++      return sp < STACK_WARN;
+ }
+ static void print_stack_overflow(void)
+@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
+  * per-CPU IRQ handling contexts (thread information and stack)
+  */
+ union irq_ctx {
+-      struct thread_info      tinfo;
+-      u32                     stack[THREAD_SIZE/sizeof(u32)];
++      unsigned long           previous_esp;
++      u32                     stack[THREAD_SIZE/sizeof(u32)];
+ } __attribute__((aligned(THREAD_SIZE)));
+ static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
+@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
+ static inline int
+ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+ {
+-      union irq_ctx *curctx, *irqctx;
++      union irq_ctx *irqctx;
+       u32 *isp, arg1, arg2;
+-      curctx = (union irq_ctx *) current_thread_info();
+       irqctx = __this_cpu_read(hardirq_ctx);
+       /*
+@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+        * handler) we can't do that and just have to keep using the
+        * current stack (which is the irq stack already after all)
+        */
+-      if (unlikely(curctx == irqctx))
++      if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
+               return 0;
+       /* build the stack frame on the IRQ stack */
+-      isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
+-      irqctx->tinfo.task = curctx->tinfo.task;
+-      irqctx->tinfo.previous_esp = current_stack_pointer;
++      isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
++      irqctx->previous_esp = current_stack_pointer;
+-      /* Copy the preempt_count so that the [soft]irq checks work. */
+-      irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      __set_fs(MAKE_MM_SEG(0));
++#endif
+       if (unlikely(overflow))
+               call_on_stack(print_stack_overflow, isp);
+@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+                    :  "0" (irq),   "1" (desc),  "2" (isp),
+                       "D" (desc->handle_irq)
+                    : "memory", "cc", "ecx");
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      __set_fs(current_thread_info()->addr_limit);
++#endif
++
+       return 1;
+ }
+@@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+  */
+ void __cpuinit irq_ctx_init(int cpu)
+ {
+-      union irq_ctx *irqctx;
+-
+       if (per_cpu(hardirq_ctx, cpu))
+               return;
+-      irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+-                                             THREADINFO_GFP,
+-                                             THREAD_SIZE_ORDER));
+-      memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
+-      irqctx->tinfo.cpu               = cpu;
+-      irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
+-      irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
+-
+-      per_cpu(hardirq_ctx, cpu) = irqctx;
+-
+-      irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+-                                             THREADINFO_GFP,
+-                                             THREAD_SIZE_ORDER));
+-      memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
+-      irqctx->tinfo.cpu               = cpu;
+-      irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
+-
+-      per_cpu(softirq_ctx, cpu) = irqctx;
++      per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
++      per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
++ 
++      printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
++             cpu, per_cpu(hardirq_ctx, cpu),  per_cpu(softirq_ctx, cpu));
+       printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
+              cpu, per_cpu(hardirq_ctx, cpu),  per_cpu(softirq_ctx, cpu));
+@@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
+ asmlinkage void do_softirq(void)
+ {
+       unsigned long flags;
+-      struct thread_info *curctx;
+       union irq_ctx *irqctx;
+       u32 *isp;
+@@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
+       local_irq_save(flags);
+       if (local_softirq_pending()) {
+-              curctx = current_thread_info();
+               irqctx = __this_cpu_read(softirq_ctx);
+-              irqctx->tinfo.task = curctx->task;
+-              irqctx->tinfo.previous_esp = current_stack_pointer;
++              irqctx->previous_esp = current_stack_pointer;
+               /* build the stack frame on the softirq stack */
+-              isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
++              isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++              __set_fs(MAKE_MM_SEG(0));
++#endif
+               call_on_stack(__do_softirq, isp);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++              __set_fs(current_thread_info()->addr_limit);
++#endif
++
+               /*
+                * Shouldn't happen, we returned above if in_interrupt():
+                */
+@@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
+       if (unlikely(!desc))
+               return false;
+-      if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
++      if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
+               if (unlikely(overflow))
+                       print_stack_overflow();
+               desc->handle_irq(irq, desc);
+diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
+index d04d3ec..ea4b374 100644
+--- a/arch/x86/kernel/irq_64.c
++++ b/arch/x86/kernel/irq_64.c
+@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
+       u64 estack_top, estack_bottom;
+       u64 curbase = (u64)task_stack_page(current);
+-      if (user_mode_vm(regs))
++      if (user_mode(regs))
+               return;
+       if (regs->sp >= curbase + sizeof(struct thread_info) +
+diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
+index dc1404b..bbc43e7 100644
+--- a/arch/x86/kernel/kdebugfs.c
++++ b/arch/x86/kernel/kdebugfs.c
+@@ -27,7 +27,7 @@ struct setup_data_node {
+       u32 len;
+ };
+-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
++static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
+                              size_t count, loff_t *ppos)
+ {
+       struct setup_data_node *node = file->private_data;
+diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
+index 836f832..a8bda67 100644
+--- a/arch/x86/kernel/kgdb.c
++++ b/arch/x86/kernel/kgdb.c
+@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+ #ifdef CONFIG_X86_32
+       switch (regno) {
+       case GDB_SS:
+-              if (!user_mode_vm(regs))
++              if (!user_mode(regs))
+                       *(unsigned long *)mem = __KERNEL_DS;
+               break;
+       case GDB_SP:
+-              if (!user_mode_vm(regs))
++              if (!user_mode(regs))
+                       *(unsigned long *)mem = kernel_stack_pointer(regs);
+               break;
+       case GDB_GS:
+@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
+               bp->attr.bp_addr = breakinfo[breakno].addr;
+               bp->attr.bp_len = breakinfo[breakno].len;
+               bp->attr.bp_type = breakinfo[breakno].type;
+-              info->address = breakinfo[breakno].addr;
++              if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
++                      info->address = ktla_ktva(breakinfo[breakno].addr);
++              else
++                      info->address = breakinfo[breakno].addr;
+               info->len = breakinfo[breakno].len;
+               info->type = breakinfo[breakno].type;
+               val = arch_install_hw_breakpoint(bp);
+@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
+       case 'k':
+               /* clear the trace bit */
+               linux_regs->flags &= ~X86_EFLAGS_TF;
+-              atomic_set(&kgdb_cpu_doing_single_step, -1);
++              atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
+               /* set the trace bit if we're stepping */
+               if (remcomInBuffer[0] == 's') {
+                       linux_regs->flags |= X86_EFLAGS_TF;
+-                      atomic_set(&kgdb_cpu_doing_single_step,
++                      atomic_set_unchecked(&kgdb_cpu_doing_single_step,
+                                  raw_smp_processor_id());
+               }
+@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+       switch (cmd) {
+       case DIE_DEBUG:
+-              if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
++              if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
+                       if (user_mode(regs))
+                               return single_step_cont(regs, args);
+                       break;
+@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+ #endif /* CONFIG_DEBUG_RODATA */
+       bpt->type = BP_BREAKPOINT;
+-      err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
++      err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
+                               BREAK_INSTR_SIZE);
+       if (err)
+               return err;
+-      err = probe_kernel_write((char *)bpt->bpt_addr,
++      err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
+                                arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
+ #ifdef CONFIG_DEBUG_RODATA
+       if (!err)
+@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+               return -EBUSY;
+       text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
+                 BREAK_INSTR_SIZE);
+-      err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
++      err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
+       if (err)
+               return err;
+       if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
+@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+       if (mutex_is_locked(&text_mutex))
+               goto knl_write;
+       text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
+-      err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
++      err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
+       if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
+               goto knl_write;
+       return err;
+ knl_write:
+ #endif /* CONFIG_DEBUG_RODATA */
+-      return probe_kernel_write((char *)bpt->bpt_addr,
++      return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
+                                 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
+ }
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index 211bce4..6e2580a 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
+               s32 raddr;
+       } __packed *insn;
+-      insn = (struct __arch_relative_insn *)from;
++      insn = (struct __arch_relative_insn *)ktla_ktva(from);
++
++      pax_open_kernel();
+       insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
+       insn->op = op;
++      pax_close_kernel();
+ }
+ /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
+@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
+       kprobe_opcode_t opcode;
+       kprobe_opcode_t *orig_opcodes = opcodes;
+-      if (search_exception_tables((unsigned long)opcodes))
++      if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
+               return 0;       /* Page fault may occur on this address. */
+ retry:
+@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
+        *  for the first byte, we can recover the original instruction
+        *  from it and kp->opcode.
+        */
+-      memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
++      memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+       buf[0] = kp->opcode;
+-      return (unsigned long)buf;
++      return ktva_ktla((unsigned long)buf);
+ }
+ /*
+@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
+       /* Another subsystem puts a breakpoint, failed to recover */
+       if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+               return 0;
++      pax_open_kernel();
+       memcpy(dest, insn.kaddr, insn.length);
++      pax_close_kernel();
+ #ifdef CONFIG_X86_64
+       if (insn_rip_relative(&insn)) {
+@@ -359,7 +364,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
+                       return 0;
+               }
+               disp = (u8 *) dest + insn_offset_displacement(&insn);
++              pax_open_kernel();
+               *(s32 *) disp = (s32) newdisp;
++              pax_close_kernel();
+       }
+ #endif
+       return insn.length;
+@@ -498,7 +505,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
+                * nor set current_kprobe, because it doesn't use single
+                * stepping.
+                */
+-              regs->ip = (unsigned long)p->ainsn.insn;
++              regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
+               preempt_enable_no_resched();
+               return;
+       }
+@@ -515,9 +522,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
+       regs->flags &= ~X86_EFLAGS_IF;
+       /* single step inline if the instruction is an int3 */
+       if (p->opcode == BREAKPOINT_INSTRUCTION)
+-              regs->ip = (unsigned long)p->addr;
++              regs->ip = ktla_ktva((unsigned long)p->addr);
+       else
+-              regs->ip = (unsigned long)p->ainsn.insn;
++              regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
+ }
+ /*
+@@ -596,7 +603,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
+                               setup_singlestep(p, regs, kcb, 0);
+                       return 1;
+               }
+-      } else if (*addr != BREAKPOINT_INSTRUCTION) {
++      } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
+               /*
+                * The breakpoint instruction was removed right
+                * after we hit it.  Another cpu has removed
+@@ -642,6 +649,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
+                       "       movq %rax, 152(%rsp)\n"
+                       RESTORE_REGS_STRING
+                       "       popfq\n"
++#ifdef KERNEXEC_PLUGIN
++                      "       btsq $63,(%rsp)\n"
++#endif
+ #else
+                       "       pushf\n"
+                       SAVE_REGS_STRING
+@@ -779,7 +789,7 @@ static void __kprobes
+ resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+ {
+       unsigned long *tos = stack_addr(regs);
+-      unsigned long copy_ip = (unsigned long)p->ainsn.insn;
++      unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
+       unsigned long orig_ip = (unsigned long)p->addr;
+       kprobe_opcode_t *insn = p->ainsn.insn;
+@@ -961,7 +971,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
+       struct die_args *args = data;
+       int ret = NOTIFY_DONE;
+-      if (args->regs && user_mode_vm(args->regs))
++      if (args->regs && user_mode(args->regs))
+               return ret;
+       switch (val) {
+diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
+index 76dc6f0..66bdfc3 100644
+--- a/arch/x86/kernel/kprobes/opt.c
++++ b/arch/x86/kernel/kprobes/opt.c
+@@ -79,6 +79,7 @@ found:
+ /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
+ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
+ {
++      pax_open_kernel();
+ #ifdef CONFIG_X86_64
+       *addr++ = 0x48;
+       *addr++ = 0xbf;
+@@ -86,6 +87,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long v
+       *addr++ = 0xb8;
+ #endif
+       *(unsigned long *)addr = val;
++      pax_close_kernel();
+ }
+ static void __used __kprobes kprobes_optinsn_template_holder(void)
+@@ -338,7 +340,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
+        * Verify if the address gap is in 2GB range, because this uses
+        * a relative jump.
+        */
+-      rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
++      rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
+       if (abs(rel) > 0x7fffffff)
+               return -ERANGE;
+@@ -353,16 +355,18 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
+       op->optinsn.size = ret;
+       /* Copy arch-dep-instance from template */
+-      memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
++      pax_open_kernel();
++      memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
++      pax_close_kernel();
+       /* Set probe information */
+       synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
+       /* Set probe function call */
+-      synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
++      synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
+       /* Set returning jmp instruction at the tail of out-of-line buffer */
+-      synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
++      synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
+                          (u8 *)op->kp.addr + op->optinsn.size);
+       flush_icache_range((unsigned long) buf,
+@@ -385,7 +389,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
+                       ((long)op->kp.addr + RELATIVEJUMP_SIZE));
+       /* Backup instructions which will be replaced by jump address */
+-      memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
++      memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
+              RELATIVE_ADDR_SIZE);
+       insn_buf[0] = RELATIVEJUMP_OPCODE;
+@@ -483,7 +487,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
+               /* This kprobe is really able to run optimized path. */
+               op = container_of(p, struct optimized_kprobe, kp);
+               /* Detour through copied instructions */
+-              regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
++              regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
+               if (!reenter)
+                       reset_current_kprobe();
+               preempt_enable_no_resched();
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index cd6d9a5..16245a4 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -455,7 +455,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
++static struct notifier_block kvm_cpu_notifier = {
+         .notifier_call  = kvm_cpu_notify,
+ };
+ #endif
+diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
+index ebc9873..1b9724b 100644
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+       if (reload) {
+ #ifdef CONFIG_SMP
+               preempt_disable();
+-              load_LDT(pc);
++              load_LDT_nolock(pc);
+               if (!cpumask_equal(mm_cpumask(current->mm),
+                                  cpumask_of(smp_processor_id())))
+                       smp_call_function(flush_ldt, current->mm, 1);
+               preempt_enable();
+ #else
+-              load_LDT(pc);
++              load_LDT_nolock(pc);
+ #endif
+       }
+       if (oldsize) {
+@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+               return err;
+       for (i = 0; i < old->size; i++)
+-              write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
++              write_ldt_entry(new->ldt, i, old->ldt + i);
+       return 0;
+ }
+@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+               retval = copy_ldt(&mm->context, &old_mm->context);
+               mutex_unlock(&old_mm->context.lock);
+       }
++
++      if (tsk == current) {
++              mm->context.vdso = 0;
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++              mm->context.user_cs_base = 0UL;
++              mm->context.user_cs_limit = ~0UL;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++              cpus_clear(mm->context.cpu_user_cs_mask);
++#endif
++
++#endif
++#endif
++
++      }
++
+       return retval;
+ }
+@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+               }
+       }
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
++              error = -EINVAL;
++              goto out_unlock;
++      }
++#endif
++
+       fill_ldt(&ldt, &ldt_info);
+       if (oldmode)
+               ldt.avl = 0;
+diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
+index 5b19e4d..6476a76 100644
+--- a/arch/x86/kernel/machine_kexec_32.c
++++ b/arch/x86/kernel/machine_kexec_32.c
+@@ -26,7 +26,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/debugreg.h>
+-static void set_idt(void *newidt, __u16 limit)
++static void set_idt(struct desc_struct *newidt, __u16 limit)
+ {
+       struct desc_ptr curidt;
+@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
+ }
+-static void set_gdt(void *newgdt, __u16 limit)
++static void set_gdt(struct desc_struct *newgdt, __u16 limit)
+ {
+       struct desc_ptr curgdt;
+@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
+       }
+       control_page = page_address(image->control_code_page);
+-      memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
++      memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
+       relocate_kernel_ptr = control_page;
+       page_list[PA_CONTROL_PAGE] = __pa(control_page);
+diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
+index 22db92b..d546bec 100644
+--- a/arch/x86/kernel/microcode_core.c
++++ b/arch/x86/kernel/microcode_core.c
+@@ -513,7 +513,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __refdata mc_cpu_notifier = {
++static struct notifier_block mc_cpu_notifier = {
+       .notifier_call  = mc_cpu_callback,
+ };
+diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
+index 5fb2ceb..3ae90bb 100644
+--- a/arch/x86/kernel/microcode_intel.c
++++ b/arch/x86/kernel/microcode_intel.c
+@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
+ static int get_ucode_user(void *to, const void *from, size_t n)
+ {
+-      return copy_from_user(to, from, n);
++      return copy_from_user(to, (const void __force_user *)from, n);
+ }
+ static enum ucode_state
+ request_microcode_user(int cpu, const void __user *buf, size_t size)
+ {
+-      return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
++      return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
+ }
+ static void microcode_fini_cpu(int cpu)
+diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
+index 216a4d7..228255a 100644
+--- a/arch/x86/kernel/module.c
++++ b/arch/x86/kernel/module.c
+@@ -43,15 +43,60 @@ do {                                                       \
+ } while (0)
+ #endif
+-void *module_alloc(unsigned long size)
++static inline void *__module_alloc(unsigned long size, pgprot_t prot)
+ {
+-      if (PAGE_ALIGN(size) > MODULES_LEN)
++      if (!size || PAGE_ALIGN(size) > MODULES_LEN)
+               return NULL;
+       return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+-                              GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
++                              GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
+                               -1, __builtin_return_address(0));
+ }
++void *module_alloc(unsigned long size)
++{
++
++#ifdef CONFIG_PAX_KERNEXEC
++      return __module_alloc(size, PAGE_KERNEL);
++#else
++      return __module_alloc(size, PAGE_KERNEL_EXEC);
++#endif
++
++}
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_32
++void *module_alloc_exec(unsigned long size)
++{
++      struct vm_struct *area;
++
++      if (size == 0)
++              return NULL;
++
++      area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
++      return area ? area->addr : NULL;
++}
++EXPORT_SYMBOL(module_alloc_exec);
++
++void module_free_exec(struct module *mod, void *module_region)
++{
++      vunmap(module_region);
++}
++EXPORT_SYMBOL(module_free_exec);
++#else
++void module_free_exec(struct module *mod, void *module_region)
++{
++      module_free(mod, module_region);
++}
++EXPORT_SYMBOL(module_free_exec);
++
++void *module_alloc_exec(unsigned long size)
++{
++      return __module_alloc(size, PAGE_KERNEL_RX);
++}
++EXPORT_SYMBOL(module_alloc_exec);
++#endif
++#endif
++
+ #ifdef CONFIG_X86_32
+ int apply_relocate(Elf32_Shdr *sechdrs,
+                  const char *strtab,
+@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
+       unsigned int i;
+       Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
+       Elf32_Sym *sym;
+-      uint32_t *location;
++      uint32_t *plocation, location;
+       DEBUGP("Applying relocate section %u to %u\n",
+              relsec, sechdrs[relsec].sh_info);
+       for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+               /* This is where to make the change */
+-              location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+-                      + rel[i].r_offset;
++              plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
++              location = (uint32_t)plocation;
++              if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
++                      plocation = ktla_ktva((void *)plocation);
+               /* This is the symbol it is referring to.  Note that all
+                  undefined symbols have been resolved.  */
+               sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
+               switch (ELF32_R_TYPE(rel[i].r_info)) {
+               case R_386_32:
+                       /* We add the value into the location given */
+-                      *location += sym->st_value;
++                      pax_open_kernel();
++                      *plocation += sym->st_value;
++                      pax_close_kernel();
+                       break;
+               case R_386_PC32:
+                       /* Add the value, subtract its position */
+-                      *location += sym->st_value - (uint32_t)location;
++                      pax_open_kernel();
++                      *plocation += sym->st_value - location;
++                      pax_close_kernel();
+                       break;
+               default:
+                       pr_err("%s: Unknown relocation: %u\n",
+@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
+               case R_X86_64_NONE:
+                       break;
+               case R_X86_64_64:
++                      pax_open_kernel();
+                       *(u64 *)loc = val;
++                      pax_close_kernel();
+                       break;
+               case R_X86_64_32:
++                      pax_open_kernel();
+                       *(u32 *)loc = val;
++                      pax_close_kernel();
+                       if (val != *(u32 *)loc)
+                               goto overflow;
+                       break;
+               case R_X86_64_32S:
++                      pax_open_kernel();
+                       *(s32 *)loc = val;
++                      pax_close_kernel();
+                       if ((s64)val != *(s32 *)loc)
+                               goto overflow;
+                       break;
+               case R_X86_64_PC32:
+                       val -= (u64)loc;
++                      pax_open_kernel();
+                       *(u32 *)loc = val;
++                      pax_close_kernel();
++
+ #if 0
+                       if ((s64)val != *(s32 *)loc)
+                               goto overflow;
+diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
+index ce13049..e2e9c3c 100644
+--- a/arch/x86/kernel/msr.c
++++ b/arch/x86/kernel/msr.c
+@@ -233,7 +233,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
+       return notifier_from_errno(err);
+ }
+-static struct notifier_block __refdata msr_class_cpu_notifier = {
++static struct notifier_block msr_class_cpu_notifier = {
+       .notifier_call = msr_class_cpu_callback,
+ };
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index 6030805..2d33f21 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -105,7 +105,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
+       return handled;
+ }
+-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
++int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
+ {
+       struct nmi_desc *desc = nmi_to_desc(type);
+       unsigned long flags;
+@@ -129,9 +129,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
+        * event confuses some handlers (kdump uses this flag)
+        */
+       if (action->flags & NMI_FLAG_FIRST)
+-              list_add_rcu(&action->list, &desc->head);
++              pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
+       else
+-              list_add_tail_rcu(&action->list, &desc->head);
++              pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
+       
+       spin_unlock_irqrestore(&desc->lock, flags);
+       return 0;
+@@ -154,7 +154,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
+               if (!strcmp(n->name, name)) {
+                       WARN(in_nmi(),
+                               "Trying to free NMI (%s) from NMI context!\n", n->name);
+-                      list_del_rcu(&n->list);
++                      pax_list_del_rcu((struct list_head *)&n->list);
+                       break;
+               }
+       }
+@@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
+ dotraplinkage notrace __kprobes void
+ do_nmi(struct pt_regs *regs, long error_code)
+ {
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++      if (!user_mode(regs)) {
++              unsigned long cs = regs->cs & 0xFFFF;
++              unsigned long ip = ktva_ktla(regs->ip);
++
++              if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
++                      regs->ip = ip;
++      }
++#endif
++
+       nmi_nesting_preprocess(regs);
+       nmi_enter();
+diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
+index 6d9582e..f746287 100644
+--- a/arch/x86/kernel/nmi_selftest.c
++++ b/arch/x86/kernel/nmi_selftest.c
+@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
+ {
+       /* trap all the unknown NMIs we may generate */
+       register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
+-                      __initdata);
++                      __initconst);
+ }
+ static void __init cleanup_nmi_testsuite(void)
+@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
+       unsigned long timeout;
+       if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
+-                               NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
++                               NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
+               nmi_fail = FAILURE;
+               return;
+       }
+diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
+index 676b8c7..870ba04 100644
+--- a/arch/x86/kernel/paravirt-spinlocks.c
++++ b/arch/x86/kernel/paravirt-spinlocks.c
+@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+       arch_spin_lock(lock);
+ }
+-struct pv_lock_ops pv_lock_ops = {
++struct pv_lock_ops pv_lock_ops __read_only = {
+ #ifdef CONFIG_SMP
+       .spin_is_locked = __ticket_spin_is_locked,
+       .spin_is_contended = __ticket_spin_is_contended,
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index cd6de64..27c6af0 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
+ {
+       return x;
+ }
++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
++PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
++#endif
+ void __init default_banner(void)
+ {
+@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+       if (opfunc == NULL)
+               /* If there's no function, patch it with a ud2a (BUG) */
+               ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
+-      else if (opfunc == _paravirt_nop)
++      else if (opfunc == (void *)_paravirt_nop)
+               /* If the operation is a nop, then nop the callsite */
+               ret = paravirt_patch_nop();
+       /* identity functions just return their single argument */
+-      else if (opfunc == _paravirt_ident_32)
++      else if (opfunc == (void *)_paravirt_ident_32)
+               ret = paravirt_patch_ident_32(insnbuf, len);
+-      else if (opfunc == _paravirt_ident_64)
++      else if (opfunc == (void *)_paravirt_ident_64)
+               ret = paravirt_patch_ident_64(insnbuf, len);
++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
++      else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
++              ret = paravirt_patch_ident_64(insnbuf, len);
++#endif
+       else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
+                type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
+@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
+       if (insn_len > len || start == NULL)
+               insn_len = len;
+       else
+-              memcpy(insnbuf, start, insn_len);
++              memcpy(insnbuf, ktla_ktva(start), insn_len);
+       return insn_len;
+ }
+@@ -304,7 +311,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
+       return this_cpu_read(paravirt_lazy_mode);
+ }
+-struct pv_info pv_info = {
++struct pv_info pv_info __read_only = {
+       .name = "bare hardware",
+       .paravirt_enabled = 0,
+       .kernel_rpl = 0,
+@@ -315,16 +322,16 @@ struct pv_info pv_info = {
+ #endif
+ };
+-struct pv_init_ops pv_init_ops = {
++struct pv_init_ops pv_init_ops __read_only = {
+       .patch = native_patch,
+ };
+-struct pv_time_ops pv_time_ops = {
++struct pv_time_ops pv_time_ops __read_only = {
+       .sched_clock = native_sched_clock,
+       .steal_clock = native_steal_clock,
+ };
+-struct pv_irq_ops pv_irq_ops = {
++struct pv_irq_ops pv_irq_ops __read_only = {
+       .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
+       .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
+       .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
+@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
+ #endif
+ };
+-struct pv_cpu_ops pv_cpu_ops = {
++struct pv_cpu_ops pv_cpu_ops __read_only = {
+       .cpuid = native_cpuid,
+       .get_debugreg = native_get_debugreg,
+       .set_debugreg = native_set_debugreg,
+@@ -394,21 +401,26 @@ struct pv_cpu_ops pv_cpu_ops = {
+       .end_context_switch = paravirt_nop,
+ };
+-struct pv_apic_ops pv_apic_ops = {
++struct pv_apic_ops pv_apic_ops __read_only = {
+ #ifdef CONFIG_X86_LOCAL_APIC
+       .startup_ipi_hook = paravirt_nop,
+ #endif
+ };
+-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
++#ifdef CONFIG_X86_32
++#ifdef CONFIG_X86_PAE
++/* 64-bit pagetable entries */
++#define PTE_IDENT     PV_CALLEE_SAVE(_paravirt_ident_64)
++#else
+ /* 32-bit pagetable entries */
+ #define PTE_IDENT     __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
++#endif
+ #else
+ /* 64-bit pagetable entries */
+ #define PTE_IDENT     __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
+ #endif
+-struct pv_mmu_ops pv_mmu_ops = {
++struct pv_mmu_ops pv_mmu_ops __read_only = {
+       .read_cr2 = native_read_cr2,
+       .write_cr2 = native_write_cr2,
+@@ -458,6 +470,7 @@ struct pv_mmu_ops pv_mmu_ops = {
+       .make_pud = PTE_IDENT,
+       .set_pgd = native_set_pgd,
++      .set_pgd_batched = native_set_pgd_batched,
+ #endif
+ #endif /* PAGETABLE_LEVELS >= 3 */
+@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
+       },
+       .set_fixmap = native_set_fixmap,
++
++#ifdef CONFIG_PAX_KERNEXEC
++      .pax_open_kernel = native_pax_open_kernel,
++      .pax_close_kernel = native_pax_close_kernel,
++#endif
++
+ };
+ EXPORT_SYMBOL_GPL(pv_time_ops);
+diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
+index 299d493..2ccb0ee 100644
+--- a/arch/x86/kernel/pci-calgary_64.c
++++ b/arch/x86/kernel/pci-calgary_64.c
+@@ -1339,7 +1339,7 @@ static void __init get_tce_space_from_tar(void)
+                       tce_space = be64_to_cpu(readq(target));
+                       tce_space = tce_space & TAR_SW_BITS;
+-                      tce_space = tce_space & (~specified_table_size);
++                      tce_space = tce_space & (~(unsigned long)specified_table_size);
+                       info->tce_space = (u64 *)__va(tce_space);
+               }
+       }
+diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
+index 35ccf75..7a15747 100644
+--- a/arch/x86/kernel/pci-iommu_table.c
++++ b/arch/x86/kernel/pci-iommu_table.c
+@@ -2,7 +2,7 @@
+ #include <asm/iommu_table.h>
+ #include <linux/string.h>
+ #include <linux/kallsyms.h>
+-
++#include <linux/sched.h>
+ #define DEBUG 1
+diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
+index 6c483ba..d10ce2f 100644
+--- a/arch/x86/kernel/pci-swiotlb.c
++++ b/arch/x86/kernel/pci-swiotlb.c
+@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
+                                     void *vaddr, dma_addr_t dma_addr,
+                                     struct dma_attrs *attrs)
+ {
+-      swiotlb_free_coherent(dev, size, vaddr, dma_addr);
++      swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
+ }
+ static struct dma_map_ops swiotlb_dma_ops = {
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 81a5f5e..20f8b58 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -36,7 +36,8 @@
+  * section. Since TSS's are completely CPU-local, we want them
+  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+  */
+-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
++struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
++EXPORT_SYMBOL(init_tss);
+ #ifdef CONFIG_X86_64
+ static DEFINE_PER_CPU(unsigned char, is_idle);
+@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
+         task_xstate_cachep =
+               kmem_cache_create("task_xstate", xstate_size,
+                                 __alignof__(union thread_xstate),
+-                                SLAB_PANIC | SLAB_NOTRACK, NULL);
++                                SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
+ }
+ /*
+@@ -105,7 +106,7 @@ void exit_thread(void)
+       unsigned long *bp = t->io_bitmap_ptr;
+       if (bp) {
+-              struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
++              struct tss_struct *tss = init_tss + get_cpu();
+               t->io_bitmap_ptr = NULL;
+               clear_thread_flag(TIF_IO_BITMAP);
+@@ -125,6 +126,9 @@ void flush_thread(void)
+ {
+       struct task_struct *tsk = current;
++#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
++      loadsegment(gs, 0);
++#endif
+       flush_ptrace_hw_breakpoint(tsk);
+       memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+       drop_init_fpu(tsk);
+@@ -271,7 +275,7 @@ static void __exit_idle(void)
+ void exit_idle(void)
+ {
+       /* idle loop has pid 0 */
+-      if (current->pid)
++      if (task_pid_nr(current))
+               return;
+       __exit_idle();
+ }
+@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
+       return ret;
+ }
+ #endif
+-void stop_this_cpu(void *dummy)
++__noreturn void stop_this_cpu(void *dummy)
+ {
+       local_irq_disable();
+       /*
+@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
+ }
+ early_param("idle", idle_setup);
+-unsigned long arch_align_stack(unsigned long sp)
++#ifdef CONFIG_PAX_RANDKSTACK
++void pax_randomize_kstack(struct pt_regs *regs)
+ {
+-      if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+-              sp -= get_random_int() % 8192;
+-      return sp & ~0xf;
+-}
++      struct thread_struct *thread = &current->thread;
++      unsigned long time;
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+-      unsigned long range_end = mm->brk + 0x02000000;
+-      return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+-}
++      if (!randomize_va_space)
++              return;
++
++      if (v8086_mode(regs))
++              return;
++      rdtscl(time);
++
++      /* P4 seems to return a 0 LSB, ignore it */
++#ifdef CONFIG_MPENTIUM4
++      time &= 0x3EUL;
++      time <<= 2;
++#elif defined(CONFIG_X86_64)
++      time &= 0xFUL;
++      time <<= 4;
++#else
++      time &= 0x1FUL;
++      time <<= 3;
++#endif
++
++      thread->sp0 ^= time;
++      load_sp0(init_tss + smp_processor_id(), thread);
++
++#ifdef CONFIG_X86_64
++      this_cpu_write(kernel_stack, thread->sp0);
++#endif
++}
++#endif
+diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
+index 7305f7d..22f73d6 100644
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
+ unsigned long thread_saved_pc(struct task_struct *tsk)
+ {
+       return ((unsigned long *)tsk->thread.sp)[3];
++//XXX return tsk->thread.eip;
+ }
+ void __show_regs(struct pt_regs *regs, int all)
+@@ -74,19 +75,18 @@ void __show_regs(struct pt_regs *regs, int all)
+       unsigned long sp;
+       unsigned short ss, gs;
+-      if (user_mode_vm(regs)) {
++      if (user_mode(regs)) {
+               sp = regs->sp;
+               ss = regs->ss & 0xffff;
+-              gs = get_user_gs(regs);
+       } else {
+               sp = kernel_stack_pointer(regs);
+               savesegment(ss, ss);
+-              savesegment(gs, gs);
+       }
++      gs = get_user_gs(regs);
+       printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
+                       (u16)regs->cs, regs->ip, regs->flags,
+-                      smp_processor_id());
++                      raw_smp_processor_id());
+       print_symbol("EIP is at %s\n", regs->ip);
+       printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
+@@ -128,20 +128,21 @@ void release_thread(struct task_struct *dead_task)
+ int copy_thread(unsigned long clone_flags, unsigned long sp,
+       unsigned long arg, struct task_struct *p)
+ {
+-      struct pt_regs *childregs = task_pt_regs(p);
++      struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
+       struct task_struct *tsk;
+       int err;
+       p->thread.sp = (unsigned long) childregs;
+       p->thread.sp0 = (unsigned long) (childregs+1);
++      p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
+       if (unlikely(p->flags & PF_KTHREAD)) {
+               /* kernel thread */
+               memset(childregs, 0, sizeof(struct pt_regs));
+               p->thread.ip = (unsigned long) ret_from_kernel_thread;
+-              task_user_gs(p) = __KERNEL_STACK_CANARY;
+-              childregs->ds = __USER_DS;
+-              childregs->es = __USER_DS;
++              savesegment(gs, childregs->gs);
++              childregs->ds = __KERNEL_DS;
++              childregs->es = __KERNEL_DS;
+               childregs->fs = __KERNEL_PERCPU;
+               childregs->bx = sp;     /* function */
+               childregs->bp = arg;
+@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+       struct thread_struct *prev = &prev_p->thread,
+                                *next = &next_p->thread;
+       int cpu = smp_processor_id();
+-      struct tss_struct *tss = &per_cpu(init_tss, cpu);
++      struct tss_struct *tss = init_tss + cpu;
+       fpu_switch_t fpu;
+       /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+        */
+       lazy_save_gs(prev->gs);
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      __set_fs(task_thread_info(next_p)->addr_limit);
++#endif
++
+       /*
+        * Load the per-thread Thread-Local Storage descriptor.
+        */
+@@ -302,6 +307,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+        */
+       arch_end_context_switch(next_p);
++      this_cpu_write(current_task, next_p);
++      this_cpu_write(current_tinfo, &next_p->tinfo);
++
+       /*
+        * Restore %gs if needed (which is common)
+        */
+@@ -310,8 +318,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+       switch_fpu_finish(next_p, fpu);
+-      this_cpu_write(current_task, next_p);
+-
+       return prev_p;
+ }
+@@ -341,4 +347,3 @@ unsigned long get_wchan(struct task_struct *p)
+       } while (count++ < 16);
+       return 0;
+ }
+-
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index 355ae06..560fbbe 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -151,10 +151,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
+       struct pt_regs *childregs;
+       struct task_struct *me = current;
+-      p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
++      p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
+       childregs = task_pt_regs(p);
+       p->thread.sp = (unsigned long) childregs;
+       p->thread.usersp = me->thread.usersp;
++      p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
+       set_tsk_thread_flag(p, TIF_FORK);
+       p->fpu_counter = 0;
+       p->thread.io_bitmap_ptr = NULL;
+@@ -165,6 +166,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
+       p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
+       savesegment(es, p->thread.es);
+       savesegment(ds, p->thread.ds);
++      savesegment(ss, p->thread.ss);
++      BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
+       memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
+       if (unlikely(p->flags & PF_KTHREAD)) {
+@@ -273,7 +276,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+       struct thread_struct *prev = &prev_p->thread;
+       struct thread_struct *next = &next_p->thread;
+       int cpu = smp_processor_id();
+-      struct tss_struct *tss = &per_cpu(init_tss, cpu);
++      struct tss_struct *tss = init_tss + cpu;
+       unsigned fsindex, gsindex;
+       fpu_switch_t fpu;
+@@ -296,6 +299,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+       if (unlikely(next->ds | prev->ds))
+               loadsegment(ds, next->ds);
++      savesegment(ss, prev->ss);
++      if (unlikely(next->ss != prev->ss))
++              loadsegment(ss, next->ss);
+       /* We must save %fs and %gs before load_TLS() because
+        * %fs and %gs may be cleared by load_TLS().
+@@ -355,10 +361,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+       prev->usersp = this_cpu_read(old_rsp);
+       this_cpu_write(old_rsp, next->usersp);
+       this_cpu_write(current_task, next_p);
++      this_cpu_write(current_tinfo, &next_p->tinfo);
+-      this_cpu_write(kernel_stack,
+-                (unsigned long)task_stack_page(next_p) +
+-                THREAD_SIZE - KERNEL_STACK_OFFSET);
++      this_cpu_write(kernel_stack, next->sp0);
+       /*
+        * Now maybe reload the debug registers and handle I/O bitmaps
+@@ -427,12 +432,11 @@ unsigned long get_wchan(struct task_struct *p)
+       if (!p || p == current || p->state == TASK_RUNNING)
+               return 0;
+       stack = (unsigned long)task_stack_page(p);
+-      if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
++      if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
+               return 0;
+       fp = *(u64 *)(p->thread.sp);
+       do {
+-              if (fp < (unsigned long)stack ||
+-                  fp >= (unsigned long)stack+THREAD_SIZE)
++              if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
+                       return 0;
+               ip = *(u64 *)(fp+8);
+               if (!in_sched_functions(ip))
+diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
+index 29a8120..a50b5ee 100644
+--- a/arch/x86/kernel/ptrace.c
++++ b/arch/x86/kernel/ptrace.c
+@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
+ {
+       unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
+       unsigned long sp = (unsigned long)&regs->sp;
+-      struct thread_info *tinfo;
+-      if (context == (sp & ~(THREAD_SIZE - 1)))
++      if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
+               return sp;
+-      tinfo = (struct thread_info *)context;
+-      if (tinfo->previous_esp)
+-              return tinfo->previous_esp;
++      sp = *(unsigned long *)context;
++      if (sp)
++              return sp;
+       return (unsigned long)regs;
+ }
+@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
+ static unsigned long ptrace_get_dr7(struct perf_event *bp[])
+ {
+       int i;
+-      int dr7 = 0;
++      unsigned long dr7 = 0;
+       struct arch_hw_breakpoint *info;
+       for (i = 0; i < HBP_NUM; i++) {
+@@ -856,7 +855,7 @@ long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
+ {
+       int ret;
+-      unsigned long __user *datap = (unsigned long __user *)data;
++      unsigned long __user *datap = (__force unsigned long __user *)data;
+       switch (request) {
+       /* read the word at location addr in the USER area. */
+@@ -941,14 +940,14 @@ long arch_ptrace(struct task_struct *child, long request,
+               if ((int) addr < 0)
+                       return -EIO;
+               ret = do_get_thread_area(child, addr,
+-                                      (struct user_desc __user *)data);
++                                      (__force struct user_desc __user *) data);
+               break;
+       case PTRACE_SET_THREAD_AREA:
+               if ((int) addr < 0)
+                       return -EIO;
+               ret = do_set_thread_area(child, addr,
+-                                      (struct user_desc __user *)data, 0);
++                                      (__force struct user_desc __user *) data, 0);
+               break;
+ #endif
+@@ -1326,7 +1325,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ #ifdef CONFIG_X86_64
+-static struct user_regset x86_64_regsets[] __read_mostly = {
++static user_regset_no_const x86_64_regsets[] __read_only = {
+       [REGSET_GENERAL] = {
+               .core_note_type = NT_PRSTATUS,
+               .n = sizeof(struct user_regs_struct) / sizeof(long),
+@@ -1367,7 +1366,7 @@ static const struct user_regset_view user_x86_64_view = {
+ #endif        /* CONFIG_X86_64 */
+ #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
+-static struct user_regset x86_32_regsets[] __read_mostly = {
++static user_regset_no_const x86_32_regsets[] __read_only = {
+       [REGSET_GENERAL] = {
+               .core_note_type = NT_PRSTATUS,
+               .n = sizeof(struct user_regs_struct32) / sizeof(u32),
+@@ -1420,7 +1419,7 @@ static const struct user_regset_view user_x86_32_view = {
+  */
+ u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
+-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
++void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
+ {
+ #ifdef CONFIG_X86_64
+       x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
+@@ -1455,7 +1454,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
+       memset(info, 0, sizeof(*info));
+       info->si_signo = SIGTRAP;
+       info->si_code = si_code;
+-      info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
++      info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
+ }
+ void user_single_step_siginfo(struct task_struct *tsk,
+@@ -1484,6 +1483,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
+ # define IS_IA32      0
+ #endif
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ /*
+  * We must return the syscall number to actually look up in the table.
+  * This can be -1L to skip running any syscall at all.
+@@ -1494,6 +1497,11 @@ long syscall_trace_enter(struct pt_regs *regs)
+       user_exit();
++#ifdef CONFIG_GRKERNSEC_SETXID
++      if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++              gr_delayed_cred_worker();
++#endif
++
+       /*
+        * If we stepped into a sysenter/syscall insn, it trapped in
+        * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
+@@ -1549,6 +1557,11 @@ void syscall_trace_leave(struct pt_regs *regs)
+        */
+       user_exit();
++#ifdef CONFIG_GRKERNSEC_SETXID
++      if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++              gr_delayed_cred_worker();
++#endif
++
+       audit_syscall_exit(regs);
+       if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
+index 2cb9470..ff1fd80 100644
+--- a/arch/x86/kernel/pvclock.c
++++ b/arch/x86/kernel/pvclock.c
+@@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
+       return pv_tsc_khz;
+ }
+-static atomic64_t last_value = ATOMIC64_INIT(0);
++static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
+ void pvclock_resume(void)
+ {
+-      atomic64_set(&last_value, 0);
++      atomic64_set_unchecked(&last_value, 0);
+ }
+ u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
+@@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+        * updating at the same time, and one of them could be slightly behind,
+        * making the assumption that last_value always go forward fail to hold.
+        */
+-      last = atomic64_read(&last_value);
++      last = atomic64_read_unchecked(&last_value);
+       do {
+               if (ret < last)
+                       return last;
+-              last = atomic64_cmpxchg(&last_value, last, ret);
++              last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
+       } while (unlikely(last != ret));
+       return ret;
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 76fa1e9..abf09ea 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
+ EXPORT_SYMBOL(pm_power_off);
+ static const struct desc_ptr no_idt = {};
+-static int reboot_mode;
++static unsigned short reboot_mode;
+ enum reboot_type reboot_type = BOOT_ACPI;
+ int reboot_force;
+@@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
+ void __noreturn machine_real_restart(unsigned int type)
+ {
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
++      struct desc_struct *gdt;
++#endif
++
+       local_irq_disable();
+       /*
+@@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
+       /* Jump to the identity-mapped low memory code */
+ #ifdef CONFIG_X86_32
+-      asm volatile("jmpl *%0" : :
++
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      gdt = get_cpu_gdt_table(smp_processor_id());
++      pax_open_kernel();
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      gdt[GDT_ENTRY_KERNEL_DS].type = 3;
++      gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
++      loadsegment(ds, __KERNEL_DS);
++      loadsegment(es, __KERNEL_DS);
++      loadsegment(ss, __KERNEL_DS);
++#endif
++#ifdef CONFIG_PAX_KERNEXEC
++      gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
++      gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
++      gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
++      gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
++      gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
++      gdt[GDT_ENTRY_KERNEL_CS].g = 1;
++#endif
++      pax_close_kernel();
++#endif
++
++      asm volatile("ljmpl *%0" : :
+                    "rm" (real_mode_header->machine_real_restart_asm),
+                    "a" (type));
+ #else
+@@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
+  * try to force a triple fault and then cycle between hitting the keyboard
+  * controller and doing that
+  */
+-static void native_machine_emergency_restart(void)
++static void __noreturn native_machine_emergency_restart(void)
+ {
+       int i;
+       int attempt = 0;
+@@ -654,13 +681,13 @@ void native_machine_shutdown(void)
+ #endif
+ }
+-static void __machine_emergency_restart(int emergency)
++static void __noreturn __machine_emergency_restart(int emergency)
+ {
+       reboot_emergency = emergency;
+       machine_ops.emergency_restart();
+ }
+-static void native_machine_restart(char *__unused)
++static void __noreturn native_machine_restart(char *__unused)
+ {
+       pr_notice("machine restart\n");
+@@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
+       __machine_emergency_restart(0);
+ }
+-static void native_machine_halt(void)
++static void __noreturn native_machine_halt(void)
+ {
+       /* Stop other cpus and apics */
+       machine_shutdown();
+@@ -679,7 +706,7 @@ static void native_machine_halt(void)
+       stop_this_cpu(NULL);
+ }
+-static void native_machine_power_off(void)
++static void __noreturn native_machine_power_off(void)
+ {
+       if (pm_power_off) {
+               if (!reboot_force)
+@@ -688,9 +715,10 @@ static void native_machine_power_off(void)
+       }
+       /* A fallback in case there is no PM info available */
+       tboot_shutdown(TB_SHUTDOWN_HALT);
++      unreachable();
+ }
+-struct machine_ops machine_ops = {
++struct machine_ops machine_ops __read_only = {
+       .power_off = native_machine_power_off,
+       .shutdown = native_machine_shutdown,
+       .emergency_restart = native_machine_emergency_restart,
+diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
+index c8e41e9..64049ef 100644
+--- a/arch/x86/kernel/reboot_fixups_32.c
++++ b/arch/x86/kernel/reboot_fixups_32.c
+@@ -57,7 +57,7 @@ struct device_fixup {
+       unsigned int vendor;
+       unsigned int device;
+       void (*reboot_fixup)(struct pci_dev *);
+-};
++} __do_const;
+ /*
+  * PCI ids solely used for fixups_table go here
+diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
+index f2bb9c9..bed145d7 100644
+--- a/arch/x86/kernel/relocate_kernel_64.S
++++ b/arch/x86/kernel/relocate_kernel_64.S
+@@ -11,6 +11,7 @@
+ #include <asm/kexec.h>
+ #include <asm/processor-flags.h>
+ #include <asm/pgtable_types.h>
++#include <asm/alternative-asm.h>
+ /*
+  * Must be relocatable PIC code callable as a C function
+@@ -167,6 +168,7 @@ identity_mapped:
+       xorq    %r14, %r14
+       xorq    %r15, %r15
++      pax_force_retaddr 0, 1
+       ret
+ 1:
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index 56f7fcf..2cfe4f1 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -110,6 +110,7 @@
+ #include <asm/mce.h>
+ #include <asm/alternative.h>
+ #include <asm/prom.h>
++#include <asm/boot.h>
+ /*
+  * max_low_pfn_mapped: highest direct mapped pfn under 4GB
+@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
+ #endif
+-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
+-unsigned long mmu_cr4_features;
++#ifdef CONFIG_X86_64
++unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
++#elif defined(CONFIG_X86_PAE)
++unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
+ #else
+-unsigned long mmu_cr4_features = X86_CR4_PAE;
++unsigned long mmu_cr4_features __read_only;
+ #endif
++void set_in_cr4(unsigned long mask)
++{
++      unsigned long cr4 = read_cr4();
++
++      if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
++              return;
++
++      pax_open_kernel();
++      mmu_cr4_features |= mask;
++      pax_close_kernel();
++
++      if (trampoline_cr4_features)
++              *trampoline_cr4_features = mmu_cr4_features;
++      cr4 |= mask;
++      write_cr4(cr4);
++}
++EXPORT_SYMBOL(set_in_cr4);
++
++void clear_in_cr4(unsigned long mask)
++{
++      unsigned long cr4 = read_cr4();
++
++      if (!(cr4 & mask) && cr4 == mmu_cr4_features)
++              return;
++
++      pax_open_kernel();
++      mmu_cr4_features &= ~mask;
++      pax_close_kernel();
++
++      if (trampoline_cr4_features)
++              *trampoline_cr4_features = mmu_cr4_features;
++      cr4 &= ~mask;
++      write_cr4(cr4);
++}
++EXPORT_SYMBOL(clear_in_cr4);
++
+ /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
+ int bootloader_type, bootloader_version;
+@@ -444,7 +483,7 @@ static void __init parse_setup_data(void)
+               switch (data->type) {
+               case SETUP_E820_EXT:
+-                      parse_e820_ext(data);
++                      parse_e820_ext((struct setup_data __force_kernel *)data);
+                       break;
+               case SETUP_DTB:
+                       add_dtb(pa_data);
+@@ -771,7 +810,7 @@ static void __init trim_bios_range(void)
+        * area (640->1Mb) as ram even though it is not.
+        * take them out.
+        */
+-      e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
++      e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
+       sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ }
+@@ -779,7 +818,7 @@ static void __init trim_bios_range(void)
+ /* called before trim_bios_range() to spare extra sanitize */
+ static void __init e820_add_kernel_range(void)
+ {
+-      u64 start = __pa_symbol(_text);
++      u64 start = __pa_symbol(ktla_ktva(_text));
+       u64 size = __pa_symbol(_end) - start;
+       /*
+@@ -841,8 +880,12 @@ static void __init trim_low_memory_range(void)
+ void __init setup_arch(char **cmdline_p)
+ {
++#ifdef CONFIG_X86_32
++      memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
++#else
+       memblock_reserve(__pa_symbol(_text),
+                        (unsigned long)__bss_stop - (unsigned long)_text);
++#endif
+       early_reserve_initrd();
+@@ -934,14 +977,14 @@ void __init setup_arch(char **cmdline_p)
+       if (!boot_params.hdr.root_flags)
+               root_mountflags &= ~MS_RDONLY;
+-      init_mm.start_code = (unsigned long) _text;
+-      init_mm.end_code = (unsigned long) _etext;
++      init_mm.start_code = ktla_ktva((unsigned long) _text);
++      init_mm.end_code = ktla_ktva((unsigned long) _etext);
+       init_mm.end_data = (unsigned long) _edata;
+       init_mm.brk = _brk_end;
+-      code_resource.start = __pa_symbol(_text);
+-      code_resource.end = __pa_symbol(_etext)-1;
+-      data_resource.start = __pa_symbol(_etext);
++      code_resource.start = __pa_symbol(ktla_ktva(_text));
++      code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
++      data_resource.start = __pa_symbol(_sdata);
+       data_resource.end = __pa_symbol(_edata)-1;
+       bss_resource.start = __pa_symbol(__bss_start);
+       bss_resource.end = __pa_symbol(__bss_stop)-1;
+diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
+index 5cdff03..80fa283 100644
+--- a/arch/x86/kernel/setup_percpu.c
++++ b/arch/x86/kernel/setup_percpu.c
+@@ -21,19 +21,17 @@
+ #include <asm/cpu.h>
+ #include <asm/stackprotector.h>
+-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
++#ifdef CONFIG_SMP
++DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
+ EXPORT_PER_CPU_SYMBOL(cpu_number);
++#endif
+-#ifdef CONFIG_X86_64
+ #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
+-#else
+-#define BOOT_PERCPU_OFFSET 0
+-#endif
+ DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
+ EXPORT_PER_CPU_SYMBOL(this_cpu_off);
+-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
++unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
+       [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
+ };
+ EXPORT_SYMBOL(__per_cpu_offset);
+@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
+ {
+ #ifdef CONFIG_NEED_MULTIPLE_NODES
+       pg_data_t *last = NULL;
+-      unsigned int cpu;
++      int cpu;
+       for_each_possible_cpu(cpu) {
+               int node = early_cpu_to_node(cpu);
+@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
+ {
+ #ifdef CONFIG_X86_32
+       struct desc_struct gdt;
++      unsigned long base = per_cpu_offset(cpu);
+-      pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
+-                      0x2 | DESCTYPE_S, 0x8);
+-      gdt.s = 1;
++      pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
++                      0x83 | DESCTYPE_S, 0xC);
+       write_gdt_entry(get_cpu_gdt_table(cpu),
+                       GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
+ #endif
+@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
+       /* alrighty, percpu areas up and running */
+       delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+       for_each_possible_cpu(cpu) {
++#ifdef CONFIG_CC_STACKPROTECTOR
++#ifdef CONFIG_X86_32
++              unsigned long canary = per_cpu(stack_canary.canary, cpu);
++#endif
++#endif
+               per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
+               per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
+               per_cpu(cpu_number, cpu) = cpu;
+@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
+                */
+               set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
+ #endif
++#ifdef CONFIG_CC_STACKPROTECTOR
++#ifdef CONFIG_X86_32
++              if (!cpu)
++                      per_cpu(stack_canary.canary, cpu) = canary;
++#endif
++#endif
+               /*
+                * Up to this point, the boot CPU has been using .init.data
+                * area.  Reload any changed state for the boot CPU.
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index 6956299..18126ec4 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -196,7 +196,7 @@ static unsigned long align_sigframe(unsigned long sp)
+        * Align the stack pointer according to the i386 ABI,
+        * i.e. so that on function entry ((sp + 4) & 15) == 0.
+        */
+-      sp = ((sp + 4) & -16ul) - 4;
++      sp = ((sp - 12) & -16ul) - 4;
+ #else /* !CONFIG_X86_32 */
+       sp = round_down(sp, 16) - 8;
+ #endif
+@@ -304,9 +304,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
+       }
+       if (current->mm->context.vdso)
+-              restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
++              restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
+       else
+-              restorer = &frame->retcode;
++              restorer = (void __user *)&frame->retcode;
+       if (ksig->ka.sa.sa_flags & SA_RESTORER)
+               restorer = ksig->ka.sa.sa_restorer;
+@@ -320,7 +320,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
+        * reasons and because gdb uses it as a signature to notice
+        * signal handler stack frames.
+        */
+-      err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
++      err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
+       if (err)
+               return -EFAULT;
+@@ -364,10 +364,13 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+               else
+                       put_user_ex(0, &frame->uc.uc_flags);
+               put_user_ex(0, &frame->uc.uc_link);
+-              err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
++              __save_altstack_ex(&frame->uc.uc_stack, regs->sp);
+               /* Set up to return from userspace.  */
+-              restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
++              if (current->mm->context.vdso)
++                      restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
++              else
++                      restorer = (void __user *)&frame->retcode;
+               if (ksig->ka.sa.sa_flags & SA_RESTORER)
+                       restorer = ksig->ka.sa.sa_restorer;
+               put_user_ex(restorer, &frame->pretcode);
+@@ -379,7 +382,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+                * reasons and because gdb uses it as a signature to notice
+                * signal handler stack frames.
+                */
+-              put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
++              put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
+       } put_user_catch(err);
+       
+       err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+@@ -429,7 +432,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+               else
+                       put_user_ex(0, &frame->uc.uc_flags);
+               put_user_ex(0, &frame->uc.uc_link);
+-              err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
++              __save_altstack_ex(&frame->uc.uc_stack, regs->sp);
+               /* Set up to return from userspace.  If provided, use a stub
+                  already in userspace.  */
+@@ -615,7 +618,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
+ {
+       int usig = signr_convert(ksig->sig);
+       sigset_t *set = sigmask_to_save();
+-      compat_sigset_t *cset = (compat_sigset_t *) set;
++      sigset_t sigcopy;
++      compat_sigset_t *cset;
++
++      sigcopy = *set;
++
++      cset = (compat_sigset_t *) &sigcopy;
+       /* Set up the stack frame */
+       if (is_ia32_frame()) {
+@@ -626,7 +634,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
+       } else if (is_x32_frame()) {
+               return x32_setup_rt_frame(ksig, cset, regs);
+       } else {
+-              return __setup_rt_frame(ksig->sig, ksig, set, regs);
++              return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
+       }
+ }
+diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
+index 48d2b7d..90d328a 100644
+--- a/arch/x86/kernel/smp.c
++++ b/arch/x86/kernel/smp.c
+@@ -285,7 +285,7 @@ static int __init nonmi_ipi_setup(char *str)
+ __setup("nonmi_ipi", nonmi_ipi_setup);
+-struct smp_ops smp_ops = {
++struct smp_ops smp_ops __read_only = {
+       .smp_prepare_boot_cpu   = native_smp_prepare_boot_cpu,
+       .smp_prepare_cpus       = native_smp_prepare_cpus,
+       .smp_cpus_done          = native_smp_cpus_done,
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index bfd348e..914f323 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -251,14 +251,18 @@ notrace static void __cpuinit start_secondary(void *unused)
+       enable_start_cpu0 = 0;
+-#ifdef CONFIG_X86_32
+-      /* switch away from the initial page table */
+-      load_cr3(swapper_pg_dir);
+-      __flush_tlb_all();
+-#endif
+-
+       /* otherwise gcc will move up smp_processor_id before the cpu_init */
+       barrier();
++
++      /* switch away from the initial page table */
++#ifdef CONFIG_PAX_PER_CPU_PGD
++      load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
++      __flush_tlb_all();
++#elif defined(CONFIG_X86_32)
++      load_cr3(swapper_pg_dir);
++      __flush_tlb_all();
++#endif
++
+       /*
+        * Check TSC synchronization with the BP:
+        */
+@@ -748,6 +752,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
+       idle->thread.sp = (unsigned long) (((struct pt_regs *)
+                         (THREAD_SIZE +  task_stack_page(idle))) - 1);
+       per_cpu(current_task, cpu) = idle;
++      per_cpu(current_tinfo, cpu) = &idle->tinfo;
+ #ifdef CONFIG_X86_32
+       /* Stack for startup_32 can be just as for start_secondary onwards */
+@@ -755,11 +760,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
+ #else
+       clear_tsk_thread_flag(idle, TIF_FORK);
+       initial_gs = per_cpu_offset(cpu);
+-      per_cpu(kernel_stack, cpu) =
+-              (unsigned long)task_stack_page(idle) -
+-              KERNEL_STACK_OFFSET + THREAD_SIZE;
++      per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
+ #endif
++
++      pax_open_kernel();
+       early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
++      pax_close_kernel();
++
+       initial_code = (unsigned long)start_secondary;
+       stack_start  = idle->thread.sp;
+@@ -908,6 +915,15 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
+       /* the FPU context is blank, nobody can own it */
+       __cpu_disable_lazy_restore(cpu);
++#ifdef CONFIG_PAX_PER_CPU_PGD
++      clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
++                      swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++                      KERNEL_PGD_PTRS);
++      clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
++                      swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++                      KERNEL_PGD_PTRS);
++#endif
++
+       err = do_boot_cpu(apicid, cpu, tidle);
+       if (err) {
+               pr_debug("do_boot_cpu failed %d\n", err);
+diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
+index 9b4d51d..5d28b58 100644
+--- a/arch/x86/kernel/step.c
++++ b/arch/x86/kernel/step.c
+@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
+               struct desc_struct *desc;
+               unsigned long base;
+-              seg &= ~7UL;
++              seg >>= 3;
+               mutex_lock(&child->mm->context.lock);
+-              if (unlikely((seg >> 3) >= child->mm->context.size))
++              if (unlikely(seg >= child->mm->context.size))
+                       addr = -1L; /* bogus selector, access would fault */
+               else {
+                       desc = child->mm->context.ldt + seg;
+@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
+                       addr += base;
+               }
+               mutex_unlock(&child->mm->context.lock);
+-      }
++      } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
++              addr = ktla_ktva(addr);
+       return addr;
+ }
+@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
+       unsigned char opcode[15];
+       unsigned long addr = convert_ip_to_linear(child, regs);
++      if (addr == -EINVAL)
++              return 0;
++
+       copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
+       for (i = 0; i < copied; i++) {
+               switch (opcode[i]) {
+diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
+new file mode 100644
+index 0000000..5877189
+--- /dev/null
++++ b/arch/x86/kernel/sys_i386_32.c
+@@ -0,0 +1,189 @@
++/*
++ * This file contains various random system calls that
++ * have a non-standard calling sequence on the Linux/i386
++ * platform.
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/fs.h>
++#include <linux/smp.h>
++#include <linux/sem.h>
++#include <linux/msg.h>
++#include <linux/shm.h>
++#include <linux/stat.h>
++#include <linux/syscalls.h>
++#include <linux/mman.h>
++#include <linux/file.h>
++#include <linux/utsname.h>
++#include <linux/ipc.h>
++#include <linux/elf.h>
++
++#include <linux/uaccess.h>
++#include <linux/unistd.h>
++
++#include <asm/syscalls.h>
++
++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
++{
++      unsigned long pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++              pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++      if (flags & MAP_FIXED)
++              if (len > pax_task_size || addr > pax_task_size - len)
++                      return -EINVAL;
++
++      return 0;
++}
++
++/*
++ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
++ */
++static unsigned long get_align_mask(void)
++{
++      if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
++              return 0;
++
++      if (!(current->flags & PF_RANDOMIZE))
++              return 0;
++
++      return va_align.mask;
++}
++
++unsigned long
++arch_get_unmapped_area(struct file *filp, unsigned long addr,
++              unsigned long len, unsigned long pgoff, unsigned long flags)
++{
++      struct mm_struct *mm = current->mm;
++      struct vm_area_struct *vma;
++      unsigned long pax_task_size = TASK_SIZE;
++      struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (mm->pax_flags & MF_PAX_SEGMEXEC)
++              pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++      pax_task_size -= PAGE_SIZE;
++
++      if (len > pax_task_size)
++              return -ENOMEM;
++
++      if (flags & MAP_FIXED)
++              return addr;
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
++      if (addr) {
++              addr = PAGE_ALIGN(addr);
++              if (pax_task_size - len >= addr) {
++                      vma = find_vma(mm, addr);
++                      if (check_heap_stack_gap(vma, addr, len, offset))
++                              return addr;
++              }
++      }
++
++      info.flags = 0;
++      info.length = len;
++      info.align_mask = filp ? get_align_mask() : 0;
++      info.align_offset = pgoff << PAGE_SHIFT;
++      info.threadstack_offset = offset;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++      if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
++              info.low_limit = 0x00110000UL;
++              info.high_limit = mm->start_code;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      info.low_limit += mm->delta_mmap & 0x03FFF000UL;
++#endif
++
++              if (info.low_limit < info.high_limit) {
++                      addr = vm_unmapped_area(&info);
++                      if (!IS_ERR_VALUE(addr))
++                              return addr;
++              }
++      } else
++#endif
++
++      info.low_limit = mm->mmap_base;
++      info.high_limit = pax_task_size;
++
++      return vm_unmapped_area(&info);
++}
++
++unsigned long
++arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
++                        const unsigned long len, const unsigned long pgoff,
++                        const unsigned long flags)
++{
++      struct vm_area_struct *vma;
++      struct mm_struct *mm = current->mm;
++      unsigned long addr = addr0, pax_task_size = TASK_SIZE;
++      struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (mm->pax_flags & MF_PAX_SEGMEXEC)
++              pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++      pax_task_size -= PAGE_SIZE;
++
++      /* requested length too big for entire address space */
++      if (len > pax_task_size)
++              return -ENOMEM;
++
++      if (flags & MAP_FIXED)
++              return addr;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++      if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
++              goto bottomup;
++#endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
++      /* requesting a specific address */
++      if (addr) {
++              addr = PAGE_ALIGN(addr);
++              if (pax_task_size - len >= addr) {
++                      vma = find_vma(mm, addr);
++                      if (check_heap_stack_gap(vma, addr, len, offset))
++                              return addr;
++              }
++      }
++
++      info.flags = VM_UNMAPPED_AREA_TOPDOWN;
++      info.length = len;
++      info.low_limit = PAGE_SIZE;
++      info.high_limit = mm->mmap_base;
++      info.align_mask = filp ? get_align_mask() : 0;
++      info.align_offset = pgoff << PAGE_SHIFT;
++      info.threadstack_offset = offset;
++
++      addr = vm_unmapped_area(&info);
++      if (!(addr & ~PAGE_MASK))
++              return addr;
++      VM_BUG_ON(addr != -ENOMEM);
++
++bottomup:
++      /*
++       * A failed mmap() very likely causes application failure,
++       * so fall back to the bottom-up function here. This scenario
++       * can happen with large stack limits and large mmap()
++       * allocations.
++       */
++      return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
++}
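The 32-bit allocators added above look for a free range either bottom-up from mm->mmap_base or top-down below it, with the top-down path falling back to the bottom-up one at the bottomup: label when its search fails. A minimal userspace sketch of that search order, with an invented busy-interval list and limits standing in for the kernel's VMA tree (not kernel code):

/* Sketch only: interval list, limits and helper names are invented. */
#include <stdio.h>
#include <stdbool.h>

struct interval { unsigned long start, end; };        /* [start, end) */

static const struct interval busy[] = {               /* already-mapped ranges */
	{ 0x10000, 0x20000 },
	{ 0x30000, 0x90000 },
};
#define NBUSY (sizeof(busy) / sizeof(busy[0]))

static bool range_is_free(unsigned long start, unsigned long len)
{
	for (unsigned int i = 0; i < NBUSY; i++)
		if (start < busy[i].end && start + len > busy[i].start)
			return false;
	return true;
}

/* Top-down: highest aligned free slot below 'high'. */
static unsigned long find_topdown(unsigned long low, unsigned long high,
				  unsigned long len, unsigned long align)
{
	unsigned long a = (high - len) & ~(align - 1);

	while (a >= low) {
		if (range_is_free(a, len))
			return a;
		if (a < low + align)
			break;                 /* next step would go below 'low' */
		a -= align;
	}
	return 0;                              /* failure: caller falls back */
}

/* Bottom-up: lowest aligned free slot at or above 'low'. */
static unsigned long find_bottomup(unsigned long low, unsigned long high,
				   unsigned long len, unsigned long align)
{
	for (unsigned long a = (low + align - 1) & ~(align - 1);
	     a + len <= high; a += align)
		if (range_is_free(a, len))
			return a;
	return 0;
}

int main(void)
{
	unsigned long len = 0x8000, align = 0x1000;
	unsigned long addr = find_topdown(0x1000, 0xA0000, len, align);

	if (!addr)              /* same fallback as the bottomup: label above */
		addr = find_bottomup(0x1000, 0xA0000, len, align);
	printf("placed %#lx bytes at %#lx\n", len, addr);
	return 0;
}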
+diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
+index 48f8375..ace2781 100644
+--- a/arch/x86/kernel/sys_x86_64.c
++++ b/arch/x86/kernel/sys_x86_64.c
+@@ -81,8 +81,8 @@ out:
+       return error;
+ }
+-static void find_start_end(unsigned long flags, unsigned long *begin,
+-                         unsigned long *end)
++static void find_start_end(struct mm_struct *mm, unsigned long flags,
++                         unsigned long *begin, unsigned long *end)
+ {
+       if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
+               unsigned long new_begin;
+@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
+                               *begin = new_begin;
+               }
+       } else {
+-              *begin = mmap_legacy_base();
++              *begin = mm->mmap_base;
+               *end = TASK_SIZE;
+       }
+ }
+@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       struct vm_area_struct *vma;
+       struct vm_unmapped_area_info info;
+       unsigned long begin, end;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       if (flags & MAP_FIXED)
+               return addr;
+-      find_start_end(flags, &begin, &end);
++      find_start_end(mm, flags, &begin, &end);
+       if (len > end)
+               return -ENOMEM;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+               addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (end - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       info.high_limit = end;
+       info.align_mask = filp ? get_align_mask() : 0;
+       info.align_offset = pgoff << PAGE_SHIFT;
++      info.threadstack_offset = offset;
+       return vm_unmapped_area(&info);
+ }
+@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       struct mm_struct *mm = current->mm;
+       unsigned long addr = addr0;
+       struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       /* requested length too big for entire address space */
+       if (len > TASK_SIZE)
+@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
+               goto bottomup;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       /* requesting a specific address */
+       if (addr) {
+               addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (TASK_SIZE - len >= addr &&
+-                              (!vma || addr + len <= vma->vm_start))
++              if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       info.high_limit = mm->mmap_base;
+       info.align_mask = filp ? get_align_mask() : 0;
+       info.align_offset = pgoff << PAGE_SHIFT;
++      info.threadstack_offset = offset;
+       addr = vm_unmapped_area(&info);
+       if (!(addr & ~PAGE_MASK))
+               return addr;
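Both 64-bit paths above swap the open-coded `!vma || addr + len <= vma->vm_start` test for check_heap_stack_gap() and thread a per-mapping random offset into vm_unmapped_area_info. The core idea of the gap test, sketched with an invented mapping type and a fixed guard size (the real helper also takes the randomized thread-stack offset passed in above into account):

#include <stdio.h>
#include <stdbool.h>

struct mapping { unsigned long start, end; };  /* nearest mapping above the hint */

/* Accept 'addr' only if the block plus a guard gap fits below 'next'. */
static bool gap_ok(const struct mapping *next, unsigned long addr,
		   unsigned long len, unsigned long gap)
{
	if (!next)
		return true;                   /* nothing mapped above the hint */
	if (next->start < addr)
		return false;                  /* hint lies inside or above it */
	return next->start - addr >= len + gap;
}

int main(void)
{
	struct mapping m = { 0x7f0000010000UL, 0x7f0000022000UL };

	printf("%d\n", gap_ok(&m, 0x7f0000000000UL, 0x800, 0x1000)); /* 1: fits */
	printf("%d\n", gap_ok(&m, 0x7f000000f000UL, 0x800, 0x1000)); /* 0: too close */
	return 0;
}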
+diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
+index f84fe00..f41d9f1 100644
+--- a/arch/x86/kernel/tboot.c
++++ b/arch/x86/kernel/tboot.c
+@@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
+ void tboot_shutdown(u32 shutdown_type)
+ {
+-      void (*shutdown)(void);
++      void (* __noreturn shutdown)(void);
+       if (!tboot_enabled())
+               return;
+@@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
+       switch_to_tboot_pt();
+-      shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
++      shutdown = (void *)tboot->shutdown_entry;
+       shutdown();
+       /* should not reach here */
+@@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
+       return 0;
+ }
+-static atomic_t ap_wfs_count;
++static atomic_unchecked_t ap_wfs_count;
+ static int tboot_wait_for_aps(int num_aps)
+ {
+@@ -324,16 +324,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
+ {
+       switch (action) {
+       case CPU_DYING:
+-              atomic_inc(&ap_wfs_count);
++              atomic_inc_unchecked(&ap_wfs_count);
+               if (num_online_cpus() == 1)
+-                      if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
++                      if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
+                               return NOTIFY_BAD;
+               break;
+       }
+       return NOTIFY_OK;
+ }
+-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
++static struct notifier_block tboot_cpu_notifier =
+ {
+       .notifier_call = tboot_cpu_callback,
+ };
+@@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
+       tboot_create_trampoline();
+-      atomic_set(&ap_wfs_count, 0);
++      atomic_set_unchecked(&ap_wfs_count, 0);
+       register_hotcpu_notifier(&tboot_cpu_notifier);
+       acpi_os_set_prepare_sleep(&tboot_sleep);
+diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
+index 24d3c91..d06b473 100644
+--- a/arch/x86/kernel/time.c
++++ b/arch/x86/kernel/time.c
+@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
+ {
+       unsigned long pc = instruction_pointer(regs);
+-      if (!user_mode_vm(regs) && in_lock_functions(pc)) {
++      if (!user_mode(regs) && in_lock_functions(pc)) {
+ #ifdef CONFIG_FRAME_POINTER
+-              return *(unsigned long *)(regs->bp + sizeof(long));
++              return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
+ #else
+               unsigned long *sp =
+                       (unsigned long *)kernel_stack_pointer(regs);
+@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
+                * or above a saved flags. Eflags has bits 22-31 zero,
+                * kernel addresses don't.
+                */
++
++#ifdef CONFIG_PAX_KERNEXEC
++              return ktla_ktva(sp[0]);
++#else
+               if (sp[0] >> 22)
+                       return sp[0];
+               if (sp[1] >> 22)
+                       return sp[1];
+ #endif
++
++#endif
+       }
+       return pc;
+ }
+diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
+index f7fec09..9991981 100644
+--- a/arch/x86/kernel/tls.c
++++ b/arch/x86/kernel/tls.c
+@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
+       if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+               return -EINVAL;
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
++              return -EINVAL;
++#endif
++
+       set_tls_desc(p, idx, &info, 1);
+       return 0;
+@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
+       if (kbuf)
+               info = kbuf;
+-      else if (__copy_from_user(infobuf, ubuf, count))
++      else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
+               return -EFAULT;
+       else
+               info = infobuf;
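The regset change above adds a `count > sizeof infobuf` check so a caller-supplied length can never copy past the on-stack buffer. The same bound-then-copy pattern in a standalone sketch, with invented buffer and source names and memcpy standing in for __copy_from_user():

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Copy at most dst_size bytes of caller-controlled data. */
static int bounded_copy(void *dst_buf, size_t dst_size,
			const void *src, size_t count)
{
	if (count > dst_size)
		return -EFAULT;        /* reject instead of overflowing */
	memcpy(dst_buf, src, count);   /* __copy_from_user() in the kernel */
	return 0;
}

int main(void)
{
	char infobuf[32];
	const char payload[64] = "caller data";

	printf("%d\n", bounded_copy(infobuf, sizeof(infobuf), payload, 16)); /* 0   */
	printf("%d\n", bounded_copy(infobuf, sizeof(infobuf), payload, 64)); /* -14 */
	return 0;
}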
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 772e2a8..bad5bf6 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -68,12 +68,6 @@
+ #include <asm/setup.h>
+ asmlinkage int system_call(void);
+-
+-/*
+- * The IDT has to be page-aligned to simplify the Pentium
+- * F0 0F bug workaround.
+- */
+-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
+ #endif
+ DECLARE_BITMAP(used_vectors, NR_VECTORS);
+@@ -106,11 +100,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
+ }
+ static int __kprobes
+-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
++do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
+                 struct pt_regs *regs, long error_code)
+ {
+ #ifdef CONFIG_X86_32
+-      if (regs->flags & X86_VM_MASK) {
++      if (v8086_mode(regs)) {
+               /*
+                * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
+                * On nmi (interrupt 2), do_trap should not be called.
+@@ -123,12 +117,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
+               return -1;
+       }
+ #endif
+-      if (!user_mode(regs)) {
++      if (!user_mode_novm(regs)) {
+               if (!fixup_exception(regs)) {
+                       tsk->thread.error_code = error_code;
+                       tsk->thread.trap_nr = trapnr;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++                      if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
++                              str = "PAX: suspicious stack segment fault";
++#endif
++
+                       die(str, regs, error_code);
+               }
++
++#ifdef CONFIG_PAX_REFCOUNT
++              if (trapnr == 4)
++                      pax_report_refcount_overflow(regs);
++#endif
++
+               return 0;
+       }
+@@ -136,7 +142,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
+ }
+ static void __kprobes
+-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
++do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
+       long error_code, siginfo_t *info)
+ {
+       struct task_struct *tsk = current;
+@@ -160,7 +166,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
+       if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
+           printk_ratelimit()) {
+               pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
+-                      tsk->comm, tsk->pid, str,
++                      tsk->comm, task_pid_nr(tsk), str,
+                       regs->ip, regs->sp, error_code);
+               print_vma_addr(" in ", regs->ip);
+               pr_cont("\n");
+@@ -273,7 +279,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
+       conditional_sti(regs);
+ #ifdef CONFIG_X86_32
+-      if (regs->flags & X86_VM_MASK) {
++      if (v8086_mode(regs)) {
+               local_irq_enable();
+               handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
+               goto exit;
+@@ -281,18 +287,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
+ #endif
+       tsk = current;
+-      if (!user_mode(regs)) {
++      if (!user_mode_novm(regs)) {
+               if (fixup_exception(regs))
+                       goto exit;
+               tsk->thread.error_code = error_code;
+               tsk->thread.trap_nr = X86_TRAP_GP;
+               if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
+-                             X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
++                             X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++              if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
++                      die("PAX: suspicious general protection fault", regs, error_code);
++              else
++#endif
++
+                       die("general protection fault", regs, error_code);
++              }
+               goto exit;
+       }
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++      if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
++              struct mm_struct *mm = tsk->mm;
++              unsigned long limit;
++
++              down_write(&mm->mmap_sem);
++              limit = mm->context.user_cs_limit;
++              if (limit < TASK_SIZE) {
++                      track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
++                      up_write(&mm->mmap_sem);
++                      return;
++              }
++              up_write(&mm->mmap_sem);
++      }
++#endif
++
+       tsk->thread.error_code = error_code;
+       tsk->thread.trap_nr = X86_TRAP_GP;
+@@ -450,7 +480,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+       /* It's safe to allow irq's after DR6 has been saved */
+       preempt_conditional_sti(regs);
+-      if (regs->flags & X86_VM_MASK) {
++      if (v8086_mode(regs)) {
+               handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
+                                       X86_TRAP_DB);
+               preempt_conditional_cli(regs);
+@@ -465,7 +495,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+        * We already checked v86 mode above, so we can check for kernel mode
+        * by just checking the CPL of CS.
+        */
+-      if ((dr6 & DR_STEP) && !user_mode(regs)) {
++      if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
+               tsk->thread.debugreg6 &= ~DR_STEP;
+               set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
+               regs->flags &= ~X86_EFLAGS_TF;
+@@ -497,7 +527,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
+               return;
+       conditional_sti(regs);
+-      if (!user_mode_vm(regs))
++      if (!user_mode(regs))
+       {
+               if (!fixup_exception(regs)) {
+                       task->thread.error_code = error_code;
+diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
+index 2ed8459..7cf329f 100644
+--- a/arch/x86/kernel/uprobes.c
++++ b/arch/x86/kernel/uprobes.c
+@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
+       int ret = NOTIFY_DONE;
+       /* We are only interested in userspace traps */
+-      if (regs && !user_mode_vm(regs))
++      if (regs && !user_mode(regs))
+               return NOTIFY_DONE;
+       switch (val) {
+@@ -719,7 +719,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
+       if (ncopied != rasize) {
+               pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
+-                      "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
++                      "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
+               force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
+       }
+diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
+index b9242ba..50c5edd 100644
+--- a/arch/x86/kernel/verify_cpu.S
++++ b/arch/x86/kernel/verify_cpu.S
+@@ -20,6 +20,7 @@
+  *    arch/x86/boot/compressed/head_64.S: Boot cpu verification
+  *    arch/x86/kernel/trampoline_64.S: secondary processor verification
+  *    arch/x86/kernel/head_32.S: processor startup
++ *    arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
+  *
+  *    verify_cpu, returns the status of longmode and SSE in register %eax.
+  *            0: Success    1: Failure
+diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
+index e8edcf5..27f9344 100644
+--- a/arch/x86/kernel/vm86_32.c
++++ b/arch/x86/kernel/vm86_32.c
+@@ -44,6 +44,7 @@
+ #include <linux/ptrace.h>
+ #include <linux/audit.h>
+ #include <linux/stddef.h>
++#include <linux/grsecurity.h>
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
+@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
+               do_exit(SIGSEGV);
+       }
+-      tss = &per_cpu(init_tss, get_cpu());
++      tss = init_tss + get_cpu();
+       current->thread.sp0 = current->thread.saved_sp0;
+       current->thread.sysenter_cs = __KERNEL_CS;
+       load_sp0(tss, &current->thread);
+@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
+       if (tsk->thread.saved_sp0)
+               return -EPERM;
++
++#ifdef CONFIG_GRKERNSEC_VM86
++      if (!capable(CAP_SYS_RAWIO)) {
++              gr_handle_vm86();
++              return -EPERM;
++      }
++#endif
++
+       tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
+                                      offsetof(struct kernel_vm86_struct, vm86plus) -
+                                      sizeof(info.regs));
+@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
+       int tmp;
+       struct vm86plus_struct __user *v86;
++#ifdef CONFIG_GRKERNSEC_VM86
++      if (!capable(CAP_SYS_RAWIO)) {
++              gr_handle_vm86();
++              return -EPERM;
++      }
++#endif
++
+       tsk = current;
+       switch (cmd) {
+       case VM86_REQUEST_IRQ:
+@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
+       tsk->thread.saved_fs = info->regs32->fs;
+       tsk->thread.saved_gs = get_user_gs(info->regs32);
+-      tss = &per_cpu(init_tss, get_cpu());
++      tss = init_tss + get_cpu();
+       tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
+       if (cpu_has_sep)
+               tsk->thread.sysenter_cs = 0;
+@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
+               goto cannot_handle;
+       if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
+               goto cannot_handle;
+-      intr_ptr = (unsigned long __user *) (i << 2);
++      intr_ptr = (__force unsigned long __user *) (i << 2);
+       if (get_user(segoffs, intr_ptr))
+               goto cannot_handle;
+       if ((segoffs >> 16) == BIOSSEG)
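Both vm86 entry points above are now gated on capable(CAP_SYS_RAWIO) when GRKERNSEC_VM86 is set, so unprivileged callers get -EPERM before any vm86 state is touched. A rough userspace analogue of refusing a legacy interface up front, using an effective-uid check purely as a stand-in for the capability test:

#include <stdio.h>
#include <errno.h>
#include <unistd.h>

static int legacy_interface(void)
{
	if (geteuid() != 0)            /* stand-in for capable(CAP_SYS_RAWIO) */
		return -EPERM;         /* refuse before doing any setup */
	/* ... privileged legacy work would go here ... */
	return 0;
}

int main(void)
{
	printf("legacy_interface() = %d\n", legacy_interface());
	return 0;
}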
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index 10c4f30..57377c2 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -26,6 +26,13 @@
+ #include <asm/page_types.h>
+ #include <asm/cache.h>
+ #include <asm/boot.h>
++#include <asm/segment.h>
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++#define __KERNEL_TEXT_OFFSET  (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
++#else
++#define __KERNEL_TEXT_OFFSET  0
++#endif
+ #undef i386     /* in case the preprocessor is a 32bit one */
+@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
+ PHDRS {
+       text PT_LOAD FLAGS(5);          /* R_E */
++#ifdef CONFIG_X86_32
++      module PT_LOAD FLAGS(5);        /* R_E */
++#endif
++#ifdef CONFIG_XEN
++      rodata PT_LOAD FLAGS(5);        /* R_E */
++#else
++      rodata PT_LOAD FLAGS(4);        /* R__ */
++#endif
+       data PT_LOAD FLAGS(6);          /* RW_ */
+-#ifdef CONFIG_X86_64
++      init.begin PT_LOAD FLAGS(6);    /* RW_ */
+ #ifdef CONFIG_SMP
+       percpu PT_LOAD FLAGS(6);        /* RW_ */
+ #endif
++      text.init PT_LOAD FLAGS(5);     /* R_E */
++      text.exit PT_LOAD FLAGS(5);     /* R_E */
+       init PT_LOAD FLAGS(7);          /* RWE */
+-#endif
+       note PT_NOTE FLAGS(0);          /* ___ */
+ }
+ SECTIONS
+ {
+ #ifdef CONFIG_X86_32
+-        . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
+-        phys_startup_32 = startup_32 - LOAD_OFFSET;
++      . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
+ #else
+-        . = __START_KERNEL;
+-        phys_startup_64 = startup_64 - LOAD_OFFSET;
++      . = __START_KERNEL;
+ #endif
+       /* Text and read-only data */
+-      .text :  AT(ADDR(.text) - LOAD_OFFSET) {
+-              _text = .;
++      .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
+               /* bootstrapping code */
++#ifdef CONFIG_X86_32
++              phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
++#else
++              phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
++#endif
++              __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
++              _text = .;
+               HEAD_TEXT
+               . = ALIGN(8);
+               _stext = .;
+@@ -104,13 +124,48 @@ SECTIONS
+               IRQENTRY_TEXT
+               *(.fixup)
+               *(.gnu.warning)
+-              /* End of text section */
+-              _etext = .;
+       } :text = 0x9090
+-      NOTES :text :note
++      . += __KERNEL_TEXT_OFFSET;
+-      EXCEPTION_TABLE(16) :text = 0x9090
++#ifdef CONFIG_X86_32
++      . = ALIGN(PAGE_SIZE);
++      .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
++
++#ifdef CONFIG_PAX_KERNEXEC
++              MODULES_EXEC_VADDR = .;
++              BYTE(0)
++              . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
++              . = ALIGN(HPAGE_SIZE) - 1;
++              MODULES_EXEC_END = .;
++#endif
++
++      } :module
++#endif
++
++      .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
++              /* End of text section */
++              BYTE(0)
++              _etext = . - __KERNEL_TEXT_OFFSET;
++      }
++
++#ifdef CONFIG_X86_32
++      . = ALIGN(PAGE_SIZE);
++      .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
++              *(.idt)
++              . = ALIGN(PAGE_SIZE);
++              *(.empty_zero_page)
++              *(.initial_pg_fixmap)
++              *(.initial_pg_pmd)
++              *(.initial_page_table)
++              *(.swapper_pg_dir)
++      } :rodata
++#endif
++
++      . = ALIGN(PAGE_SIZE);
++      NOTES :rodata :note
++
++      EXCEPTION_TABLE(16) :rodata
+ #if defined(CONFIG_DEBUG_RODATA)
+       /* .text should occupy whole number of pages */
+@@ -122,16 +177,20 @@ SECTIONS
+       /* Data */
+       .data : AT(ADDR(.data) - LOAD_OFFSET) {
++
++#ifdef CONFIG_PAX_KERNEXEC
++              . = ALIGN(HPAGE_SIZE);
++#else
++              . = ALIGN(PAGE_SIZE);
++#endif
++
+               /* Start of data section */
+               _sdata = .;
+               /* init_task */
+               INIT_TASK_DATA(THREAD_SIZE)
+-#ifdef CONFIG_X86_32
+-              /* 32 bit has nosave before _edata */
+               NOSAVE_DATA
+-#endif
+               PAGE_ALIGNED_DATA(PAGE_SIZE)
+@@ -172,12 +231,19 @@ SECTIONS
+ #endif /* CONFIG_X86_64 */
+       /* Init code and data - will be freed after init */
+-      . = ALIGN(PAGE_SIZE);
+       .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
++              BYTE(0)
++
++#ifdef CONFIG_PAX_KERNEXEC
++              . = ALIGN(HPAGE_SIZE);
++#else
++              . = ALIGN(PAGE_SIZE);
++#endif
++
+               __init_begin = .; /* paired with __init_end */
+-      }
++      } :init.begin
+-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
++#ifdef CONFIG_SMP
+       /*
+        * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
+        * output PHDR, so the next output section - .init.text - should
+@@ -186,12 +252,27 @@ SECTIONS
+       PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
+ #endif
+-      INIT_TEXT_SECTION(PAGE_SIZE)
+-#ifdef CONFIG_X86_64
+-      :init
+-#endif
++      . = ALIGN(PAGE_SIZE);
++      init_begin = .;
++      .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
++              VMLINUX_SYMBOL(_sinittext) = .;
++              INIT_TEXT
++              VMLINUX_SYMBOL(_einittext) = .;
++              . = ALIGN(PAGE_SIZE);
++      } :text.init
+-      INIT_DATA_SECTION(16)
++      /*
++       * .exit.text is discard at runtime, not link time, to deal with
++       * .exit.text is discarded at runtime, not link time, to deal with
++       *  references from .altinstructions and .eh_frame
++       */
++      .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
++              EXIT_TEXT
++              . = ALIGN(16);
++      } :text.exit
++      . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
++
++      . = ALIGN(PAGE_SIZE);
++      INIT_DATA_SECTION(16) :init
+       .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
+               __x86_cpu_dev_start = .;
+@@ -253,19 +334,12 @@ SECTIONS
+       }
+       . = ALIGN(8);
+-      /*
+-       * .exit.text is discard at runtime, not link time, to deal with
+-       *  references from .altinstructions and .eh_frame
+-       */
+-      .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
+-              EXIT_TEXT
+-      }
+       .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
+               EXIT_DATA
+       }
+-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
++#ifndef CONFIG_SMP
+       PERCPU_SECTION(INTERNODE_CACHE_BYTES)
+ #endif
+@@ -284,16 +358,10 @@ SECTIONS
+       .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+               __smp_locks = .;
+               *(.smp_locks)
+-              . = ALIGN(PAGE_SIZE);
+               __smp_locks_end = .;
++              . = ALIGN(PAGE_SIZE);
+       }
+-#ifdef CONFIG_X86_64
+-      .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
+-              NOSAVE_DATA
+-      }
+-#endif
+-
+       /* BSS */
+       . = ALIGN(PAGE_SIZE);
+       .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
+@@ -309,6 +377,7 @@ SECTIONS
+               __brk_base = .;
+               . += 64 * 1024;         /* 64k alignment slop space */
+               *(.brk_reservation)     /* areas brk users have reserved */
++              . = ALIGN(HPAGE_SIZE);
+               __brk_limit = .;
+       }
+@@ -335,13 +404,12 @@ SECTIONS
+  * for the boot processor.
+  */
+ #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
+-INIT_PER_CPU(gdt_page);
+ INIT_PER_CPU(irq_stack_union);
+ /*
+  * Build-time check on the image size:
+  */
+-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
++. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
+          "kernel image bigger than KERNEL_IMAGE_SIZE");
+ #ifdef CONFIG_SMP
+diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
+index 9a907a6..f83f921 100644
+--- a/arch/x86/kernel/vsyscall_64.c
++++ b/arch/x86/kernel/vsyscall_64.c
+@@ -56,15 +56,13 @@
+ DEFINE_VVAR(int, vgetcpu_mode);
+ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
+-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
++static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
+ static int __init vsyscall_setup(char *str)
+ {
+       if (str) {
+               if (!strcmp("emulate", str))
+                       vsyscall_mode = EMULATE;
+-              else if (!strcmp("native", str))
+-                      vsyscall_mode = NATIVE;
+               else if (!strcmp("none", str))
+                       vsyscall_mode = NONE;
+               else
+@@ -323,8 +321,7 @@ do_ret:
+       return true;
+ sigsegv:
+-      force_sig(SIGSEGV, current);
+-      return true;
++      do_group_exit(SIGKILL);
+ }
+ /*
+@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
+       extern char __vvar_page;
+       unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
+-      __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
+-                   vsyscall_mode == NATIVE
+-                   ? PAGE_KERNEL_VSYSCALL
+-                   : PAGE_KERNEL_VVAR);
++      __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
+       BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
+                    (unsigned long)VSYSCALL_START);
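With the native mode removed above, vsyscall_setup() only has to tell "emulate" apart from "none". A small sketch of that style of boot-parameter parsing, with invented names:

#include <stdio.h>
#include <string.h>

enum mode { EMULATE, NONE };

static enum mode vmode = EMULATE;      /* default, as in the hunk above */

static int parse_mode(const char *str)
{
	if (!str)
		return -1;
	if (!strcmp(str, "emulate"))
		vmode = EMULATE;
	else if (!strcmp(str, "none"))
		vmode = NONE;
	else
		return -1;             /* unknown keyword, reject */
	return 0;
}

int main(void)
{
	printf("%d %d\n", parse_mode("none"), vmode);      /* 0 1 */
	printf("%d %d\n", parse_mode("native"), vmode);    /* -1 1, unchanged */
	return 0;
}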
+diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
+index b014d94..e775258 100644
+--- a/arch/x86/kernel/x8664_ksyms_64.c
++++ b/arch/x86/kernel/x8664_ksyms_64.c
+@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
+ EXPORT_SYMBOL(copy_user_generic_unrolled);
+ EXPORT_SYMBOL(copy_user_enhanced_fast_string);
+ EXPORT_SYMBOL(__copy_user_nocache);
+-EXPORT_SYMBOL(_copy_from_user);
+-EXPORT_SYMBOL(_copy_to_user);
+ EXPORT_SYMBOL(copy_page);
+ EXPORT_SYMBOL(clear_page);
+@@ -66,3 +64,7 @@ EXPORT_SYMBOL(empty_zero_page);
+ #ifndef CONFIG_PARAVIRT
+ EXPORT_SYMBOL(native_load_gs_index);
+ #endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++EXPORT_SYMBOL(cpu_pgd);
++#endif
+diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
+index 45a14db..075bb9b 100644
+--- a/arch/x86/kernel/x86_init.c
++++ b/arch/x86/kernel/x86_init.c
+@@ -85,7 +85,7 @@ struct x86_init_ops x86_init __initdata = {
+       },
+ };
+-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
++struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
+       .early_percpu_clock_init        = x86_init_noop,
+       .setup_percpu_clockev           = setup_secondary_APIC_clock,
+ };
+@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
+ static void default_nmi_init(void) { };
+ static int default_i8042_detect(void) { return 1; };
+-struct x86_platform_ops x86_platform = {
++struct x86_platform_ops x86_platform __read_only = {
+       .calibrate_tsc                  = native_calibrate_tsc,
+       .get_wallclock                  = mach_get_cmos_time,
+       .set_wallclock                  = mach_set_rtc_mmss,
+@@ -107,7 +107,7 @@ struct x86_platform_ops x86_platform = {
+ };
+ EXPORT_SYMBOL_GPL(x86_platform);
+-struct x86_msi_ops x86_msi = {
++struct x86_msi_ops x86_msi __read_only = {
+       .setup_msi_irqs         = native_setup_msi_irqs,
+       .compose_msi_msg        = native_compose_msi_msg,
+       .teardown_msi_irq       = native_teardown_msi_irq,
+@@ -116,7 +116,7 @@ struct x86_msi_ops x86_msi = {
+       .setup_hpet_msi         = default_setup_hpet_msi,
+ };
+-struct x86_io_apic_ops x86_io_apic_ops = {
++struct x86_io_apic_ops x86_io_apic_ops __read_only = {
+       .init                   = native_io_apic_init_mappings,
+       .read                   = native_io_apic_read,
+       .write                  = native_io_apic_write,
+diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
+index ada87a3..afea76d 100644
+--- a/arch/x86/kernel/xsave.c
++++ b/arch/x86/kernel/xsave.c
+@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
+ {
+       int err;
++      buf = (struct xsave_struct __user *)____m(buf);
+       if (use_xsave())
+               err = xsave_user(buf);
+       else if (use_fxsr())
+@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
+  */
+ static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
+ {
++      buf = (void __user *)____m(buf);
+       if (use_xsave()) {
+               if ((unsigned long)buf % 64 || fx_only) {
+                       u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index a20ecb5..d0e2194 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
+                             struct kvm_cpuid2 *cpuid,
+                             struct kvm_cpuid_entry2 __user *entries)
+ {
+-      int r;
++      int r, i;
+       r = -E2BIG;
+       if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+               goto out;
+       r = -EFAULT;
+-      if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
+-                         cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
++      if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
+               goto out;
++      for (i = 0; i < cpuid->nent; ++i) {
++              struct kvm_cpuid_entry2 cpuid_entry;
++              if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
++                      goto out;
++              vcpu->arch.cpuid_entries[i] = cpuid_entry;
++      }
+       vcpu->arch.cpuid_nent = cpuid->nent;
+       kvm_apic_set_version(vcpu);
+       kvm_x86_ops->cpuid_update(vcpu);
+@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
+                             struct kvm_cpuid2 *cpuid,
+                             struct kvm_cpuid_entry2 __user *entries)
+ {
+-      int r;
++      int r, i;
+       r = -E2BIG;
+       if (cpuid->nent < vcpu->arch.cpuid_nent)
+               goto out;
+       r = -EFAULT;
+-      if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
+-                       vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
++      if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
+               goto out;
++      for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
++              struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
++              if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
++                      goto out;
++      }
+       return 0;
+ out:
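The cpuid ioctl changes above replace one large copy_from_user()/copy_to_user() of nent entries with an access_ok() check followed by a per-entry __copy_from_user()/__copy_to_user() loop through a local entry. A sketch of that loop shape, with an invented entry type and memcpy standing in for the user-copy primitives:

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct entry { unsigned int function, index, eax, ebx, ecx, edx; };

#define MAX_ENTRIES 80

static struct entry table[MAX_ENTRIES];        /* destination array */

static int load_entries(const struct entry *user_buf, unsigned int nent)
{
	if (nent > MAX_ENTRIES)
		return -E2BIG;                 /* bound the count first */

	for (unsigned int i = 0; i < nent; i++) {
		struct entry tmp;              /* bounce through a local copy */

		memcpy(&tmp, &user_buf[i], sizeof(tmp));   /* __copy_from_user() */
		table[i] = tmp;
	}
	return 0;
}

int main(void)
{
	struct entry src[2] = { { .function = 0 }, { .function = 1 } };

	printf("%d\n", load_entries(src, 2));               /* 0  */
	printf("%d\n", load_entries(src, MAX_ENTRIES + 1)); /* -7 */
	return 0;
}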
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 5953dce..f11a7d2 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -329,6 +329,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
+ #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
+       do {                                                            \
++              unsigned long _tmp;                                     \
+               __asm__ __volatile__ (                                  \
+                       _PRE_EFLAGS("0", "4", "2")                      \
+                       _op _suffix " %"_x"3,%1; "                      \
+@@ -343,8 +344,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
+ /* Raw emulation: instruction has two explicit operands. */
+ #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy)                \
+       do {                                                            \
+-              unsigned long _tmp;                                     \
+-                                                                      \
+               switch ((ctxt)->dst.bytes) {                            \
+               case 2:                                                 \
+                       ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16);      \
+@@ -360,7 +359,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
+ #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy)                    \
+       do {                                                                 \
+-              unsigned long _tmp;                                          \
+               switch ((ctxt)->dst.bytes) {                                 \
+               case 1:                                                      \
+                       ____emulate_2op(ctxt,_op,_bx,_by,"b",u8);            \
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 0eee2c8..94a32c3 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -55,7 +55,7 @@
+ #define APIC_BUS_CYCLE_NS 1
+ /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
+-#define apic_debug(fmt, arg...)
++#define apic_debug(fmt, arg...) do {} while (0)
+ #define APIC_LVT_NUM                  6
+ /* 14 is the version for Xeon and Pentium 8.4.8*/
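Turning the empty apic_debug() into `do {} while (0)` keeps the disabled macro a real statement instead of expanding to nothing. A tiny illustration with an invented log macro; with the bare empty form, the call site inside an if collapses to a lone ';', which compilers can flag with -Wempty-body and which is easy to misread:

#include <stdio.h>

/* Disabled debug print, bare empty expansion. */
#define DBG_EMPTY(fmt, ...)
/* Disabled debug print, statement-shaped expansion. */
#define DBG_STMT(fmt, ...) do {} while (0)

int main(void)
{
	int enabled = 0;

	/* With DBG_EMPTY this body would preprocess to just ';';
	 * DBG_STMT keeps normal statement semantics at the call site. */
	if (enabled)
		DBG_STMT("apic state\n");

	printf("done\n");
	return 0;
}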
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
+index da20860..d19fdf5 100644
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -208,7 +208,7 @@ retry_walk:
+               if (unlikely(kvm_is_error_hva(host_addr)))
+                       goto error;
+-              ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
++              ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
+               if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
+                       goto error;
+               walker->ptep_user[walker->level - 1] = ptep_user;
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index a14a6ea..dc86cf0 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3493,7 +3493,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
+       int cpu = raw_smp_processor_id();
+       struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
++
++      pax_open_kernel();
+       sd->tss_desc->type = 9; /* available 32/64-bit TSS */
++      pax_close_kernel();
++
+       load_TR_desc();
+ }
+@@ -3894,6 +3898,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+ #endif
+ #endif
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      __set_fs(current_thread_info()->addr_limit);
++#endif
++
+       reload_tss(vcpu);
+       local_irq_disable();
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 5402c94..c3bdeee 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1311,12 +1311,12 @@ static void vmcs_write64(unsigned long field, u64 value)
+ #endif
+ }
+-static void vmcs_clear_bits(unsigned long field, u32 mask)
++static void vmcs_clear_bits(unsigned long field, unsigned long mask)
+ {
+       vmcs_writel(field, vmcs_readl(field) & ~mask);
+ }
+-static void vmcs_set_bits(unsigned long field, u32 mask)
++static void vmcs_set_bits(unsigned long field, unsigned long mask)
+ {
+       vmcs_writel(field, vmcs_readl(field) | mask);
+ }
+@@ -1517,7 +1517,11 @@ static void reload_tss(void)
+       struct desc_struct *descs;
+       descs = (void *)gdt->address;
++
++      pax_open_kernel();
+       descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
++      pax_close_kernel();
++
+       load_TR_desc();
+ }
+@@ -1741,6 +1745,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+               vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
+               vmcs_writel(HOST_GDTR_BASE, gdt->address);   /* 22.2.4 */
++#ifdef CONFIG_PAX_PER_CPU_PGD
++              vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
++#endif
++
+               rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
+               vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+               vmx->loaded_vmcs->cpu = cpu;
+@@ -2935,8 +2943,11 @@ static __init int hardware_setup(void)
+       if (!cpu_has_vmx_flexpriority())
+               flexpriority_enabled = 0;
+-      if (!cpu_has_vmx_tpr_shadow())
+-              kvm_x86_ops->update_cr8_intercept = NULL;
++      if (!cpu_has_vmx_tpr_shadow()) {
++              pax_open_kernel();
++              *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
++              pax_close_kernel();
++      }
+       if (enable_ept && !cpu_has_vmx_ept_2m_page())
+               kvm_disable_largepages();
+@@ -2947,13 +2958,15 @@ static __init int hardware_setup(void)
+       if (!cpu_has_vmx_apicv())
+               enable_apicv = 0;
++      pax_open_kernel();
+       if (enable_apicv)
+-              kvm_x86_ops->update_cr8_intercept = NULL;
++              *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
+       else {
+-              kvm_x86_ops->hwapic_irr_update = NULL;
+-              kvm_x86_ops->deliver_posted_interrupt = NULL;
+-              kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
++              *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
++              *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
++              *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
+       }
++      pax_close_kernel();
+       if (nested)
+               nested_vmx_setup_ctls_msrs();
+@@ -4076,7 +4089,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+       vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS);  /* 22.2.3 */
+       vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+       vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
++#endif
+       vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
+ #ifdef CONFIG_X86_64
+@@ -4098,7 +4114,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+       vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
+       vmx->host_idt_base = dt.address;
+-      vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
++      vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
+       rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
+       vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
+@@ -7030,6 +7046,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+               "jmp 2f \n\t"
+               "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
+               "2: "
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++              "ljmp %[cs],$3f\n\t"
++              "3: "
++#endif
++
+               /* Save guest registers, load host registers, keep flags */
+               "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
+               "pop %0 \n\t"
+@@ -7082,6 +7104,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ #endif
+               [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
+               [wordsize]"i"(sizeof(ulong))
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++              ,[cs]"i"(__KERNEL_CS)
++#endif
++
+             : "cc", "memory"
+ #ifdef CONFIG_X86_64
+               , "rax", "rbx", "rdi", "rsi"
+@@ -7095,7 +7122,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+       if (debugctlmsr)
+               update_debugctlmsr(debugctlmsr);
+-#ifndef CONFIG_X86_64
++#ifdef CONFIG_X86_32
+       /*
+        * The sysexit path does not restore ds/es, so we must set them to
+        * a reasonable value ourselves.
+@@ -7104,8 +7131,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+        * may be executed in interrupt context, which saves and restore segments
+        * around it, nullifying its effect.
+        */
+-      loadsegment(ds, __USER_DS);
+-      loadsegment(es, __USER_DS);
++      loadsegment(ds, __KERNEL_DS);
++      loadsegment(es, __KERNEL_DS);
++      loadsegment(ss, __KERNEL_DS);
++
++#ifdef CONFIG_PAX_KERNEXEC
++      loadsegment(fs, __KERNEL_PERCPU);
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      __set_fs(current_thread_info()->addr_limit);
++#endif
++
+ #endif
+       vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
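The pax_open_kernel()/pax_close_kernel() pairs above exist because tables like kvm_x86_ops become read-only under this patch set, so clearing individual hooks has to happen through a brief write-enable window (and through a `*(void **)&` cast to get past the const-ness). A userspace sketch of the same open/write/close pattern using mprotect() on a page-aligned table; the ops layout and names are invented:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

struct ops {
	void (*update_hook)(void);
	void (*other_hook)(void);
};

static void noop(void) { }

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	struct ops *ops;

	/* Put the ops table on its own page so it can be protected alone. */
	if (posix_memalign((void **)&ops, page, page))
		return 1;
	ops->update_hook = noop;
	ops->other_hook  = noop;

	/* "close": read-only for normal operation */
	mprotect(ops, page, PROT_READ);

	/* "open kernel": briefly writable, patch one hook, "close" again */
	mprotect(ops, page, PROT_READ | PROT_WRITE);
	ops->update_hook = NULL;
	mprotect(ops, page, PROT_READ);

	printf("update_hook %s, other_hook %s\n",
	       ops->update_hook ? "set" : "cleared",
	       ops->other_hook  ? "set" : "cleared");
	return 0;
}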
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index e8ba99c..ee9d7d9 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1725,8 +1725,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
+ {
+       struct kvm *kvm = vcpu->kvm;
+       int lm = is_long_mode(vcpu);
+-      u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
+-              : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
++      u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
++              : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
+       u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
+               : kvm->arch.xen_hvm_config.blob_size_32;
+       u32 page_num = data & ~PAGE_MASK;
+@@ -2609,6 +2609,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
+               if (n < msr_list.nmsrs)
+                       goto out;
+               r = -EFAULT;
++              if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
++                      goto out;
+               if (copy_to_user(user_msr_list->indices, &msrs_to_save,
+                                num_msrs_to_save * sizeof(u32)))
+                       goto out;
+@@ -5297,7 +5299,7 @@ static struct notifier_block pvclock_gtod_notifier = {
+ };
+ #endif
+-int kvm_arch_init(void *opaque)
++int kvm_arch_init(const void *opaque)
+ {
+       int r;
+       struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
+diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
+index 7114c63..a1018fc 100644
+--- a/arch/x86/lguest/boot.c
++++ b/arch/x86/lguest/boot.c
+@@ -1201,9 +1201,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
+  * Rebooting also tells the Host we're finished, but the RESTART flag tells the
+  * Launcher to reboot us.
+  */
+-static void lguest_restart(char *reason)
++static __noreturn void lguest_restart(char *reason)
+ {
+       hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
++      BUG();
+ }
+ /*G:050
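Marking lguest_restart() __noreturn and adding BUG() documents, and enforces, that control never comes back once the shutdown hypercall has been issued. A minimal sketch of the same annotation with the GCC attribute and an invented hypercall stub:

#include <stdio.h>
#include <stdlib.h>

static void shutdown_hypercall(const char *reason)
{
	/* stand-in for hcall(LHCALL_SHUTDOWN, ...) */
	printf("restart requested: %s\n", reason);
}

static __attribute__((noreturn)) void machine_restart(const char *reason)
{
	shutdown_hypercall(reason);
	/* If the hypercall ever returned, stopping here keeps the
	 * noreturn promise honest -- the role of BUG() in the hunk above. */
	abort();
}

int main(void)
{
	machine_restart("demo");
	/* not reached */
}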
+diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
+index 00933d5..3a64af9 100644
+--- a/arch/x86/lib/atomic64_386_32.S
++++ b/arch/x86/lib/atomic64_386_32.S
+@@ -48,6 +48,10 @@ BEGIN(read)
+       movl  (v), %eax
+       movl 4(v), %edx
+ RET_ENDP
++BEGIN(read_unchecked)
++      movl  (v), %eax
++      movl 4(v), %edx
++RET_ENDP
+ #undef v
+ #define v %esi
+@@ -55,6 +59,10 @@ BEGIN(set)
+       movl %ebx,  (v)
+       movl %ecx, 4(v)
+ RET_ENDP
++BEGIN(set_unchecked)
++      movl %ebx,  (v)
++      movl %ecx, 4(v)
++RET_ENDP
+ #undef v
+ #define v  %esi
+@@ -70,6 +78,20 @@ RET_ENDP
+ BEGIN(add)
+       addl %eax,  (v)
+       adcl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++      jno 0f
++      subl %eax,  (v)
++      sbbl %edx, 4(v)
++      int $4
++0:
++      _ASM_EXTABLE(0b, 0b)
++#endif
++
++RET_ENDP
++BEGIN(add_unchecked)
++      addl %eax,  (v)
++      adcl %edx, 4(v)
+ RET_ENDP
+ #undef v
+@@ -77,6 +99,24 @@ RET_ENDP
+ BEGIN(add_return)
+       addl  (v), %eax
+       adcl 4(v), %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++      into
++1234:
++      _ASM_EXTABLE(1234b, 2f)
++#endif
++
++      movl %eax,  (v)
++      movl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++2:
++#endif
++
++RET_ENDP
++BEGIN(add_return_unchecked)
++      addl  (v), %eax
++      adcl 4(v), %edx
+       movl %eax,  (v)
+       movl %edx, 4(v)
+ RET_ENDP
+@@ -86,6 +126,20 @@ RET_ENDP
+ BEGIN(sub)
+       subl %eax,  (v)
+       sbbl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++      jno 0f
++      addl %eax,  (v)
++      adcl %edx, 4(v)
++      int $4
++0:
++      _ASM_EXTABLE(0b, 0b)
++#endif
++
++RET_ENDP
++BEGIN(sub_unchecked)
++      subl %eax,  (v)
++      sbbl %edx, 4(v)
+ RET_ENDP
+ #undef v
+@@ -96,6 +150,27 @@ BEGIN(sub_return)
+       sbbl $0, %edx
+       addl  (v), %eax
+       adcl 4(v), %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++      into
++1234:
++      _ASM_EXTABLE(1234b, 2f)
++#endif
++
++      movl %eax,  (v)
++      movl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++2:
++#endif
++
++RET_ENDP
++BEGIN(sub_return_unchecked)
++      negl %edx
++      negl %eax
++      sbbl $0, %edx
++      addl  (v), %eax
++      adcl 4(v), %edx
+       movl %eax,  (v)
+       movl %edx, 4(v)
+ RET_ENDP
+@@ -105,6 +180,20 @@ RET_ENDP
+ BEGIN(inc)
+       addl $1,  (v)
+       adcl $0, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++      jno 0f
++      subl $1,  (v)
++      sbbl $0, 4(v)
++      int $4
++0:
++      _ASM_EXTABLE(0b, 0b)
++#endif
++
++RET_ENDP
++BEGIN(inc_unchecked)
++      addl $1,  (v)
++      adcl $0, 4(v)
+ RET_ENDP
+ #undef v
+@@ -114,6 +203,26 @@ BEGIN(inc_return)
+       movl 4(v), %edx
+       addl $1, %eax
+       adcl $0, %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++      into
++1234:
++      _ASM_EXTABLE(1234b, 2f)
++#endif
++
++      movl %eax,  (v)
++      movl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++2:
++#endif
++
++RET_ENDP
++BEGIN(inc_return_unchecked)
++      movl  (v), %eax
++      movl 4(v), %edx
++      addl $1, %eax
++      adcl $0, %edx
+       movl %eax,  (v)
+       movl %edx, 4(v)
+ RET_ENDP
+@@ -123,6 +232,20 @@ RET_ENDP
+ BEGIN(dec)
+       subl $1,  (v)
+       sbbl $0, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++      jno 0f
++      addl $1,  (v)
++      adcl $0, 4(v)
++      int $4
++0:
++      _ASM_EXTABLE(0b, 0b)
++#endif
++
++RET_ENDP
++BEGIN(dec_unchecked)
++      subl $1,  (v)
++      sbbl $0, 4(v)
+ RET_ENDP
+ #undef v
+@@ -132,6 +255,26 @@ BEGIN(dec_return)
+       movl 4(v), %edx
+       subl $1, %eax
+       sbbl $0, %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++      into
++1234:
++      _ASM_EXTABLE(1234b, 2f)
++#endif
++
++      movl %eax,  (v)
++      movl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++2:
++#endif
++
++RET_ENDP
++BEGIN(dec_return_unchecked)
++      movl  (v), %eax
++      movl 4(v), %edx
++      subl $1, %eax
++      sbbl $0, %edx
+       movl %eax,  (v)
+       movl %edx, 4(v)
+ RET_ENDP
+@@ -143,6 +286,13 @@ BEGIN(add_unless)
+       adcl %edx, %edi
+       addl  (v), %eax
+       adcl 4(v), %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++      into
++1234:
++      _ASM_EXTABLE(1234b, 2f)
++#endif
++
+       cmpl %eax, %ecx
+       je 3f
+ 1:
+@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
+ 1:
+       addl $1, %eax
+       adcl $0, %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++      into
++1234:
++      _ASM_EXTABLE(1234b, 2f)
++#endif
++
+       movl %eax,  (v)
+       movl %edx, 4(v)
+       movl $1, %eax
+@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
+       movl 4(v), %edx
+       subl $1, %eax
+       sbbl $0, %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++      into
++1234:
++      _ASM_EXTABLE(1234b, 1f)
++#endif
++
+       js 1f
+       movl %eax,  (v)
+       movl %edx, 4(v)
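The PAX_REFCOUNT blocks woven into the 386 atomic64 routines above all follow one pattern: perform the add/sub/inc/dec, test the overflow flag, and if it fired, undo the memory update and trap (int $4 or into) so a wrapped reference count never persists; the *_unchecked variants skip all of that. A C-level sketch of the checked/unchecked split for a plain counter, using a compiler builtin for the overflow test; names are invented and the trap is modelled with abort():

#include <stdio.h>
#include <stdlib.h>

static long counter = 1;               /* the "(v)" operand in the asm */

static void counter_add_checked(long delta)
{
	long sum;

	if (__builtin_add_overflow(counter, delta, &sum)) {
		/* In the asm the add has already hit memory, gets undone,
		 * then int $4 reports the overflow.  Here we simply never
		 * store the wrapped value before bailing out. */
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
	counter = sum;                 /* store back to (v) */
}

static void counter_add_unchecked(long delta)
{
	counter += delta;              /* *_unchecked variant: no overflow trap */
}

int main(void)
{
	counter_add_checked(41);
	counter_add_unchecked(-2);
	printf("counter=%ld\n", counter);   /* 40 */
	return 0;
}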
+diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
+index f5cc9eb..51fa319 100644
+--- a/arch/x86/lib/atomic64_cx8_32.S
++++ b/arch/x86/lib/atomic64_cx8_32.S
+@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
+       CFI_STARTPROC
+       read64 %ecx
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ ENDPROC(atomic64_read_cx8)
++ENTRY(atomic64_read_unchecked_cx8)
++      CFI_STARTPROC
++
++      read64 %ecx
++      pax_force_retaddr
++      ret
++      CFI_ENDPROC
++ENDPROC(atomic64_read_unchecked_cx8)
++
+ ENTRY(atomic64_set_cx8)
+       CFI_STARTPROC
+@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
+       cmpxchg8b (%esi)
+       jne 1b
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ ENDPROC(atomic64_set_cx8)
++ENTRY(atomic64_set_unchecked_cx8)
++      CFI_STARTPROC
++
++1:
++/* we don't need LOCK_PREFIX since aligned 64-bit writes
++ * are atomic on 586 and newer */
++      cmpxchg8b (%esi)
++      jne 1b
++
++      pax_force_retaddr
++      ret
++      CFI_ENDPROC
++ENDPROC(atomic64_set_unchecked_cx8)
++
+ ENTRY(atomic64_xchg_cx8)
+       CFI_STARTPROC
+@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
+       cmpxchg8b (%esi)
+       jne 1b
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ ENDPROC(atomic64_xchg_cx8)
+-.macro addsub_return func ins insc
+-ENTRY(atomic64_\func\()_return_cx8)
++.macro addsub_return func ins insc unchecked=""
++ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
+       CFI_STARTPROC
+       SAVE ebp
+       SAVE ebx
+@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
+       movl %edx, %ecx
+       \ins\()l %esi, %ebx
+       \insc\()l %edi, %ecx
++
++.ifb \unchecked
++#ifdef CONFIG_PAX_REFCOUNT
++      into
++2:
++      _ASM_EXTABLE(2b, 3f)
++#endif
++.endif
++
+       LOCK_PREFIX
+       cmpxchg8b (%ebp)
+       jne 1b
+-
+-10:
+       movl %ebx, %eax
+       movl %ecx, %edx
++
++.ifb \unchecked
++#ifdef CONFIG_PAX_REFCOUNT
++3:
++#endif
++.endif
++
+       RESTORE edi
+       RESTORE esi
+       RESTORE ebx
+       RESTORE ebp
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+-ENDPROC(atomic64_\func\()_return_cx8)
++ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
+ .endm
+ addsub_return add add adc
+ addsub_return sub sub sbb
++addsub_return add add adc _unchecked
++addsub_return sub sub sbb _unchecked
+-.macro incdec_return func ins insc
+-ENTRY(atomic64_\func\()_return_cx8)
++.macro incdec_return func ins insc unchecked=""
++ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
+       CFI_STARTPROC
+       SAVE ebx
+@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
+       movl %edx, %ecx
+       \ins\()l $1, %ebx
+       \insc\()l $0, %ecx
++
++.ifb \unchecked
++#ifdef CONFIG_PAX_REFCOUNT
++      into
++2:
++      _ASM_EXTABLE(2b, 3f)
++#endif
++.endif
++
+       LOCK_PREFIX
+       cmpxchg8b (%esi)
+       jne 1b
+-10:
+       movl %ebx, %eax
+       movl %ecx, %edx
++
++.ifb \unchecked
++#ifdef CONFIG_PAX_REFCOUNT
++3:
++#endif
++.endif
++
+       RESTORE ebx
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+-ENDPROC(atomic64_\func\()_return_cx8)
++ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
+ .endm
+ incdec_return inc add adc
+ incdec_return dec sub sbb
++incdec_return inc add adc _unchecked
++incdec_return dec sub sbb _unchecked
+ ENTRY(atomic64_dec_if_positive_cx8)
+       CFI_STARTPROC
+@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
+       movl %edx, %ecx
+       subl $1, %ebx
+       sbb $0, %ecx
++
++#ifdef CONFIG_PAX_REFCOUNT
++      into
++1234:
++      _ASM_EXTABLE(1234b, 2f)
++#endif
++
+       js 2f
+       LOCK_PREFIX
+       cmpxchg8b (%esi)
+@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
+       movl %ebx, %eax
+       movl %ecx, %edx
+       RESTORE ebx
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ ENDPROC(atomic64_dec_if_positive_cx8)
+@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
+       movl %edx, %ecx
+       addl %ebp, %ebx
+       adcl %edi, %ecx
++
++#ifdef CONFIG_PAX_REFCOUNT
++      into
++1234:
++      _ASM_EXTABLE(1234b, 3f)
++#endif
++
+       LOCK_PREFIX
+       cmpxchg8b (%esi)
+       jne 1b
+@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
+       CFI_ADJUST_CFA_OFFSET -8
+       RESTORE ebx
+       RESTORE ebp
++      pax_force_retaddr
+       ret
+ 4:
+       cmpl %edx, 4(%esp)
+@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
+       xorl %ecx, %ecx
+       addl $1, %ebx
+       adcl %edx, %ecx
++
++#ifdef CONFIG_PAX_REFCOUNT
++      into
++1234:
++      _ASM_EXTABLE(1234b, 3f)
++#endif
++
+       LOCK_PREFIX
+       cmpxchg8b (%esi)
+       jne 1b
+@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
+       movl $1, %eax
+ 3:
+       RESTORE ebx
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ ENDPROC(atomic64_inc_not_zero_cx8)
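The cx8 variants above all share one shape: read the old 64-bit value, compute the new value in ebx:ecx, and retry a locked cmpxchg8b until no other CPU has changed the memory in between. The same loop expressed with the GCC/C11 atomic builtins, which compile to a similar compare-and-swap retry on 32-bit x86; the counter name is invented:

#include <stdio.h>
#include <stdint.h>

static uint64_t counter;

/* Add 'delta' and return the new value, retrying on contention. */
static uint64_t counter_add_return(uint64_t delta)
{
	uint64_t old = __atomic_load_n(&counter, __ATOMIC_RELAXED);
	uint64_t next;

	do {
		next = old + delta;    /* addl/adcl into ebx:ecx in the asm */
		/* On failure 'old' is reloaded with the current value,
		 * mirroring the jne 1b retry around cmpxchg8b. */
	} while (!__atomic_compare_exchange_n(&counter, &old, next,
					      /*weak=*/1,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_RELAXED));
	return next;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)counter_add_return(5));  /* 5  */
	printf("%llu\n", (unsigned long long)counter_add_return(7));  /* 12 */
	return 0;
}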
+diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
+index e78b8ee..7e173a8 100644
+--- a/arch/x86/lib/checksum_32.S
++++ b/arch/x86/lib/checksum_32.S
+@@ -29,7 +29,8 @@
+ #include <asm/dwarf2.h>
+ #include <asm/errno.h>
+ #include <asm/asm.h>
+-                              
++#include <asm/segment.h>
++
+ /*
+  * computes a partial checksum, e.g. for TCP/UDP fragments
+  */
+@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
+ #define ARGBASE 16            
+ #define FP            12
+-              
+-ENTRY(csum_partial_copy_generic)
++
++ENTRY(csum_partial_copy_generic_to_user)
+       CFI_STARTPROC
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      pushl_cfi %gs
++      popl_cfi %es
++      jmp csum_partial_copy_generic
++#endif
++
++ENTRY(csum_partial_copy_generic_from_user)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      pushl_cfi %gs
++      popl_cfi %ds
++#endif
++
++ENTRY(csum_partial_copy_generic)
+       subl  $4,%esp   
+       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %edi
+@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
+       jmp 4f
+ SRC(1:        movw (%esi), %bx        )
+       addl $2, %esi
+-DST(  movw %bx, (%edi)        )
++DST(  movw %bx, %es:(%edi)    )
+       addl $2, %edi
+       addw %bx, %ax   
+       adcl $0, %eax
+@@ -329,30 +345,30 @@ DST(     movw %bx, (%edi)        )
+ SRC(1:        movl (%esi), %ebx       )
+ SRC(  movl 4(%esi), %edx      )
+       adcl %ebx, %eax
+-DST(  movl %ebx, (%edi)       )
++DST(  movl %ebx, %es:(%edi)   )
+       adcl %edx, %eax
+-DST(  movl %edx, 4(%edi)      )
++DST(  movl %edx, %es:4(%edi)  )
+ SRC(  movl 8(%esi), %ebx      )
+ SRC(  movl 12(%esi), %edx     )
+       adcl %ebx, %eax
+-DST(  movl %ebx, 8(%edi)      )
++DST(  movl %ebx, %es:8(%edi)  )
+       adcl %edx, %eax
+-DST(  movl %edx, 12(%edi)     )
++DST(  movl %edx, %es:12(%edi) )
+ SRC(  movl 16(%esi), %ebx     )
+ SRC(  movl 20(%esi), %edx     )
+       adcl %ebx, %eax
+-DST(  movl %ebx, 16(%edi)     )
++DST(  movl %ebx, %es:16(%edi) )
+       adcl %edx, %eax
+-DST(  movl %edx, 20(%edi)     )
++DST(  movl %edx, %es:20(%edi) )
+ SRC(  movl 24(%esi), %ebx     )
+ SRC(  movl 28(%esi), %edx     )
+       adcl %ebx, %eax
+-DST(  movl %ebx, 24(%edi)     )
++DST(  movl %ebx, %es:24(%edi) )
+       adcl %edx, %eax
+-DST(  movl %edx, 28(%edi)     )
++DST(  movl %edx, %es:28(%edi) )
+       lea 32(%esi), %esi
+       lea 32(%edi), %edi
+@@ -366,7 +382,7 @@ DST(       movl %edx, 28(%edi)     )
+       shrl $2, %edx                   # This clears CF
+ SRC(3:        movl (%esi), %ebx       )
+       adcl %ebx, %eax
+-DST(  movl %ebx, (%edi)       )
++DST(  movl %ebx, %es:(%edi)   )
+       lea 4(%esi), %esi
+       lea 4(%edi), %edi
+       dec %edx
+@@ -378,12 +394,12 @@ DST(     movl %ebx, (%edi)       )
+       jb 5f
+ SRC(  movw (%esi), %cx        )
+       leal 2(%esi), %esi
+-DST(  movw %cx, (%edi)        )
++DST(  movw %cx, %es:(%edi)    )
+       leal 2(%edi), %edi
+       je 6f
+       shll $16,%ecx
+ SRC(5:        movb (%esi), %cl        )
+-DST(  movb %cl, (%edi)        )
++DST(  movb %cl, %es:(%edi)    )
+ 6:    addl %ecx, %eax
+       adcl $0, %eax
+ 7:
+@@ -394,7 +410,7 @@ DST(       movb %cl, (%edi)        )
+ 6001:
+       movl ARGBASE+20(%esp), %ebx     # src_err_ptr
+-      movl $-EFAULT, (%ebx)
++      movl $-EFAULT, %ss:(%ebx)
+       # zero the complete destination - computing the rest
+       # is too much work 
+@@ -407,11 +423,15 @@ DST(     movb %cl, (%edi)        )
+ 6002:
+       movl ARGBASE+24(%esp), %ebx     # dst_err_ptr
+-      movl $-EFAULT,(%ebx)
++      movl $-EFAULT,%ss:(%ebx)
+       jmp 5000b
+ .previous
++      pushl_cfi %ss
++      popl_cfi %ds
++      pushl_cfi %ss
++      popl_cfi %es
+       popl_cfi %ebx
+       CFI_RESTORE ebx
+       popl_cfi %esi
+@@ -421,26 +441,43 @@ DST(     movb %cl, (%edi)        )
+       popl_cfi %ecx                   # equivalent to addl $4,%esp
+       ret     
+       CFI_ENDPROC
+-ENDPROC(csum_partial_copy_generic)
++ENDPROC(csum_partial_copy_generic_to_user)
+ #else
+ /* Version for PentiumII/PPro */
+ #define ROUND1(x) \
++      nop; nop; nop;                          \
+       SRC(movl x(%esi), %ebx  )       ;       \
+       addl %ebx, %eax                 ;       \
+-      DST(movl %ebx, x(%edi)  )       ; 
++      DST(movl %ebx, %es:x(%edi))     ;
+ #define ROUND(x) \
++      nop; nop; nop;                          \
+       SRC(movl x(%esi), %ebx  )       ;       \
+       adcl %ebx, %eax                 ;       \
+-      DST(movl %ebx, x(%edi)  )       ;
++      DST(movl %ebx, %es:x(%edi))     ;
+ #define ARGBASE 12
+-              
+-ENTRY(csum_partial_copy_generic)
++
++ENTRY(csum_partial_copy_generic_to_user)
+       CFI_STARTPROC
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      pushl_cfi %gs
++      popl_cfi %es
++      jmp csum_partial_copy_generic
++#endif
++
++ENTRY(csum_partial_copy_generic_from_user)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      pushl_cfi %gs
++      popl_cfi %ds
++#endif
++
++ENTRY(csum_partial_copy_generic)
+       pushl_cfi %ebx
+       CFI_REL_OFFSET ebx, 0
+       pushl_cfi %edi
+@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
+       subl %ebx, %edi  
+       lea  -1(%esi),%edx
+       andl $-32,%edx
+-      lea 3f(%ebx,%ebx), %ebx
++      lea 3f(%ebx,%ebx,2), %ebx
+       testl %esi, %esi 
+       jmp *%ebx
+ 1:    addl $64,%esi
+@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
+       jb 5f
+ SRC(  movw (%esi), %dx         )
+       leal 2(%esi), %esi
+-DST(  movw %dx, (%edi)         )
++DST(  movw %dx, %es:(%edi)     )
+       leal 2(%edi), %edi
+       je 6f
+       shll $16,%edx
+ 5:
+ SRC(  movb (%esi), %dl         )
+-DST(  movb %dl, (%edi)         )
++DST(  movb %dl, %es:(%edi)     )
+ 6:    addl %edx, %eax
+       adcl $0, %eax
+ 7:
+ .section .fixup, "ax"
+ 6001: movl    ARGBASE+20(%esp), %ebx  # src_err_ptr   
+-      movl $-EFAULT, (%ebx)
++      movl $-EFAULT, %ss:(%ebx)
+       # zero the complete destination (computing the rest is too much work)
+       movl ARGBASE+8(%esp),%edi       # dst
+       movl ARGBASE+12(%esp),%ecx      # len
+@@ -502,10 +539,17 @@ DST(     movb %dl, (%edi)         )
+       rep; stosb
+       jmp 7b
+ 6002: movl ARGBASE+24(%esp), %ebx     # dst_err_ptr
+-      movl $-EFAULT, (%ebx)
++      movl $-EFAULT, %ss:(%ebx)
+       jmp  7b                 
+ .previous                             
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      pushl_cfi %ss
++      popl_cfi %ds
++      pushl_cfi %ss
++      popl_cfi %es
++#endif
++
+       popl_cfi %esi
+       CFI_RESTORE esi
+       popl_cfi %edi
+@@ -514,7 +558,7 @@ DST(       movb %dl, (%edi)         )
+       CFI_RESTORE ebx
+       ret
+       CFI_ENDPROC
+-ENDPROC(csum_partial_copy_generic)
++ENDPROC(csum_partial_copy_generic_to_user)
+                               
+ #undef ROUND
+ #undef ROUND1         
+diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
+index f2145cf..cea889d 100644
+--- a/arch/x86/lib/clear_page_64.S
++++ b/arch/x86/lib/clear_page_64.S
+@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
+       movl $4096/8,%ecx
+       xorl %eax,%eax
+       rep stosq
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ ENDPROC(clear_page_c)
+@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
+       movl $4096,%ecx
+       xorl %eax,%eax
+       rep stosb
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ ENDPROC(clear_page_c_e)
+@@ -43,6 +45,7 @@ ENTRY(clear_page)
+       leaq    64(%rdi),%rdi
+       jnz     .Lloop
+       nop
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ .Lclear_page_end:
+@@ -58,7 +61,7 @@ ENDPROC(clear_page)
+ #include <asm/cpufeature.h>
+-      .section .altinstr_replacement,"ax"
++      .section .altinstr_replacement,"a"
+ 1:    .byte 0xeb                                      /* jmp <disp8> */
+       .byte (clear_page_c - clear_page) - (2f - 1b)   /* offset */
+ 2:    .byte 0xeb                                      /* jmp <disp8> */
+diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
+index 1e572c5..2a162cd 100644
+--- a/arch/x86/lib/cmpxchg16b_emu.S
++++ b/arch/x86/lib/cmpxchg16b_emu.S
+@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
+       popf
+       mov $1, %al
++      pax_force_retaddr
+       ret
+  not_same:
+       popf
+       xor %al,%al
++      pax_force_retaddr
+       ret
+ CFI_ENDPROC
+diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
+index 176cca6..1166c50 100644
+--- a/arch/x86/lib/copy_page_64.S
++++ b/arch/x86/lib/copy_page_64.S
+@@ -9,6 +9,7 @@ copy_page_rep:
+       CFI_STARTPROC
+       movl    $4096/8, %ecx
+       rep     movsq
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ ENDPROC(copy_page_rep)
+@@ -20,12 +21,14 @@ ENDPROC(copy_page_rep)
+ ENTRY(copy_page)
+       CFI_STARTPROC
+-      subq    $2*8,   %rsp
+-      CFI_ADJUST_CFA_OFFSET 2*8
++      subq    $3*8,   %rsp
++      CFI_ADJUST_CFA_OFFSET 3*8
+       movq    %rbx,   (%rsp)
+       CFI_REL_OFFSET rbx, 0
+       movq    %r12,   1*8(%rsp)
+       CFI_REL_OFFSET r12, 1*8
++      movq    %r13,   2*8(%rsp)
++      CFI_REL_OFFSET r13, 2*8
+       movl    $(4096/64)-5,   %ecx
+       .p2align 4
+@@ -36,7 +39,7 @@ ENTRY(copy_page)
+       movq    0x8*2(%rsi), %rdx
+       movq    0x8*3(%rsi), %r8
+       movq    0x8*4(%rsi), %r9
+-      movq    0x8*5(%rsi), %r10
++      movq    0x8*5(%rsi), %r13
+       movq    0x8*6(%rsi), %r11
+       movq    0x8*7(%rsi), %r12
+@@ -47,7 +50,7 @@ ENTRY(copy_page)
+       movq    %rdx, 0x8*2(%rdi)
+       movq    %r8,  0x8*3(%rdi)
+       movq    %r9,  0x8*4(%rdi)
+-      movq    %r10, 0x8*5(%rdi)
++      movq    %r13, 0x8*5(%rdi)
+       movq    %r11, 0x8*6(%rdi)
+       movq    %r12, 0x8*7(%rdi)
+@@ -66,7 +69,7 @@ ENTRY(copy_page)
+       movq    0x8*2(%rsi), %rdx
+       movq    0x8*3(%rsi), %r8
+       movq    0x8*4(%rsi), %r9
+-      movq    0x8*5(%rsi), %r10
++      movq    0x8*5(%rsi), %r13
+       movq    0x8*6(%rsi), %r11
+       movq    0x8*7(%rsi), %r12
+@@ -75,7 +78,7 @@ ENTRY(copy_page)
+       movq    %rdx, 0x8*2(%rdi)
+       movq    %r8,  0x8*3(%rdi)
+       movq    %r9,  0x8*4(%rdi)
+-      movq    %r10, 0x8*5(%rdi)
++      movq    %r13, 0x8*5(%rdi)
+       movq    %r11, 0x8*6(%rdi)
+       movq    %r12, 0x8*7(%rdi)
+@@ -87,8 +90,11 @@ ENTRY(copy_page)
+       CFI_RESTORE rbx
+       movq    1*8(%rsp), %r12
+       CFI_RESTORE r12
+-      addq    $2*8, %rsp
+-      CFI_ADJUST_CFA_OFFSET -2*8
++      movq    2*8(%rsp), %r13
++      CFI_RESTORE r13
++      addq    $3*8, %rsp
++      CFI_ADJUST_CFA_OFFSET -3*8
++      pax_force_retaddr
+       ret
+ .Lcopy_page_end:
+       CFI_ENDPROC
+@@ -99,7 +105,7 @@ ENDPROC(copy_page)
+ #include <asm/cpufeature.h>
+-      .section .altinstr_replacement,"ax"
++      .section .altinstr_replacement,"a"
+ 1:    .byte 0xeb                                      /* jmp <disp8> */
+       .byte (copy_page_rep - copy_page) - (2f - 1b)   /* offset */
+ 2:
+diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
+index a30ca15..6b3f4e1 100644
+--- a/arch/x86/lib/copy_user_64.S
++++ b/arch/x86/lib/copy_user_64.S
+@@ -18,31 +18,7 @@
+ #include <asm/alternative-asm.h>
+ #include <asm/asm.h>
+ #include <asm/smap.h>
+-
+-/*
+- * By placing feature2 after feature1 in altinstructions section, we logically
+- * implement:
+- * If CPU has feature2, jmp to alt2 is used
+- * else if CPU has feature1, jmp to alt1 is used
+- * else jmp to orig is used.
+- */
+-      .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
+-0:
+-      .byte 0xe9      /* 32bit jump */
+-      .long \orig-1f  /* by default jump to orig */
+-1:
+-      .section .altinstr_replacement,"ax"
+-2:    .byte 0xe9                      /* near jump with 32bit immediate */
+-      .long \alt1-1b /* offset */   /* or alternatively to alt1 */
+-3:    .byte 0xe9                      /* near jump with 32bit immediate */
+-      .long \alt2-1b /* offset */   /* or alternatively to alt2 */
+-      .previous
+-
+-      .section .altinstructions,"a"
+-      altinstruction_entry 0b,2b,\feature1,5,5
+-      altinstruction_entry 0b,3b,\feature2,5,5
+-      .previous
+-      .endm
++#include <asm/pgtable.h>
+       .macro ALIGN_DESTINATION
+ #ifdef FIX_ALIGNMENT
+@@ -70,52 +46,6 @@
+ #endif
+       .endm
+-/* Standard copy_to_user with segment limit checking */
+-ENTRY(_copy_to_user)
+-      CFI_STARTPROC
+-      GET_THREAD_INFO(%rax)
+-      movq %rdi,%rcx
+-      addq %rdx,%rcx
+-      jc bad_to_user
+-      cmpq TI_addr_limit(%rax),%rcx
+-      ja bad_to_user
+-      ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
+-              copy_user_generic_unrolled,copy_user_generic_string,    \
+-              copy_user_enhanced_fast_string
+-      CFI_ENDPROC
+-ENDPROC(_copy_to_user)
+-
+-/* Standard copy_from_user with segment limit checking */
+-ENTRY(_copy_from_user)
+-      CFI_STARTPROC
+-      GET_THREAD_INFO(%rax)
+-      movq %rsi,%rcx
+-      addq %rdx,%rcx
+-      jc bad_from_user
+-      cmpq TI_addr_limit(%rax),%rcx
+-      ja bad_from_user
+-      ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
+-              copy_user_generic_unrolled,copy_user_generic_string,    \
+-              copy_user_enhanced_fast_string
+-      CFI_ENDPROC
+-ENDPROC(_copy_from_user)
+-
+-      .section .fixup,"ax"
+-      /* must zero dest */
+-ENTRY(bad_from_user)
+-bad_from_user:
+-      CFI_STARTPROC
+-      movl %edx,%ecx
+-      xorl %eax,%eax
+-      rep
+-      stosb
+-bad_to_user:
+-      movl %edx,%eax
+-      ret
+-      CFI_ENDPROC
+-ENDPROC(bad_from_user)
+-      .previous
+-
+ /*
+  * copy_user_generic_unrolled - memory copy with exception handling.
+  * This version is for CPUs like P4 that don't have efficient micro
+@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
+  */
+ ENTRY(copy_user_generic_unrolled)
+       CFI_STARTPROC
++      ASM_PAX_OPEN_USERLAND
+       ASM_STAC
+       cmpl $8,%edx
+       jb 20f          /* less then 8 bytes, go to byte copy loop */
+@@ -141,19 +72,19 @@ ENTRY(copy_user_generic_unrolled)
+       jz 17f
+ 1:    movq (%rsi),%r8
+ 2:    movq 1*8(%rsi),%r9
+-3:    movq 2*8(%rsi),%r10
++3:    movq 2*8(%rsi),%rax
+ 4:    movq 3*8(%rsi),%r11
+ 5:    movq %r8,(%rdi)
+ 6:    movq %r9,1*8(%rdi)
+-7:    movq %r10,2*8(%rdi)
++7:    movq %rax,2*8(%rdi)
+ 8:    movq %r11,3*8(%rdi)
+ 9:    movq 4*8(%rsi),%r8
+ 10:   movq 5*8(%rsi),%r9
+-11:   movq 6*8(%rsi),%r10
++11:   movq 6*8(%rsi),%rax
+ 12:   movq 7*8(%rsi),%r11
+ 13:   movq %r8,4*8(%rdi)
+ 14:   movq %r9,5*8(%rdi)
+-15:   movq %r10,6*8(%rdi)
++15:   movq %rax,6*8(%rdi)
+ 16:   movq %r11,7*8(%rdi)
+       leaq 64(%rsi),%rsi
+       leaq 64(%rdi),%rdi
+@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
+       jnz 21b
+ 23:   xor %eax,%eax
+       ASM_CLAC
++      ASM_PAX_CLOSE_USERLAND
++      pax_force_retaddr
+       ret
+       .section .fixup,"ax"
+@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
+  */
+ ENTRY(copy_user_generic_string)
+       CFI_STARTPROC
++      ASM_PAX_OPEN_USERLAND
+       ASM_STAC
+       andl %edx,%edx
+       jz 4f
+@@ -251,6 +185,8 @@ ENTRY(copy_user_generic_string)
+       movsb
+ 4:    xorl %eax,%eax
+       ASM_CLAC
++      ASM_PAX_CLOSE_USERLAND
++      pax_force_retaddr
+       ret
+       .section .fixup,"ax"
+@@ -278,6 +214,7 @@ ENDPROC(copy_user_generic_string)
+  */
+ ENTRY(copy_user_enhanced_fast_string)
+       CFI_STARTPROC
++      ASM_PAX_OPEN_USERLAND
+       ASM_STAC
+       andl %edx,%edx
+       jz 2f
+@@ -286,6 +223,8 @@ ENTRY(copy_user_enhanced_fast_string)
+       movsb
+ 2:    xorl %eax,%eax
+       ASM_CLAC
++      ASM_PAX_CLOSE_USERLAND
++      pax_force_retaddr
+       ret
+       .section .fixup,"ax"
+diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
+index 6a4f43c..55d26f2 100644
+--- a/arch/x86/lib/copy_user_nocache_64.S
++++ b/arch/x86/lib/copy_user_nocache_64.S
+@@ -8,6 +8,7 @@
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
++#include <asm/alternative-asm.h>
+ #define FIX_ALIGNMENT 1
+@@ -16,6 +17,7 @@
+ #include <asm/thread_info.h>
+ #include <asm/asm.h>
+ #include <asm/smap.h>
++#include <asm/pgtable.h>
+       .macro ALIGN_DESTINATION
+ #ifdef FIX_ALIGNMENT
+@@ -49,6 +51,16 @@
+  */
+ ENTRY(__copy_user_nocache)
+       CFI_STARTPROC
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      mov pax_user_shadow_base,%rcx
++      cmp %rcx,%rsi
++      jae 1f
++      add %rcx,%rsi
++1:
++#endif
++
++      ASM_PAX_OPEN_USERLAND
+       ASM_STAC
+       cmpl $8,%edx
+       jb 20f          /* less then 8 bytes, go to byte copy loop */
+@@ -59,19 +71,19 @@ ENTRY(__copy_user_nocache)
+       jz 17f
+ 1:    movq (%rsi),%r8
+ 2:    movq 1*8(%rsi),%r9
+-3:    movq 2*8(%rsi),%r10
++3:    movq 2*8(%rsi),%rax
+ 4:    movq 3*8(%rsi),%r11
+ 5:    movnti %r8,(%rdi)
+ 6:    movnti %r9,1*8(%rdi)
+-7:    movnti %r10,2*8(%rdi)
++7:    movnti %rax,2*8(%rdi)
+ 8:    movnti %r11,3*8(%rdi)
+ 9:    movq 4*8(%rsi),%r8
+ 10:   movq 5*8(%rsi),%r9
+-11:   movq 6*8(%rsi),%r10
++11:   movq 6*8(%rsi),%rax
+ 12:   movq 7*8(%rsi),%r11
+ 13:   movnti %r8,4*8(%rdi)
+ 14:   movnti %r9,5*8(%rdi)
+-15:   movnti %r10,6*8(%rdi)
++15:   movnti %rax,6*8(%rdi)
+ 16:   movnti %r11,7*8(%rdi)
+       leaq 64(%rsi),%rsi
+       leaq 64(%rdi),%rdi
+@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
+       jnz 21b
+ 23:   xorl %eax,%eax
+       ASM_CLAC
++      ASM_PAX_CLOSE_USERLAND
+       sfence
++      pax_force_retaddr
+       ret
+       .section .fixup,"ax"
+diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
+index 2419d5f..953ee51 100644
+--- a/arch/x86/lib/csum-copy_64.S
++++ b/arch/x86/lib/csum-copy_64.S
+@@ -9,6 +9,7 @@
+ #include <asm/dwarf2.h>
+ #include <asm/errno.h>
+ #include <asm/asm.h>
++#include <asm/alternative-asm.h>
+ /*
+  * Checksum copy with exception handling.
+@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
+       CFI_RESTORE rbp
+       addq $7*8, %rsp
+       CFI_ADJUST_CFA_OFFSET -7*8
++      pax_force_retaddr 0, 1
+       ret
+       CFI_RESTORE_STATE
+diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
+index 25b7ae8..c40113e 100644
+--- a/arch/x86/lib/csum-wrappers_64.c
++++ b/arch/x86/lib/csum-wrappers_64.c
+@@ -52,8 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
+                       len -= 2;
+               }
+       }
+-      isum = csum_partial_copy_generic((__force const void *)src,
++      pax_open_userland();
++      stac();
++      isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
+                               dst, len, isum, errp, NULL);
++      clac();
++      pax_close_userland();
+       if (unlikely(*errp))
+               goto out_err;
+@@ -105,8 +109,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
+       }
+       *errp = 0;
+-      return csum_partial_copy_generic(src, (void __force *)dst,
++      pax_open_userland();
++      stac();
++      isum = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
+                                        len, isum, NULL, errp);
++      clac();
++      pax_close_userland();
++      return isum;
+ }
+ EXPORT_SYMBOL(csum_partial_copy_to_user);
+diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
+index a451235..1daa956 100644
+--- a/arch/x86/lib/getuser.S
++++ b/arch/x86/lib/getuser.S
+@@ -33,17 +33,40 @@
+ #include <asm/thread_info.h>
+ #include <asm/asm.h>
+ #include <asm/smap.h>
++#include <asm/segment.h>
++#include <asm/pgtable.h>
++#include <asm/alternative-asm.h>
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg gs;
++#else
++#define __copyuser_seg
++#endif
+       .text
+ ENTRY(__get_user_1)
+       CFI_STARTPROC
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+       GET_THREAD_INFO(%_ASM_DX)
+       cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+       jae bad_get_user
+       ASM_STAC
+-1:    movzbl (%_ASM_AX),%edx
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      mov pax_user_shadow_base,%_ASM_DX
++      cmp %_ASM_DX,%_ASM_AX
++      jae 1234f
++      add %_ASM_DX,%_ASM_AX
++1234:
++#endif
++
++#endif
++
++1:    __copyuser_seg movzbl (%_ASM_AX),%edx
+       xor %eax,%eax
+       ASM_CLAC
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ ENDPROC(__get_user_1)
+@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
+ ENTRY(__get_user_2)
+       CFI_STARTPROC
+       add $1,%_ASM_AX
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+       jc bad_get_user
+       GET_THREAD_INFO(%_ASM_DX)
+       cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+       jae bad_get_user
+       ASM_STAC
+-2:    movzwl -1(%_ASM_AX),%edx
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      mov pax_user_shadow_base,%_ASM_DX
++      cmp %_ASM_DX,%_ASM_AX
++      jae 1234f
++      add %_ASM_DX,%_ASM_AX
++1234:
++#endif
++
++#endif
++
++2:    __copyuser_seg movzwl -1(%_ASM_AX),%edx
+       xor %eax,%eax
+       ASM_CLAC
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ ENDPROC(__get_user_2)
+@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
+ ENTRY(__get_user_4)
+       CFI_STARTPROC
+       add $3,%_ASM_AX
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+       jc bad_get_user
+       GET_THREAD_INFO(%_ASM_DX)
+       cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+       jae bad_get_user
+       ASM_STAC
+-3:    movl -3(%_ASM_AX),%edx
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      mov pax_user_shadow_base,%_ASM_DX
++      cmp %_ASM_DX,%_ASM_AX
++      jae 1234f
++      add %_ASM_DX,%_ASM_AX
++1234:
++#endif
++
++#endif
++
++3:    __copyuser_seg movl -3(%_ASM_AX),%edx
+       xor %eax,%eax
+       ASM_CLAC
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ ENDPROC(__get_user_4)
+@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
+       GET_THREAD_INFO(%_ASM_DX)
+       cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+       jae bad_get_user
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      mov pax_user_shadow_base,%_ASM_DX
++      cmp %_ASM_DX,%_ASM_AX
++      jae 1234f
++      add %_ASM_DX,%_ASM_AX
++1234:
++#endif
++
+       ASM_STAC
+ 4:    movq -7(%_ASM_AX),%rdx
+       xor %eax,%eax
+       ASM_CLAC
++      pax_force_retaddr
+       ret
+ #else
+       add $7,%_ASM_AX
+@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
+       cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+       jae bad_get_user_8
+       ASM_STAC
+-4:    movl -7(%_ASM_AX),%edx
+-5:    movl -3(%_ASM_AX),%ecx
++4:    __copyuser_seg movl -7(%_ASM_AX),%edx
++5:    __copyuser_seg movl -3(%_ASM_AX),%ecx
+       xor %eax,%eax
+       ASM_CLAC
++      pax_force_retaddr
+       ret
+ #endif
+       CFI_ENDPROC
+@@ -113,6 +175,7 @@ bad_get_user:
+       xor %edx,%edx
+       mov $(-EFAULT),%_ASM_AX
+       ASM_CLAC
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ END(bad_get_user)
+@@ -124,6 +187,7 @@ bad_get_user_8:
+       xor %ecx,%ecx
+       mov $(-EFAULT),%_ASM_AX
+       ASM_CLAC
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ END(bad_get_user_8)
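
In the getuser.S hunks above, the amd64 CONFIG_PAX_MEMORY_UDEREF path loads `pax_user_shadow_base` and, if the supplied pointer is below it, adds the base so the access goes through the kernel-side shadow mapping of userland (the `cmp`/`jae`/`add` sequence before the actual load). A minimal C sketch of that pointer adjustment follows; the shadow-base value is made up for the example and the helper name is not from the patch.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up value for the demo; the real base is a kernel variable. */
static const uint64_t pax_user_shadow_base = 0xffff800000000000ull;

/* Mirrors the asm: cmp shadow_base, addr; jae skip; add shadow_base, addr.
 * Addresses below the base (plain userland pointers) are redirected into
 * the shadow mapping, anything at or above it is left untouched. */
static uint64_t uderef_adjust(uint64_t addr)
{
    if (addr < pax_user_shadow_base)
        addr += pax_user_shadow_base;
    return addr;
}

int main(void)
{
    printf("%#" PRIx64 "\n", uderef_adjust(0x7fffdeadb000ull));      /* user pointer: remapped */
    printf("%#" PRIx64 "\n", uderef_adjust(0xffff8800deadb000ull));  /* already above base: unchanged */
    return 0;
}
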
+diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
+index 54fcffe..7be149e 100644
+--- a/arch/x86/lib/insn.c
++++ b/arch/x86/lib/insn.c
+@@ -20,8 +20,10 @@
+ #ifdef __KERNEL__
+ #include <linux/string.h>
++#include <asm/pgtable_types.h>
+ #else
+ #include <string.h>
++#define ktla_ktva(addr) addr
+ #endif
+ #include <asm/inat.h>
+ #include <asm/insn.h>
+@@ -53,8 +55,8 @@
+ void insn_init(struct insn *insn, const void *kaddr, int x86_64)
+ {
+       memset(insn, 0, sizeof(*insn));
+-      insn->kaddr = kaddr;
+-      insn->next_byte = kaddr;
++      insn->kaddr = ktla_ktva(kaddr);
++      insn->next_byte = ktla_ktva(kaddr);
+       insn->x86_64 = x86_64 ? 1 : 0;
+       insn->opnd_bytes = 4;
+       if (x86_64)
+diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
+index 05a95e7..326f2fa 100644
+--- a/arch/x86/lib/iomap_copy_64.S
++++ b/arch/x86/lib/iomap_copy_64.S
+@@ -17,6 +17,7 @@
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
++#include <asm/alternative-asm.h>
+ /*
+  * override generic version in lib/iomap_copy.c
+@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
+       CFI_STARTPROC
+       movl %edx,%ecx
+       rep movsd
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ ENDPROC(__iowrite32_copy)
+diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
+index 56313a3..9b59269 100644
+--- a/arch/x86/lib/memcpy_64.S
++++ b/arch/x86/lib/memcpy_64.S
+@@ -24,7 +24,7 @@
+  * This gets patched over the unrolled variant (below) via the
+  * alternative instructions framework:
+  */
+-      .section .altinstr_replacement, "ax", @progbits
++      .section .altinstr_replacement, "a", @progbits
+ .Lmemcpy_c:
+       movq %rdi, %rax
+       movq %rdx, %rcx
+@@ -33,6 +33,7 @@
+       rep movsq
+       movl %edx, %ecx
+       rep movsb
++      pax_force_retaddr
+       ret
+ .Lmemcpy_e:
+       .previous
+@@ -44,11 +45,12 @@
+  * This gets patched over the unrolled variant (below) via the
+  * alternative instructions framework:
+  */
+-      .section .altinstr_replacement, "ax", @progbits
++      .section .altinstr_replacement, "a", @progbits
+ .Lmemcpy_c_e:
+       movq %rdi, %rax
+       movq %rdx, %rcx
+       rep movsb
++      pax_force_retaddr
+       ret
+ .Lmemcpy_e_e:
+       .previous
+@@ -76,13 +78,13 @@ ENTRY(memcpy)
+        */
+       movq 0*8(%rsi), %r8
+       movq 1*8(%rsi), %r9
+-      movq 2*8(%rsi), %r10
++      movq 2*8(%rsi), %rcx
+       movq 3*8(%rsi), %r11
+       leaq 4*8(%rsi), %rsi
+       movq %r8,       0*8(%rdi)
+       movq %r9,       1*8(%rdi)
+-      movq %r10,      2*8(%rdi)
++      movq %rcx,      2*8(%rdi)
+       movq %r11,      3*8(%rdi)
+       leaq 4*8(%rdi), %rdi
+       jae  .Lcopy_forward_loop
+@@ -105,12 +107,12 @@ ENTRY(memcpy)
+       subq $0x20,     %rdx
+       movq -1*8(%rsi),        %r8
+       movq -2*8(%rsi),        %r9
+-      movq -3*8(%rsi),        %r10
++      movq -3*8(%rsi),        %rcx
+       movq -4*8(%rsi),        %r11
+       leaq -4*8(%rsi),        %rsi
+       movq %r8,               -1*8(%rdi)
+       movq %r9,               -2*8(%rdi)
+-      movq %r10,              -3*8(%rdi)
++      movq %rcx,              -3*8(%rdi)
+       movq %r11,              -4*8(%rdi)
+       leaq -4*8(%rdi),        %rdi
+       jae  .Lcopy_backward_loop
+@@ -130,12 +132,13 @@ ENTRY(memcpy)
+        */
+       movq 0*8(%rsi), %r8
+       movq 1*8(%rsi), %r9
+-      movq -2*8(%rsi, %rdx),  %r10
++      movq -2*8(%rsi, %rdx),  %rcx
+       movq -1*8(%rsi, %rdx),  %r11
+       movq %r8,       0*8(%rdi)
+       movq %r9,       1*8(%rdi)
+-      movq %r10,      -2*8(%rdi, %rdx)
++      movq %rcx,      -2*8(%rdi, %rdx)
+       movq %r11,      -1*8(%rdi, %rdx)
++      pax_force_retaddr
+       retq
+       .p2align 4
+ .Lless_16bytes:
+@@ -148,6 +151,7 @@ ENTRY(memcpy)
+       movq -1*8(%rsi, %rdx),  %r9
+       movq %r8,       0*8(%rdi)
+       movq %r9,       -1*8(%rdi, %rdx)
++      pax_force_retaddr
+       retq
+       .p2align 4
+ .Lless_8bytes:
+@@ -161,6 +165,7 @@ ENTRY(memcpy)
+       movl -4(%rsi, %rdx), %r8d
+       movl %ecx, (%rdi)
+       movl %r8d, -4(%rdi, %rdx)
++      pax_force_retaddr
+       retq
+       .p2align 4
+ .Lless_3bytes:
+@@ -179,6 +184,7 @@ ENTRY(memcpy)
+       movb %cl, (%rdi)
+ .Lend:
++      pax_force_retaddr
+       retq
+       CFI_ENDPROC
+ ENDPROC(memcpy)
+diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
+index 65268a6..5aa7815 100644
+--- a/arch/x86/lib/memmove_64.S
++++ b/arch/x86/lib/memmove_64.S
+@@ -61,13 +61,13 @@ ENTRY(memmove)
+ 5:
+       sub $0x20, %rdx
+       movq 0*8(%rsi), %r11
+-      movq 1*8(%rsi), %r10
++      movq 1*8(%rsi), %rcx
+       movq 2*8(%rsi), %r9
+       movq 3*8(%rsi), %r8
+       leaq 4*8(%rsi), %rsi
+       movq %r11, 0*8(%rdi)
+-      movq %r10, 1*8(%rdi)
++      movq %rcx, 1*8(%rdi)
+       movq %r9, 2*8(%rdi)
+       movq %r8, 3*8(%rdi)
+       leaq 4*8(%rdi), %rdi
+@@ -81,10 +81,10 @@ ENTRY(memmove)
+ 4:
+       movq %rdx, %rcx
+       movq -8(%rsi, %rdx), %r11
+-      lea -8(%rdi, %rdx), %r10
++      lea -8(%rdi, %rdx), %r9
+       shrq $3, %rcx
+       rep movsq
+-      movq %r11, (%r10)
++      movq %r11, (%r9)
+       jmp 13f
+ .Lmemmove_end_forward:
+@@ -95,14 +95,14 @@ ENTRY(memmove)
+ 7:
+       movq %rdx, %rcx
+       movq (%rsi), %r11
+-      movq %rdi, %r10
++      movq %rdi, %r9
+       leaq -8(%rsi, %rdx), %rsi
+       leaq -8(%rdi, %rdx), %rdi
+       shrq $3, %rcx
+       std
+       rep movsq
+       cld
+-      movq %r11, (%r10)
++      movq %r11, (%r9)
+       jmp 13f
+       /*
+@@ -127,13 +127,13 @@ ENTRY(memmove)
+ 8:
+       subq $0x20, %rdx
+       movq -1*8(%rsi), %r11
+-      movq -2*8(%rsi), %r10
++      movq -2*8(%rsi), %rcx
+       movq -3*8(%rsi), %r9
+       movq -4*8(%rsi), %r8
+       leaq -4*8(%rsi), %rsi
+       movq %r11, -1*8(%rdi)
+-      movq %r10, -2*8(%rdi)
++      movq %rcx, -2*8(%rdi)
+       movq %r9, -3*8(%rdi)
+       movq %r8, -4*8(%rdi)
+       leaq -4*8(%rdi), %rdi
+@@ -151,11 +151,11 @@ ENTRY(memmove)
+        * Move data from 16 bytes to 31 bytes.
+        */
+       movq 0*8(%rsi), %r11
+-      movq 1*8(%rsi), %r10
++      movq 1*8(%rsi), %rcx
+       movq -2*8(%rsi, %rdx), %r9
+       movq -1*8(%rsi, %rdx), %r8
+       movq %r11, 0*8(%rdi)
+-      movq %r10, 1*8(%rdi)
++      movq %rcx, 1*8(%rdi)
+       movq %r9, -2*8(%rdi, %rdx)
+       movq %r8, -1*8(%rdi, %rdx)
+       jmp 13f
+@@ -167,9 +167,9 @@ ENTRY(memmove)
+        * Move data from 8 bytes to 15 bytes.
+        */
+       movq 0*8(%rsi), %r11
+-      movq -1*8(%rsi, %rdx), %r10
++      movq -1*8(%rsi, %rdx), %r9
+       movq %r11, 0*8(%rdi)
+-      movq %r10, -1*8(%rdi, %rdx)
++      movq %r9, -1*8(%rdi, %rdx)
+       jmp 13f
+ 10:
+       cmpq $4, %rdx
+@@ -178,9 +178,9 @@ ENTRY(memmove)
+        * Move data from 4 bytes to 7 bytes.
+        */
+       movl (%rsi), %r11d
+-      movl -4(%rsi, %rdx), %r10d
++      movl -4(%rsi, %rdx), %r9d
+       movl %r11d, (%rdi)
+-      movl %r10d, -4(%rdi, %rdx)
++      movl %r9d, -4(%rdi, %rdx)
+       jmp 13f
+ 11:
+       cmp $2, %rdx
+@@ -189,9 +189,9 @@ ENTRY(memmove)
+        * Move data from 2 bytes to 3 bytes.
+        */
+       movw (%rsi), %r11w
+-      movw -2(%rsi, %rdx), %r10w
++      movw -2(%rsi, %rdx), %r9w
+       movw %r11w, (%rdi)
+-      movw %r10w, -2(%rdi, %rdx)
++      movw %r9w, -2(%rdi, %rdx)
+       jmp 13f
+ 12:
+       cmp $1, %rdx
+@@ -202,14 +202,16 @@ ENTRY(memmove)
+       movb (%rsi), %r11b
+       movb %r11b, (%rdi)
+ 13:
++      pax_force_retaddr
+       retq
+       CFI_ENDPROC
+-      .section .altinstr_replacement,"ax"
++      .section .altinstr_replacement,"a"
+ .Lmemmove_begin_forward_efs:
+       /* Forward moving data. */
+       movq %rdx, %rcx
+       rep movsb
++      pax_force_retaddr
+       retq
+ .Lmemmove_end_forward_efs:
+       .previous
+diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
+index 2dcb380..50a78bc 100644
+--- a/arch/x86/lib/memset_64.S
++++ b/arch/x86/lib/memset_64.S
+@@ -16,7 +16,7 @@
+  * 
+  * rax   original destination
+  */   
+-      .section .altinstr_replacement, "ax", @progbits
++      .section .altinstr_replacement, "a", @progbits
+ .Lmemset_c:
+       movq %rdi,%r9
+       movq %rdx,%rcx
+@@ -30,6 +30,7 @@
+       movl %edx,%ecx
+       rep stosb
+       movq %r9,%rax
++      pax_force_retaddr
+       ret
+ .Lmemset_e:
+       .previous
+@@ -45,13 +46,14 @@
+  *
+  * rax   original destination
+  */
+-      .section .altinstr_replacement, "ax", @progbits
++      .section .altinstr_replacement, "a", @progbits
+ .Lmemset_c_e:
+       movq %rdi,%r9
+       movb %sil,%al
+       movq %rdx,%rcx
+       rep stosb
+       movq %r9,%rax
++      pax_force_retaddr
+       ret
+ .Lmemset_e_e:
+       .previous
+@@ -59,7 +61,7 @@
+ ENTRY(memset)
+ ENTRY(__memset)
+       CFI_STARTPROC
+-      movq %rdi,%r10
++      movq %rdi,%r11
+       /* expand byte value  */
+       movzbl %sil,%ecx
+@@ -117,7 +119,8 @@ ENTRY(__memset)
+       jnz     .Lloop_1
+ .Lende:
+-      movq    %r10,%rax
++      movq    %r11,%rax
++      pax_force_retaddr
+       ret
+       CFI_RESTORE_STATE
+diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
+index c9f2d9b..e7fd2c0 100644
+--- a/arch/x86/lib/mmx_32.c
++++ b/arch/x86/lib/mmx_32.c
+@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
+ {
+       void *p;
+       int i;
++      unsigned long cr0;
+       if (unlikely(in_interrupt()))
+               return __memcpy(to, from, len);
+@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
+       kernel_fpu_begin();
+       __asm__ __volatile__ (
+-              "1: prefetch (%0)\n"            /* This set is 28 bytes */
+-              "   prefetch 64(%0)\n"
+-              "   prefetch 128(%0)\n"
+-              "   prefetch 192(%0)\n"
+-              "   prefetch 256(%0)\n"
++              "1: prefetch (%1)\n"            /* This set is 28 bytes */
++              "   prefetch 64(%1)\n"
++              "   prefetch 128(%1)\n"
++              "   prefetch 192(%1)\n"
++              "   prefetch 256(%1)\n"
+               "2:  \n"
+               ".section .fixup, \"ax\"\n"
+-              "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++              "3:  \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %%cr0, %0\n"
++              "   movl %0, %%eax\n"
++              "   andl $0xFFFEFFFF, %%eax\n"
++              "   movl %%eax, %%cr0\n"
++#endif
++
++              "   movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %0, %%cr0\n"
++#endif
++
+               "   jmp 2b\n"
+               ".previous\n"
+                       _ASM_EXTABLE(1b, 3b)
+-                      : : "r" (from));
++                      : "=&r" (cr0) : "r" (from) : "ax");
+       for ( ; i > 5; i--) {
+               __asm__ __volatile__ (
+-              "1:  prefetch 320(%0)\n"
+-              "2:  movq (%0), %%mm0\n"
+-              "  movq 8(%0), %%mm1\n"
+-              "  movq 16(%0), %%mm2\n"
+-              "  movq 24(%0), %%mm3\n"
+-              "  movq %%mm0, (%1)\n"
+-              "  movq %%mm1, 8(%1)\n"
+-              "  movq %%mm2, 16(%1)\n"
+-              "  movq %%mm3, 24(%1)\n"
+-              "  movq 32(%0), %%mm0\n"
+-              "  movq 40(%0), %%mm1\n"
+-              "  movq 48(%0), %%mm2\n"
+-              "  movq 56(%0), %%mm3\n"
+-              "  movq %%mm0, 32(%1)\n"
+-              "  movq %%mm1, 40(%1)\n"
+-              "  movq %%mm2, 48(%1)\n"
+-              "  movq %%mm3, 56(%1)\n"
++              "1:  prefetch 320(%1)\n"
++              "2:  movq (%1), %%mm0\n"
++              "  movq 8(%1), %%mm1\n"
++              "  movq 16(%1), %%mm2\n"
++              "  movq 24(%1), %%mm3\n"
++              "  movq %%mm0, (%2)\n"
++              "  movq %%mm1, 8(%2)\n"
++              "  movq %%mm2, 16(%2)\n"
++              "  movq %%mm3, 24(%2)\n"
++              "  movq 32(%1), %%mm0\n"
++              "  movq 40(%1), %%mm1\n"
++              "  movq 48(%1), %%mm2\n"
++              "  movq 56(%1), %%mm3\n"
++              "  movq %%mm0, 32(%2)\n"
++              "  movq %%mm1, 40(%2)\n"
++              "  movq %%mm2, 48(%2)\n"
++              "  movq %%mm3, 56(%2)\n"
+               ".section .fixup, \"ax\"\n"
+-              "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++              "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %%cr0, %0\n"
++              "   movl %0, %%eax\n"
++              "   andl $0xFFFEFFFF, %%eax\n"
++              "   movl %%eax, %%cr0\n"
++#endif
++
++              "   movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %0, %%cr0\n"
++#endif
++
+               "   jmp 2b\n"
+               ".previous\n"
+                       _ASM_EXTABLE(1b, 3b)
+-                      : : "r" (from), "r" (to) : "memory");
++                      : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+               from += 64;
+               to += 64;
+@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
+ static void fast_copy_page(void *to, void *from)
+ {
+       int i;
++      unsigned long cr0;
+       kernel_fpu_begin();
+@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
+        * but that is for later. -AV
+        */
+       __asm__ __volatile__(
+-              "1: prefetch (%0)\n"
+-              "   prefetch 64(%0)\n"
+-              "   prefetch 128(%0)\n"
+-              "   prefetch 192(%0)\n"
+-              "   prefetch 256(%0)\n"
++              "1: prefetch (%1)\n"
++              "   prefetch 64(%1)\n"
++              "   prefetch 128(%1)\n"
++              "   prefetch 192(%1)\n"
++              "   prefetch 256(%1)\n"
+               "2:  \n"
+               ".section .fixup, \"ax\"\n"
+-              "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++              "3:  \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %%cr0, %0\n"
++              "   movl %0, %%eax\n"
++              "   andl $0xFFFEFFFF, %%eax\n"
++              "   movl %%eax, %%cr0\n"
++#endif
++
++              "   movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %0, %%cr0\n"
++#endif
++
+               "   jmp 2b\n"
+               ".previous\n"
+-                      _ASM_EXTABLE(1b, 3b) : : "r" (from));
++                      _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
+       for (i = 0; i < (4096-320)/64; i++) {
+               __asm__ __volatile__ (
+-              "1: prefetch 320(%0)\n"
+-              "2: movq (%0), %%mm0\n"
+-              "   movntq %%mm0, (%1)\n"
+-              "   movq 8(%0), %%mm1\n"
+-              "   movntq %%mm1, 8(%1)\n"
+-              "   movq 16(%0), %%mm2\n"
+-              "   movntq %%mm2, 16(%1)\n"
+-              "   movq 24(%0), %%mm3\n"
+-              "   movntq %%mm3, 24(%1)\n"
+-              "   movq 32(%0), %%mm4\n"
+-              "   movntq %%mm4, 32(%1)\n"
+-              "   movq 40(%0), %%mm5\n"
+-              "   movntq %%mm5, 40(%1)\n"
+-              "   movq 48(%0), %%mm6\n"
+-              "   movntq %%mm6, 48(%1)\n"
+-              "   movq 56(%0), %%mm7\n"
+-              "   movntq %%mm7, 56(%1)\n"
++              "1: prefetch 320(%1)\n"
++              "2: movq (%1), %%mm0\n"
++              "   movntq %%mm0, (%2)\n"
++              "   movq 8(%1), %%mm1\n"
++              "   movntq %%mm1, 8(%2)\n"
++              "   movq 16(%1), %%mm2\n"
++              "   movntq %%mm2, 16(%2)\n"
++              "   movq 24(%1), %%mm3\n"
++              "   movntq %%mm3, 24(%2)\n"
++              "   movq 32(%1), %%mm4\n"
++              "   movntq %%mm4, 32(%2)\n"
++              "   movq 40(%1), %%mm5\n"
++              "   movntq %%mm5, 40(%2)\n"
++              "   movq 48(%1), %%mm6\n"
++              "   movntq %%mm6, 48(%2)\n"
++              "   movq 56(%1), %%mm7\n"
++              "   movntq %%mm7, 56(%2)\n"
+               ".section .fixup, \"ax\"\n"
+-              "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++              "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %%cr0, %0\n"
++              "   movl %0, %%eax\n"
++              "   andl $0xFFFEFFFF, %%eax\n"
++              "   movl %%eax, %%cr0\n"
++#endif
++
++              "   movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %0, %%cr0\n"
++#endif
++
+               "   jmp 2b\n"
+               ".previous\n"
+-              _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
++              _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+               from += 64;
+               to += 64;
+@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
+ static void fast_copy_page(void *to, void *from)
+ {
+       int i;
++      unsigned long cr0;
+       kernel_fpu_begin();
+       __asm__ __volatile__ (
+-              "1: prefetch (%0)\n"
+-              "   prefetch 64(%0)\n"
+-              "   prefetch 128(%0)\n"
+-              "   prefetch 192(%0)\n"
+-              "   prefetch 256(%0)\n"
++              "1: prefetch (%1)\n"
++              "   prefetch 64(%1)\n"
++              "   prefetch 128(%1)\n"
++              "   prefetch 192(%1)\n"
++              "   prefetch 256(%1)\n"
+               "2:  \n"
+               ".section .fixup, \"ax\"\n"
+-              "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++              "3:  \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %%cr0, %0\n"
++              "   movl %0, %%eax\n"
++              "   andl $0xFFFEFFFF, %%eax\n"
++              "   movl %%eax, %%cr0\n"
++#endif
++
++              "   movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %0, %%cr0\n"
++#endif
++
+               "   jmp 2b\n"
+               ".previous\n"
+-                      _ASM_EXTABLE(1b, 3b) : : "r" (from));
++                      _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
+       for (i = 0; i < 4096/64; i++) {
+               __asm__ __volatile__ (
+-              "1: prefetch 320(%0)\n"
+-              "2: movq (%0), %%mm0\n"
+-              "   movq 8(%0), %%mm1\n"
+-              "   movq 16(%0), %%mm2\n"
+-              "   movq 24(%0), %%mm3\n"
+-              "   movq %%mm0, (%1)\n"
+-              "   movq %%mm1, 8(%1)\n"
+-              "   movq %%mm2, 16(%1)\n"
+-              "   movq %%mm3, 24(%1)\n"
+-              "   movq 32(%0), %%mm0\n"
+-              "   movq 40(%0), %%mm1\n"
+-              "   movq 48(%0), %%mm2\n"
+-              "   movq 56(%0), %%mm3\n"
+-              "   movq %%mm0, 32(%1)\n"
+-              "   movq %%mm1, 40(%1)\n"
+-              "   movq %%mm2, 48(%1)\n"
+-              "   movq %%mm3, 56(%1)\n"
++              "1: prefetch 320(%1)\n"
++              "2: movq (%1), %%mm0\n"
++              "   movq 8(%1), %%mm1\n"
++              "   movq 16(%1), %%mm2\n"
++              "   movq 24(%1), %%mm3\n"
++              "   movq %%mm0, (%2)\n"
++              "   movq %%mm1, 8(%2)\n"
++              "   movq %%mm2, 16(%2)\n"
++              "   movq %%mm3, 24(%2)\n"
++              "   movq 32(%1), %%mm0\n"
++              "   movq 40(%1), %%mm1\n"
++              "   movq 48(%1), %%mm2\n"
++              "   movq 56(%1), %%mm3\n"
++              "   movq %%mm0, 32(%2)\n"
++              "   movq %%mm1, 40(%2)\n"
++              "   movq %%mm2, 48(%2)\n"
++              "   movq %%mm3, 56(%2)\n"
+               ".section .fixup, \"ax\"\n"
+-              "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++              "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %%cr0, %0\n"
++              "   movl %0, %%eax\n"
++              "   andl $0xFFFEFFFF, %%eax\n"
++              "   movl %%eax, %%cr0\n"
++#endif
++
++              "   movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %0, %%cr0\n"
++#endif
++
+               "   jmp 2b\n"
+               ".previous\n"
+                       _ASM_EXTABLE(1b, 3b)
+-                      : : "r" (from), "r" (to) : "memory");
++                      : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+               from += 64;
+               to += 64;
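
The mmx_32.c fixup paths patch kernel text (the `movw $0x1AEB, 1b` store rewrites the prefetch at label 1 into a short jmp), which faults once CONFIG_PAX_KERNEXEC makes kernel text read-only; the hunks therefore save CR0 into the new `cr0` output operand, clear CR0.WP around the store, and restore it afterwards, which is why the asm gains a "=&r" output and an "ax" clobber. The mask 0xFFFEFFFF clears exactly bit 16, the WP bit; the small C check below verifies that, using a made-up CR0 value.

#include <stdint.h>
#include <stdio.h>

#define X86_CR0_WP (1u << 16)   /* CR0 write-protect bit */

int main(void)
{
    uint32_t cr0 = 0x8005003bu;              /* made-up CR0 value for the demo */
    uint32_t relaxed = cr0 & 0xFFFEFFFFu;    /* mask used by the KERNEXEC hunks */

    /* The two values differ in exactly the WP bit, nothing else. */
    printf("only WP cleared: %s\n",
           (cr0 ^ relaxed) == X86_CR0_WP ? "yes" : "no");
    return 0;
}
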
+diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
+index f6d13ee..aca5f0b 100644
+--- a/arch/x86/lib/msr-reg.S
++++ b/arch/x86/lib/msr-reg.S
+@@ -3,6 +3,7 @@
+ #include <asm/dwarf2.h>
+ #include <asm/asm.h>
+ #include <asm/msr.h>
++#include <asm/alternative-asm.h>
+ #ifdef CONFIG_X86_64
+ /*
+@@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
+       CFI_STARTPROC
+       pushq_cfi %rbx
+       pushq_cfi %rbp
+-      movq    %rdi, %r10      /* Save pointer */
++      movq    %rdi, %r9       /* Save pointer */
+       xorl    %r11d, %r11d    /* Return value */
+       movl    (%rdi), %eax
+       movl    4(%rdi), %ecx
+@@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
+       movl    28(%rdi), %edi
+       CFI_REMEMBER_STATE
+ 1:    \op
+-2:    movl    %eax, (%r10)
++2:    movl    %eax, (%r9)
+       movl    %r11d, %eax     /* Return value */
+-      movl    %ecx, 4(%r10)
+-      movl    %edx, 8(%r10)
+-      movl    %ebx, 12(%r10)
+-      movl    %ebp, 20(%r10)
+-      movl    %esi, 24(%r10)
+-      movl    %edi, 28(%r10)
++      movl    %ecx, 4(%r9)
++      movl    %edx, 8(%r9)
++      movl    %ebx, 12(%r9)
++      movl    %ebp, 20(%r9)
++      movl    %esi, 24(%r9)
++      movl    %edi, 28(%r9)
+       popq_cfi %rbp
+       popq_cfi %rbx
++      pax_force_retaddr
+       ret
+ 3:
+       CFI_RESTORE_STATE
+diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
+index fc6ba17..d4d989d 100644
+--- a/arch/x86/lib/putuser.S
++++ b/arch/x86/lib/putuser.S
+@@ -16,7 +16,9 @@
+ #include <asm/errno.h>
+ #include <asm/asm.h>
+ #include <asm/smap.h>
+-
++#include <asm/segment.h>
++#include <asm/pgtable.h>
++#include <asm/alternative-asm.h>
+ /*
+  * __put_user_X
+@@ -30,57 +32,125 @@
+  * as they get called from within inline assembly.
+  */
+-#define ENTER CFI_STARTPROC ; \
+-              GET_THREAD_INFO(%_ASM_BX)
+-#define EXIT  ASM_CLAC ;      \
+-              ret ;           \
++#define ENTER CFI_STARTPROC
++#define EXIT  ASM_CLAC ;              \
++              pax_force_retaddr ;     \
++              ret ;                   \
+               CFI_ENDPROC
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define _DEST %_ASM_CX,%_ASM_BX
++#else
++#define _DEST %_ASM_CX
++#endif
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg gs;
++#else
++#define __copyuser_seg
++#endif
++
+ .text
+ ENTRY(__put_user_1)
+       ENTER
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
++      GET_THREAD_INFO(%_ASM_BX)
+       cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
+       jae bad_put_user
+       ASM_STAC
+-1:    movb %al,(%_ASM_CX)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      mov pax_user_shadow_base,%_ASM_BX
++      cmp %_ASM_BX,%_ASM_CX
++      jb 1234f
++      xor %ebx,%ebx
++1234:
++#endif
++
++#endif
++
++1:    __copyuser_seg movb %al,(_DEST)
+       xor %eax,%eax
+       EXIT
+ ENDPROC(__put_user_1)
+ ENTRY(__put_user_2)
+       ENTER
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
++      GET_THREAD_INFO(%_ASM_BX)
+       mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+       sub $1,%_ASM_BX
+       cmp %_ASM_BX,%_ASM_CX
+       jae bad_put_user
+       ASM_STAC
+-2:    movw %ax,(%_ASM_CX)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      mov pax_user_shadow_base,%_ASM_BX
++      cmp %_ASM_BX,%_ASM_CX
++      jb 1234f
++      xor %ebx,%ebx
++1234:
++#endif
++
++#endif
++
++2:    __copyuser_seg movw %ax,(_DEST)
+       xor %eax,%eax
+       EXIT
+ ENDPROC(__put_user_2)
+ ENTRY(__put_user_4)
+       ENTER
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
++      GET_THREAD_INFO(%_ASM_BX)
+       mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+       sub $3,%_ASM_BX
+       cmp %_ASM_BX,%_ASM_CX
+       jae bad_put_user
+       ASM_STAC
+-3:    movl %eax,(%_ASM_CX)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      mov pax_user_shadow_base,%_ASM_BX
++      cmp %_ASM_BX,%_ASM_CX
++      jb 1234f
++      xor %ebx,%ebx
++1234:
++#endif
++
++#endif
++
++3:    __copyuser_seg movl %eax,(_DEST)
+       xor %eax,%eax
+       EXIT
+ ENDPROC(__put_user_4)
+ ENTRY(__put_user_8)
+       ENTER
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
++      GET_THREAD_INFO(%_ASM_BX)
+       mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+       sub $7,%_ASM_BX
+       cmp %_ASM_BX,%_ASM_CX
+       jae bad_put_user
+       ASM_STAC
+-4:    mov %_ASM_AX,(%_ASM_CX)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      mov pax_user_shadow_base,%_ASM_BX
++      cmp %_ASM_BX,%_ASM_CX
++      jb 1234f
++      xor %ebx,%ebx
++1234:
++#endif
++
++#endif
++
++4:    __copyuser_seg mov %_ASM_AX,(_DEST)
+ #ifdef CONFIG_X86_32
+-5:    movl %edx,4(%_ASM_CX)
++5:    __copyuser_seg movl %edx,4(_DEST)
+ #endif
+       xor %eax,%eax
+       EXIT
+diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
+index 1cad221..de671ee 100644
+--- a/arch/x86/lib/rwlock.S
++++ b/arch/x86/lib/rwlock.S
+@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
+       FRAME
+ 0:    LOCK_PREFIX
+       WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
++
++#ifdef CONFIG_PAX_REFCOUNT
++      jno 1234f
++      LOCK_PREFIX
++      WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
++      int $4
++1234:
++      _ASM_EXTABLE(1234b, 1234b)
++#endif
++
+ 1:    rep; nop
+       cmpl    $WRITE_LOCK_CMP, (%__lock_ptr)
+       jne     1b
+       LOCK_PREFIX
+       WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
++
++#ifdef CONFIG_PAX_REFCOUNT
++      jno 1234f
++      LOCK_PREFIX
++      WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
++      int $4
++1234:
++      _ASM_EXTABLE(1234b, 1234b)
++#endif
++
+       jnz     0b
+       ENDFRAME
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ END(__write_lock_failed)
+@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
+       FRAME
+ 0:    LOCK_PREFIX
+       READ_LOCK_SIZE(inc) (%__lock_ptr)
++
++#ifdef CONFIG_PAX_REFCOUNT
++      jno 1234f
++      LOCK_PREFIX
++      READ_LOCK_SIZE(dec) (%__lock_ptr)
++      int $4
++1234:
++      _ASM_EXTABLE(1234b, 1234b)
++#endif
++
+ 1:    rep; nop
+       READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
+       js      1b
+       LOCK_PREFIX
+       READ_LOCK_SIZE(dec) (%__lock_ptr)
++
++#ifdef CONFIG_PAX_REFCOUNT
++      jno 1234f
++      LOCK_PREFIX
++      READ_LOCK_SIZE(inc) (%__lock_ptr)
++      int $4
++1234:
++      _ASM_EXTABLE(1234b, 1234b)
++#endif
++
+       js      0b
+       ENDFRAME
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ END(__read_lock_failed)
+diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
+index 5dff5f0..cadebf4 100644
+--- a/arch/x86/lib/rwsem.S
++++ b/arch/x86/lib/rwsem.S
+@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
+       __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
+       CFI_RESTORE __ASM_REG(dx)
+       restore_common_regs
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ ENDPROC(call_rwsem_down_read_failed)
+@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
+       movq %rax,%rdi
+       call rwsem_down_write_failed
+       restore_common_regs
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ ENDPROC(call_rwsem_down_write_failed)
+@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
+       movq %rax,%rdi
+       call rwsem_wake
+       restore_common_regs
+-1:    ret
++1:    pax_force_retaddr
++      ret
+       CFI_ENDPROC
+ ENDPROC(call_rwsem_wake)
+@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
+       __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
+       CFI_RESTORE __ASM_REG(dx)
+       restore_common_regs
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ ENDPROC(call_rwsem_downgrade_wake)
+diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
+index a63efd6..ccecad8 100644
+--- a/arch/x86/lib/thunk_64.S
++++ b/arch/x86/lib/thunk_64.S
+@@ -8,6 +8,7 @@
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
+ #include <asm/calling.h>
++#include <asm/alternative-asm.h>
+       /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
+       .macro THUNK name, func, put_ret_addr_in_rdi=0
+@@ -41,5 +42,6 @@
+       SAVE_ARGS
+ restore:
+       RESTORE_ARGS
++      pax_force_retaddr
+       ret
+       CFI_ENDPROC
+diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
+index 3eb18ac..6890bc3 100644
+--- a/arch/x86/lib/usercopy_32.c
++++ b/arch/x86/lib/usercopy_32.c
+@@ -42,11 +42,13 @@ do {                                                                       \
+       int __d0;                                                       \
+       might_fault();                                                  \
+       __asm__ __volatile__(                                           \
++              __COPYUSER_SET_ES                                       \
+               ASM_STAC "\n"                                           \
+               "0:     rep; stosl\n"                                   \
+               "       movl %2,%0\n"                                   \
+               "1:     rep; stosb\n"                                   \
+               "2: " ASM_CLAC "\n"                                     \
++              __COPYUSER_RESTORE_ES                                   \
+               ".section .fixup,\"ax\"\n"                              \
+               "3:     lea 0(%2,%0,4),%0\n"                            \
+               "       jmp 2b\n"                                       \
+@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
+ #ifdef CONFIG_X86_INTEL_USERCOPY
+ static unsigned long
+-__copy_user_intel(void __user *to, const void *from, unsigned long size)
++__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
+ {
+       int d0, d1;
+       __asm__ __volatile__(
+@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
+                      "       .align 2,0x90\n"
+                      "3:     movl 0(%4), %%eax\n"
+                      "4:     movl 4(%4), %%edx\n"
+-                     "5:     movl %%eax, 0(%3)\n"
+-                     "6:     movl %%edx, 4(%3)\n"
++                     "5:     "__copyuser_seg" movl %%eax, 0(%3)\n"
++                     "6:     "__copyuser_seg" movl %%edx, 4(%3)\n"
+                      "7:     movl 8(%4), %%eax\n"
+                      "8:     movl 12(%4),%%edx\n"
+-                     "9:     movl %%eax, 8(%3)\n"
+-                     "10:    movl %%edx, 12(%3)\n"
++                     "9:     "__copyuser_seg" movl %%eax, 8(%3)\n"
++                     "10:    "__copyuser_seg" movl %%edx, 12(%3)\n"
+                      "11:    movl 16(%4), %%eax\n"
+                      "12:    movl 20(%4), %%edx\n"
+-                     "13:    movl %%eax, 16(%3)\n"
+-                     "14:    movl %%edx, 20(%3)\n"
++                     "13:    "__copyuser_seg" movl %%eax, 16(%3)\n"
++                     "14:    "__copyuser_seg" movl %%edx, 20(%3)\n"
+                      "15:    movl 24(%4), %%eax\n"
+                      "16:    movl 28(%4), %%edx\n"
+-                     "17:    movl %%eax, 24(%3)\n"
+-                     "18:    movl %%edx, 28(%3)\n"
++                     "17:    "__copyuser_seg" movl %%eax, 24(%3)\n"
++                     "18:    "__copyuser_seg" movl %%edx, 28(%3)\n"
+                      "19:    movl 32(%4), %%eax\n"
+                      "20:    movl 36(%4), %%edx\n"
+-                     "21:    movl %%eax, 32(%3)\n"
+-                     "22:    movl %%edx, 36(%3)\n"
++                     "21:    "__copyuser_seg" movl %%eax, 32(%3)\n"
++                     "22:    "__copyuser_seg" movl %%edx, 36(%3)\n"
+                      "23:    movl 40(%4), %%eax\n"
+                      "24:    movl 44(%4), %%edx\n"
+-                     "25:    movl %%eax, 40(%3)\n"
+-                     "26:    movl %%edx, 44(%3)\n"
++                     "25:    "__copyuser_seg" movl %%eax, 40(%3)\n"
++                     "26:    "__copyuser_seg" movl %%edx, 44(%3)\n"
+                      "27:    movl 48(%4), %%eax\n"
+                      "28:    movl 52(%4), %%edx\n"
+-                     "29:    movl %%eax, 48(%3)\n"
+-                     "30:    movl %%edx, 52(%3)\n"
++                     "29:    "__copyuser_seg" movl %%eax, 48(%3)\n"
++                     "30:    "__copyuser_seg" movl %%edx, 52(%3)\n"
+                      "31:    movl 56(%4), %%eax\n"
+                      "32:    movl 60(%4), %%edx\n"
+-                     "33:    movl %%eax, 56(%3)\n"
+-                     "34:    movl %%edx, 60(%3)\n"
++                     "33:    "__copyuser_seg" movl %%eax, 56(%3)\n"
++                     "34:    "__copyuser_seg" movl %%edx, 60(%3)\n"
+                      "       addl $-64, %0\n"
+                      "       addl $64, %4\n"
+                      "       addl $64, %3\n"
+@@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
+                      "       shrl  $2, %0\n"
+                      "       andl  $3, %%eax\n"
+                      "       cld\n"
++                     __COPYUSER_SET_ES
+                      "99:    rep; movsl\n"
+                      "36:    movl %%eax, %0\n"
+                      "37:    rep; movsb\n"
+                      "100:\n"
++                     __COPYUSER_RESTORE_ES
+                      ".section .fixup,\"ax\"\n"
+                      "101:   lea 0(%%eax,%0,4),%0\n"
+                      "       jmp 100b\n"
+@@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
+ }
+ static unsigned long
++__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
++{
++      int d0, d1;
++      __asm__ __volatile__(
++                     "       .align 2,0x90\n"
++                     "1:     "__copyuser_seg" movl 32(%4), %%eax\n"
++                     "       cmpl $67, %0\n"
++                     "       jbe 3f\n"
++                     "2:     "__copyuser_seg" movl 64(%4), %%eax\n"
++                     "       .align 2,0x90\n"
++                     "3:     "__copyuser_seg" movl 0(%4), %%eax\n"
++                     "4:     "__copyuser_seg" movl 4(%4), %%edx\n"
++                     "5:     movl %%eax, 0(%3)\n"
++                     "6:     movl %%edx, 4(%3)\n"
++                     "7:     "__copyuser_seg" movl 8(%4), %%eax\n"
++                     "8:     "__copyuser_seg" movl 12(%4),%%edx\n"
++                     "9:     movl %%eax, 8(%3)\n"
++                     "10:    movl %%edx, 12(%3)\n"
++                     "11:    "__copyuser_seg" movl 16(%4), %%eax\n"
++                     "12:    "__copyuser_seg" movl 20(%4), %%edx\n"
++                     "13:    movl %%eax, 16(%3)\n"
++                     "14:    movl %%edx, 20(%3)\n"
++                     "15:    "__copyuser_seg" movl 24(%4), %%eax\n"
++                     "16:    "__copyuser_seg" movl 28(%4), %%edx\n"
++                     "17:    movl %%eax, 24(%3)\n"
++                     "18:    movl %%edx, 28(%3)\n"
++                     "19:    "__copyuser_seg" movl 32(%4), %%eax\n"
++                     "20:    "__copyuser_seg" movl 36(%4), %%edx\n"
++                     "21:    movl %%eax, 32(%3)\n"
++                     "22:    movl %%edx, 36(%3)\n"
++                     "23:    "__copyuser_seg" movl 40(%4), %%eax\n"
++                     "24:    "__copyuser_seg" movl 44(%4), %%edx\n"
++                     "25:    movl %%eax, 40(%3)\n"
++                     "26:    movl %%edx, 44(%3)\n"
++                     "27:    "__copyuser_seg" movl 48(%4), %%eax\n"
++                     "28:    "__copyuser_seg" movl 52(%4), %%edx\n"
++                     "29:    movl %%eax, 48(%3)\n"
++                     "30:    movl %%edx, 52(%3)\n"
++                     "31:    "__copyuser_seg" movl 56(%4), %%eax\n"
++                     "32:    "__copyuser_seg" movl 60(%4), %%edx\n"
++                     "33:    movl %%eax, 56(%3)\n"
++                     "34:    movl %%edx, 60(%3)\n"
++                     "       addl $-64, %0\n"
++                     "       addl $64, %4\n"
++                     "       addl $64, %3\n"
++                     "       cmpl $63, %0\n"
++                     "       ja  1b\n"
++                     "35:    movl  %0, %%eax\n"
++                     "       shrl  $2, %0\n"
++                     "       andl  $3, %%eax\n"
++                     "       cld\n"
++                     "99:    rep; "__copyuser_seg" movsl\n"
++                     "36:    movl %%eax, %0\n"
++                     "37:    rep; "__copyuser_seg" movsb\n"
++                     "100:\n"
++                     ".section .fixup,\"ax\"\n"
++                     "101:   lea 0(%%eax,%0,4),%0\n"
++                     "       jmp 100b\n"
++                     ".previous\n"
++                     _ASM_EXTABLE(1b,100b)
++                     _ASM_EXTABLE(2b,100b)
++                     _ASM_EXTABLE(3b,100b)
++                     _ASM_EXTABLE(4b,100b)
++                     _ASM_EXTABLE(5b,100b)
++                     _ASM_EXTABLE(6b,100b)
++                     _ASM_EXTABLE(7b,100b)
++                     _ASM_EXTABLE(8b,100b)
++                     _ASM_EXTABLE(9b,100b)
++                     _ASM_EXTABLE(10b,100b)
++                     _ASM_EXTABLE(11b,100b)
++                     _ASM_EXTABLE(12b,100b)
++                     _ASM_EXTABLE(13b,100b)
++                     _ASM_EXTABLE(14b,100b)
++                     _ASM_EXTABLE(15b,100b)
++                     _ASM_EXTABLE(16b,100b)
++                     _ASM_EXTABLE(17b,100b)
++                     _ASM_EXTABLE(18b,100b)
++                     _ASM_EXTABLE(19b,100b)
++                     _ASM_EXTABLE(20b,100b)
++                     _ASM_EXTABLE(21b,100b)
++                     _ASM_EXTABLE(22b,100b)
++                     _ASM_EXTABLE(23b,100b)
++                     _ASM_EXTABLE(24b,100b)
++                     _ASM_EXTABLE(25b,100b)
++                     _ASM_EXTABLE(26b,100b)
++                     _ASM_EXTABLE(27b,100b)
++                     _ASM_EXTABLE(28b,100b)
++                     _ASM_EXTABLE(29b,100b)
++                     _ASM_EXTABLE(30b,100b)
++                     _ASM_EXTABLE(31b,100b)
++                     _ASM_EXTABLE(32b,100b)
++                     _ASM_EXTABLE(33b,100b)
++                     _ASM_EXTABLE(34b,100b)
++                     _ASM_EXTABLE(35b,100b)
++                     _ASM_EXTABLE(36b,100b)
++                     _ASM_EXTABLE(37b,100b)
++                     _ASM_EXTABLE(99b,101b)
++                     : "=&c"(size), "=&D" (d0), "=&S" (d1)
++                     :  "1"(to), "2"(from), "0"(size)
++                     : "eax", "edx", "memory");
++      return size;
++}
++
++static unsigned long __size_overflow(3)
+ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
+ {
+       int d0, d1;
+       __asm__ __volatile__(
+                      "        .align 2,0x90\n"
+-                     "0:      movl 32(%4), %%eax\n"
++                     "0:      "__copyuser_seg" movl 32(%4), %%eax\n"
+                      "        cmpl $67, %0\n"
+                      "        jbe 2f\n"
+-                     "1:      movl 64(%4), %%eax\n"
++                     "1:      "__copyuser_seg" movl 64(%4), %%eax\n"
+                      "        .align 2,0x90\n"
+-                     "2:      movl 0(%4), %%eax\n"
+-                     "21:     movl 4(%4), %%edx\n"
++                     "2:      "__copyuser_seg" movl 0(%4), %%eax\n"
++                     "21:     "__copyuser_seg" movl 4(%4), %%edx\n"
+                      "        movl %%eax, 0(%3)\n"
+                      "        movl %%edx, 4(%3)\n"
+-                     "3:      movl 8(%4), %%eax\n"
+-                     "31:     movl 12(%4),%%edx\n"
++                     "3:      "__copyuser_seg" movl 8(%4), %%eax\n"
++                     "31:     "__copyuser_seg" movl 12(%4),%%edx\n"
+                      "        movl %%eax, 8(%3)\n"
+                      "        movl %%edx, 12(%3)\n"
+-                     "4:      movl 16(%4), %%eax\n"
+-                     "41:     movl 20(%4), %%edx\n"
++                     "4:      "__copyuser_seg" movl 16(%4), %%eax\n"
++                     "41:     "__copyuser_seg" movl 20(%4), %%edx\n"
+                      "        movl %%eax, 16(%3)\n"
+                      "        movl %%edx, 20(%3)\n"
+-                     "10:     movl 24(%4), %%eax\n"
+-                     "51:     movl 28(%4), %%edx\n"
++                     "10:     "__copyuser_seg" movl 24(%4), %%eax\n"
++                     "51:     "__copyuser_seg" movl 28(%4), %%edx\n"
+                      "        movl %%eax, 24(%3)\n"
+                      "        movl %%edx, 28(%3)\n"
+-                     "11:     movl 32(%4), %%eax\n"
+-                     "61:     movl 36(%4), %%edx\n"
++                     "11:     "__copyuser_seg" movl 32(%4), %%eax\n"
++                     "61:     "__copyuser_seg" movl 36(%4), %%edx\n"
+                      "        movl %%eax, 32(%3)\n"
+                      "        movl %%edx, 36(%3)\n"
+-                     "12:     movl 40(%4), %%eax\n"
+-                     "71:     movl 44(%4), %%edx\n"
++                     "12:     "__copyuser_seg" movl 40(%4), %%eax\n"
++                     "71:     "__copyuser_seg" movl 44(%4), %%edx\n"
+                      "        movl %%eax, 40(%3)\n"
+                      "        movl %%edx, 44(%3)\n"
+-                     "13:     movl 48(%4), %%eax\n"
+-                     "81:     movl 52(%4), %%edx\n"
++                     "13:     "__copyuser_seg" movl 48(%4), %%eax\n"
++                     "81:     "__copyuser_seg" movl 52(%4), %%edx\n"
+                      "        movl %%eax, 48(%3)\n"
+                      "        movl %%edx, 52(%3)\n"
+-                     "14:     movl 56(%4), %%eax\n"
+-                     "91:     movl 60(%4), %%edx\n"
++                     "14:     "__copyuser_seg" movl 56(%4), %%eax\n"
++                     "91:     "__copyuser_seg" movl 60(%4), %%edx\n"
+                      "        movl %%eax, 56(%3)\n"
+                      "        movl %%edx, 60(%3)\n"
+                      "        addl $-64, %0\n"
+@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
+                      "        shrl  $2, %0\n"
+                      "        andl $3, %%eax\n"
+                      "        cld\n"
+-                     "6:      rep; movsl\n"
++                     "6:      rep; "__copyuser_seg" movsl\n"
+                      "        movl %%eax,%0\n"
+-                     "7:      rep; movsb\n"
++                     "7:      rep; "__copyuser_seg" movsb\n"
+                      "8:\n"
+                      ".section .fixup,\"ax\"\n"
+                      "9:      lea 0(%%eax,%0,4),%0\n"
+@@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
+  * hyoshiok@miraclelinux.com
+  */
+-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
++static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
+                               const void __user *from, unsigned long size)
+ {
+       int d0, d1;
+       __asm__ __volatile__(
+              "        .align 2,0x90\n"
+-             "0:      movl 32(%4), %%eax\n"
++             "0:      "__copyuser_seg" movl 32(%4), %%eax\n"
+              "        cmpl $67, %0\n"
+              "        jbe 2f\n"
+-             "1:      movl 64(%4), %%eax\n"
++             "1:      "__copyuser_seg" movl 64(%4), %%eax\n"
+              "        .align 2,0x90\n"
+-             "2:      movl 0(%4), %%eax\n"
+-             "21:     movl 4(%4), %%edx\n"
++             "2:      "__copyuser_seg" movl 0(%4), %%eax\n"
++             "21:     "__copyuser_seg" movl 4(%4), %%edx\n"
+              "        movnti %%eax, 0(%3)\n"
+              "        movnti %%edx, 4(%3)\n"
+-             "3:      movl 8(%4), %%eax\n"
+-             "31:     movl 12(%4),%%edx\n"
++             "3:      "__copyuser_seg" movl 8(%4), %%eax\n"
++             "31:     "__copyuser_seg" movl 12(%4),%%edx\n"
+              "        movnti %%eax, 8(%3)\n"
+              "        movnti %%edx, 12(%3)\n"
+-             "4:      movl 16(%4), %%eax\n"
+-             "41:     movl 20(%4), %%edx\n"
++             "4:      "__copyuser_seg" movl 16(%4), %%eax\n"
++             "41:     "__copyuser_seg" movl 20(%4), %%edx\n"
+              "        movnti %%eax, 16(%3)\n"
+              "        movnti %%edx, 20(%3)\n"
+-             "10:     movl 24(%4), %%eax\n"
+-             "51:     movl 28(%4), %%edx\n"
++             "10:     "__copyuser_seg" movl 24(%4), %%eax\n"
++             "51:     "__copyuser_seg" movl 28(%4), %%edx\n"
+              "        movnti %%eax, 24(%3)\n"
+              "        movnti %%edx, 28(%3)\n"
+-             "11:     movl 32(%4), %%eax\n"
+-             "61:     movl 36(%4), %%edx\n"
++             "11:     "__copyuser_seg" movl 32(%4), %%eax\n"
++             "61:     "__copyuser_seg" movl 36(%4), %%edx\n"
+              "        movnti %%eax, 32(%3)\n"
+              "        movnti %%edx, 36(%3)\n"
+-             "12:     movl 40(%4), %%eax\n"
+-             "71:     movl 44(%4), %%edx\n"
++             "12:     "__copyuser_seg" movl 40(%4), %%eax\n"
++             "71:     "__copyuser_seg" movl 44(%4), %%edx\n"
+              "        movnti %%eax, 40(%3)\n"
+              "        movnti %%edx, 44(%3)\n"
+-             "13:     movl 48(%4), %%eax\n"
+-             "81:     movl 52(%4), %%edx\n"
++             "13:     "__copyuser_seg" movl 48(%4), %%eax\n"
++             "81:     "__copyuser_seg" movl 52(%4), %%edx\n"
+              "        movnti %%eax, 48(%3)\n"
+              "        movnti %%edx, 52(%3)\n"
+-             "14:     movl 56(%4), %%eax\n"
+-             "91:     movl 60(%4), %%edx\n"
++             "14:     "__copyuser_seg" movl 56(%4), %%eax\n"
++             "91:     "__copyuser_seg" movl 60(%4), %%edx\n"
+              "        movnti %%eax, 56(%3)\n"
+              "        movnti %%edx, 60(%3)\n"
+              "        addl $-64, %0\n"
+@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+              "        shrl  $2, %0\n"
+              "        andl $3, %%eax\n"
+              "        cld\n"
+-             "6:      rep; movsl\n"
++             "6:      rep; "__copyuser_seg" movsl\n"
+              "        movl %%eax,%0\n"
+-             "7:      rep; movsb\n"
++             "7:      rep; "__copyuser_seg" movsb\n"
+              "8:\n"
+              ".section .fixup,\"ax\"\n"
+              "9:      lea 0(%%eax,%0,4),%0\n"
+@@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+       return size;
+ }
+-static unsigned long __copy_user_intel_nocache(void *to,
++static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
+                               const void __user *from, unsigned long size)
+ {
+       int d0, d1;
+       __asm__ __volatile__(
+              "        .align 2,0x90\n"
+-             "0:      movl 32(%4), %%eax\n"
++             "0:      "__copyuser_seg" movl 32(%4), %%eax\n"
+              "        cmpl $67, %0\n"
+              "        jbe 2f\n"
+-             "1:      movl 64(%4), %%eax\n"
++             "1:      "__copyuser_seg" movl 64(%4), %%eax\n"
+              "        .align 2,0x90\n"
+-             "2:      movl 0(%4), %%eax\n"
+-             "21:     movl 4(%4), %%edx\n"
++             "2:      "__copyuser_seg" movl 0(%4), %%eax\n"
++             "21:     "__copyuser_seg" movl 4(%4), %%edx\n"
+              "        movnti %%eax, 0(%3)\n"
+              "        movnti %%edx, 4(%3)\n"
+-             "3:      movl 8(%4), %%eax\n"
+-             "31:     movl 12(%4),%%edx\n"
++             "3:      "__copyuser_seg" movl 8(%4), %%eax\n"
++             "31:     "__copyuser_seg" movl 12(%4),%%edx\n"
+              "        movnti %%eax, 8(%3)\n"
+              "        movnti %%edx, 12(%3)\n"
+-             "4:      movl 16(%4), %%eax\n"
+-             "41:     movl 20(%4), %%edx\n"
++             "4:      "__copyuser_seg" movl 16(%4), %%eax\n"
++             "41:     "__copyuser_seg" movl 20(%4), %%edx\n"
+              "        movnti %%eax, 16(%3)\n"
+              "        movnti %%edx, 20(%3)\n"
+-             "10:     movl 24(%4), %%eax\n"
+-             "51:     movl 28(%4), %%edx\n"
++             "10:     "__copyuser_seg" movl 24(%4), %%eax\n"
++             "51:     "__copyuser_seg" movl 28(%4), %%edx\n"
+              "        movnti %%eax, 24(%3)\n"
+              "        movnti %%edx, 28(%3)\n"
+-             "11:     movl 32(%4), %%eax\n"
+-             "61:     movl 36(%4), %%edx\n"
++             "11:     "__copyuser_seg" movl 32(%4), %%eax\n"
++             "61:     "__copyuser_seg" movl 36(%4), %%edx\n"
+              "        movnti %%eax, 32(%3)\n"
+              "        movnti %%edx, 36(%3)\n"
+-             "12:     movl 40(%4), %%eax\n"
+-             "71:     movl 44(%4), %%edx\n"
++             "12:     "__copyuser_seg" movl 40(%4), %%eax\n"
++             "71:     "__copyuser_seg" movl 44(%4), %%edx\n"
+              "        movnti %%eax, 40(%3)\n"
+              "        movnti %%edx, 44(%3)\n"
+-             "13:     movl 48(%4), %%eax\n"
+-             "81:     movl 52(%4), %%edx\n"
++             "13:     "__copyuser_seg" movl 48(%4), %%eax\n"
++             "81:     "__copyuser_seg" movl 52(%4), %%edx\n"
+              "        movnti %%eax, 48(%3)\n"
+              "        movnti %%edx, 52(%3)\n"
+-             "14:     movl 56(%4), %%eax\n"
+-             "91:     movl 60(%4), %%edx\n"
++             "14:     "__copyuser_seg" movl 56(%4), %%eax\n"
++             "91:     "__copyuser_seg" movl 60(%4), %%edx\n"
+              "        movnti %%eax, 56(%3)\n"
+              "        movnti %%edx, 60(%3)\n"
+              "        addl $-64, %0\n"
+@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
+              "        shrl  $2, %0\n"
+              "        andl $3, %%eax\n"
+              "        cld\n"
+-             "6:      rep; movsl\n"
++             "6:      rep; "__copyuser_seg" movsl\n"
+              "        movl %%eax,%0\n"
+-             "7:      rep; movsb\n"
++             "7:      rep; "__copyuser_seg" movsb\n"
+              "8:\n"
+              ".section .fixup,\"ax\"\n"
+              "9:      lea 0(%%eax,%0,4),%0\n"
+@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
+  */
+ unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
+                                       unsigned long size);
+-unsigned long __copy_user_intel(void __user *to, const void *from,
++unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
++                                      unsigned long size);
++unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
+                                       unsigned long size);
+ unsigned long __copy_user_zeroing_intel_nocache(void *to,
+                               const void __user *from, unsigned long size);
+ #endif /* CONFIG_X86_INTEL_USERCOPY */
+ /* Generic arbitrary sized copy.  */
+-#define __copy_user(to, from, size)                                   \
++#define __copy_user(to, from, size, prefix, set, restore)             \
+ do {                                                                  \
+       int __d0, __d1, __d2;                                           \
+       __asm__ __volatile__(                                           \
++              set                                                     \
+               "       cmp  $7,%0\n"                                   \
+               "       jbe  1f\n"                                      \
+               "       movl %1,%0\n"                                   \
+               "       negl %0\n"                                      \
+               "       andl $7,%0\n"                                   \
+               "       subl %0,%3\n"                                   \
+-              "4:     rep; movsb\n"                                   \
++              "4:     rep; "prefix"movsb\n"                           \
+               "       movl %3,%0\n"                                   \
+               "       shrl $2,%0\n"                                   \
+               "       andl $3,%3\n"                                   \
+               "       .align 2,0x90\n"                                \
+-              "0:     rep; movsl\n"                                   \
++              "0:     rep; "prefix"movsl\n"                           \
+               "       movl %3,%0\n"                                   \
+-              "1:     rep; movsb\n"                                   \
++              "1:     rep; "prefix"movsb\n"                           \
+               "2:\n"                                                  \
++              restore                                                 \
+               ".section .fixup,\"ax\"\n"                              \
+               "5:     addl %3,%0\n"                                   \
+               "       jmp 2b\n"                                       \
+@@ -538,14 +650,14 @@ do {                                                                     \
+               "       negl %0\n"                                      \
+               "       andl $7,%0\n"                                   \
+               "       subl %0,%3\n"                                   \
+-              "4:     rep; movsb\n"                                   \
++              "4:     rep; "__copyuser_seg"movsb\n"                   \
+               "       movl %3,%0\n"                                   \
+               "       shrl $2,%0\n"                                   \
+               "       andl $3,%3\n"                                   \
+               "       .align 2,0x90\n"                                \
+-              "0:     rep; movsl\n"                                   \
++              "0:     rep; "__copyuser_seg"movsl\n"                   \
+               "       movl %3,%0\n"                                   \
+-              "1:     rep; movsb\n"                                   \
++              "1:     rep; "__copyuser_seg"movsb\n"                   \
+               "2:\n"                                                  \
+               ".section .fixup,\"ax\"\n"                              \
+               "5:     addl %3,%0\n"                                   \
+@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
+ {
+       stac();
+       if (movsl_is_ok(to, from, n))
+-              __copy_user(to, from, n);
++              __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
+       else
+-              n = __copy_user_intel(to, from, n);
++              n = __generic_copy_to_user_intel(to, from, n);
+       clac();
+       return n;
+ }
+@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
+ {
+       stac();
+       if (movsl_is_ok(to, from, n))
+-              __copy_user(to, from, n);
++              __copy_user(to, from, n, __copyuser_seg, "", "");
+       else
+-              n = __copy_user_intel((void __user *)to,
+-                                    (const void *)from, n);
++              n = __generic_copy_from_user_intel(to, from, n);
+       clac();
+       return n;
+ }
+@@ -632,60 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
+       if (n > 64 && cpu_has_xmm2)
+               n = __copy_user_intel_nocache(to, from, n);
+       else
+-              __copy_user(to, from, n);
++              __copy_user(to, from, n, __copyuser_seg, "", "");
+ #else
+-      __copy_user(to, from, n);
++      __copy_user(to, from, n, __copyuser_seg, "", "");
+ #endif
+       clac();
+       return n;
+ }
+ EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
+-/**
+- * copy_to_user: - Copy a block of data into user space.
+- * @to:   Destination address, in user space.
+- * @from: Source address, in kernel space.
+- * @n:    Number of bytes to copy.
+- *
+- * Context: User context only.  This function may sleep.
+- *
+- * Copy data from kernel space to user space.
+- *
+- * Returns number of bytes that could not be copied.
+- * On success, this will be zero.
+- */
+-unsigned long
+-copy_to_user(void __user *to, const void *from, unsigned long n)
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++void __set_fs(mm_segment_t x)
+ {
+-      if (access_ok(VERIFY_WRITE, to, n))
+-              n = __copy_to_user(to, from, n);
+-      return n;
++      switch (x.seg) {
++      case 0:
++              loadsegment(gs, 0);
++              break;
++      case TASK_SIZE_MAX:
++              loadsegment(gs, __USER_DS);
++              break;
++      case -1UL:
++              loadsegment(gs, __KERNEL_DS);
++              break;
++      default:
++              BUG();
++      }
+ }
+-EXPORT_SYMBOL(copy_to_user);
++EXPORT_SYMBOL(__set_fs);
+-/**
+- * copy_from_user: - Copy a block of data from user space.
+- * @to:   Destination address, in kernel space.
+- * @from: Source address, in user space.
+- * @n:    Number of bytes to copy.
+- *
+- * Context: User context only.  This function may sleep.
+- *
+- * Copy data from user space to kernel space.
+- *
+- * Returns number of bytes that could not be copied.
+- * On success, this will be zero.
+- *
+- * If some data could not be copied, this function will pad the copied
+- * data to the requested size using zero bytes.
+- */
+-unsigned long
+-_copy_from_user(void *to, const void __user *from, unsigned long n)
++void set_fs(mm_segment_t x)
+ {
+-      if (access_ok(VERIFY_READ, from, n))
+-              n = __copy_from_user(to, from, n);
+-      else
+-              memset(to, 0, n);
+-      return n;
++      current_thread_info()->addr_limit = x;
++      __set_fs(x);
+ }
+-EXPORT_SYMBOL(_copy_from_user);
++EXPORT_SYMBOL(set_fs);
++#endif
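The usercopy_32.c hunks above parameterize every userland access with a segment prefix: the PaX macro __copyuser_seg turns into a gs override under PAX_MEMORY_UDEREF, and the __COPYUSER_SET_ES/__COPYUSER_RESTORE_ES pair swaps %es around the rep movs sequences, since string stores implicitly go through %es:%edi. A minimal stand-alone sketch of a prefix-parameterized copy loop, using invented COPYUSER_SEG/DEMO_UDEREF macros and plain x86 inline assembly rather than the kernel's real definitions:

#include <stddef.h>
#include <string.h>

#ifdef DEMO_UDEREF
# define COPYUSER_SEG "gs; "   /* only meaningful where %gs maps userland,  */
                               /* as it does in a UDEREF kernel             */
#else
# define COPYUSER_SEG ""       /* flat memory model: the prefix compiles away */
#endif

/* rep movsb with dst/src/count pinned to %edi/%esi/%ecx (x86/x86-64 only) */
static void seg_copy(void *dst, const void *src, size_t n)
{
        asm volatile("rep; " COPYUSER_SEG "movsb"
                     : "+D" (dst), "+S" (src), "+c" (n)
                     :
                     : "memory");
}

int main(void)
{
        char out[16];

        seg_copy(out, "hello uderef", 13);      /* 12 chars + trailing NUL */
        return strcmp(out, "hello uderef");     /* 0 on success */
}

Built without DEMO_UDEREF this is an ordinary rep movsb; defining it changes nothing but the emitted segment prefix, which is the same trick the patched __copy_user(to, from, size, prefix, set, restore) macro relies on.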
+diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
+index 906fea3..0194a18 100644
+--- a/arch/x86/lib/usercopy_64.c
++++ b/arch/x86/lib/usercopy_64.c
+@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
+       might_fault();
+       /* no memory constraint because it doesn't change any memory gcc knows
+          about */
++      pax_open_userland();
+       stac();
+       asm volatile(
+               "       testq  %[size8],%[size8]\n"
+@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
+               _ASM_EXTABLE(0b,3b)
+               _ASM_EXTABLE(1b,2b)
+               : [size8] "=&c"(size), [dst] "=&D" (__d0)
+-              : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
++              : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
+                 [zero] "r" (0UL), [eight] "r" (8UL));
+       clac();
++      pax_close_userland();
+       return size;
+ }
+ EXPORT_SYMBOL(__clear_user);
+@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
+ }
+ EXPORT_SYMBOL(clear_user);
+-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
++unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
+ {
+-      if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { 
+-              return copy_user_generic((__force void *)to, (__force void *)from, len);
+-      } 
+-      return len;             
++      if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
++              return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
++      return len;
+ }
+ EXPORT_SYMBOL(copy_in_user);
+@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
+  * it is not necessary to optimize tail handling.
+  */
+ unsigned long
+-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
++copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
+ {
+       char c;
+       unsigned zero_len;
++      clac();
++      pax_close_userland();
+       for (; len; --len, to++) {
+               if (__get_user_nocheck(c, from++, sizeof(char)))
+                       break;
+@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
+       for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
+               if (__put_user_nocheck(c, to++, sizeof(char)))
+                       break;
+-      clac();
+       return len;
+ }
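On the 64-bit side the patch brackets the low-level copies with pax_open_userland()/pax_close_userland() and rebases raw user pointers with ____m() before copy_user_generic() runs; the fault.c hunk further down, which subtracts pax_user_shadow_base from kernel-mode fault addresses, shows the same shadow-alias idea from the other end. A tiny user-space mock-up of that rebasing step, with made-up constants (DEMO_SHADOW_BASE, DEMO_TASK_SIZE_MAX) standing in for the real pax_user_shadow_base logic:

#include <stdint.h>
#include <stdio.h>

#define DEMO_SHADOW_BASE   0x0000100000000000ULL  /* made-up alias offset  */
#define DEMO_TASK_SIZE_MAX 0x00007ffffffff000ULL  /* upper end of userland */

/* Rebase a userland address into the shadow alias; kernel addresses pass. */
static uint64_t demo_m(uint64_t addr)
{
        return addr < DEMO_TASK_SIZE_MAX ? addr + DEMO_SHADOW_BASE : addr;
}

int main(void)
{
        uint64_t user   = 0x00007f0000001000ULL;
        uint64_t kernel = 0xffff880000000000ULL;

        printf("user   %#llx -> %#llx\n",
               (unsigned long long)user, (unsigned long long)demo_m(user));
        printf("kernel %#llx -> %#llx\n",
               (unsigned long long)kernel, (unsigned long long)demo_m(kernel));
        return 0;
}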
+diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
+index 23d8e5f..9ccc13a 100644
+--- a/arch/x86/mm/Makefile
++++ b/arch/x86/mm/Makefile
+@@ -28,3 +28,7 @@ obj-$(CONFIG_ACPI_NUMA)              += srat.o
+ obj-$(CONFIG_NUMA_EMU)                += numa_emulation.o
+ obj-$(CONFIG_MEMTEST)         += memtest.o
++
++quote:="
++obj-$(CONFIG_X86_64)          += uderef_64.o
++CFLAGS_uderef_64.o            := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
+diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
+index 903ec1e..c4166b2 100644
+--- a/arch/x86/mm/extable.c
++++ b/arch/x86/mm/extable.c
+@@ -6,12 +6,24 @@
+ static inline unsigned long
+ ex_insn_addr(const struct exception_table_entry *x)
+ {
+-      return (unsigned long)&x->insn + x->insn;
++      unsigned long reloc = 0;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++      reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++#endif
++
++      return (unsigned long)&x->insn + x->insn + reloc;
+ }
+ static inline unsigned long
+ ex_fixup_addr(const struct exception_table_entry *x)
+ {
+-      return (unsigned long)&x->fixup + x->fixup;
++      unsigned long reloc = 0;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++      reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++#endif
++
++      return (unsigned long)&x->fixup + x->fixup + reloc;
+ }
+ int fixup_exception(struct pt_regs *regs)
+@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
+       unsigned long new_ip;
+ #ifdef CONFIG_PNPBIOS
+-      if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
++      if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
+               extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
+               extern u32 pnp_bios_is_utter_crap;
+               pnp_bios_is_utter_crap = 1;
+@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
+               i += 4;
+               p->fixup -= i;
+               i += 4;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++              BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
++              p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++              p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++#endif
++
+       }
+ }
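The extable.c hunks keep the kernel's relative exception-table format, in which each entry stores 32-bit offsets from its own insn/fixup fields, and only add a KERNEXEC relocation delta on i386. A stand-alone mock-up of the relative-address recovery, using a demo_extable_entry struct invented for the example:

#include <stdint.h>
#include <stdio.h>

struct demo_extable_entry {
        int32_t insn;   /* offset from &entry->insn to the faulting insn  */
        int32_t fixup;  /* offset from &entry->fixup to the fixup landing */
};

static uintptr_t demo_insn_addr(const struct demo_extable_entry *e)
{
        return (uintptr_t)&e->insn + e->insn;
}

static uintptr_t demo_fixup_addr(const struct demo_extable_entry *e)
{
        return (uintptr_t)&e->fixup + e->fixup;
}

static char code[64];                    /* stands in for a few .text bytes  */
static struct demo_extable_entry entry;  /* static so the offsets stay small */

int main(void)
{
        entry.insn  = (int32_t)((uintptr_t)&code[8]  - (uintptr_t)&entry.insn);
        entry.fixup = (int32_t)((uintptr_t)&code[32] - (uintptr_t)&entry.fixup);

        printf("insn  recovered ok: %d\n",
               demo_insn_addr(&entry)  == (uintptr_t)&code[8]);
        printf("fixup recovered ok: %d\n",
               demo_fixup_addr(&entry) == (uintptr_t)&code[32]);
        return 0;
}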
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 654be4a..a4a3da1 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -14,11 +14,18 @@
+ #include <linux/hugetlb.h>            /* hstate_index_to_shift        */
+ #include <linux/prefetch.h>           /* prefetchw                    */
+ #include <linux/context_tracking.h>   /* exception_enter(), ...       */
++#include <linux/unistd.h>
++#include <linux/compiler.h>
+ #include <asm/traps.h>                        /* dotraplinkage, ...           */
+ #include <asm/pgalloc.h>              /* pgd_*(), ...                 */
+ #include <asm/kmemcheck.h>            /* kmemcheck_*(), ...           */
+ #include <asm/fixmap.h>                       /* VSYSCALL_START               */
++#include <asm/tlbflush.h>
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#include <asm/stacktrace.h>
++#endif
+ /*
+  * Page fault error code bits:
+@@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
+       int ret = 0;
+       /* kprobe_running() needs smp_processor_id() */
+-      if (kprobes_built_in() && !user_mode_vm(regs)) {
++      if (kprobes_built_in() && !user_mode(regs)) {
+               preempt_disable();
+               if (kprobe_running() && kprobe_fault_handler(regs, 14))
+                       ret = 1;
+@@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
+               return !instr_lo || (instr_lo>>1) == 1;
+       case 0x00:
+               /* Prefetch instruction is 0x0F0D or 0x0F18 */
+-              if (probe_kernel_address(instr, opcode))
++              if (user_mode(regs)) {
++                      if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
++                              return 0;
++              } else if (probe_kernel_address(instr, opcode))
+                       return 0;
+               *prefetch = (instr_lo == 0xF) &&
+@@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
+       while (instr < max_instr) {
+               unsigned char opcode;
+-              if (probe_kernel_address(instr, opcode))
++              if (user_mode(regs)) {
++                      if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
++                              break;
++              } else if (probe_kernel_address(instr, opcode))
+                       break;
+               instr++;
+@@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
+       force_sig_info(si_signo, &info, tsk);
+ }
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++static int pax_handle_fetch_fault(struct pt_regs *regs);
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
++{
++      pgd_t *pgd;
++      pud_t *pud;
++      pmd_t *pmd;
++
++      pgd = pgd_offset(mm, address);
++      if (!pgd_present(*pgd))
++              return NULL;
++      pud = pud_offset(pgd, address);
++      if (!pud_present(*pud))
++              return NULL;
++      pmd = pmd_offset(pud, address);
++      if (!pmd_present(*pmd))
++              return NULL;
++      return pmd;
++}
++#endif
++
+ DEFINE_SPINLOCK(pgd_lock);
+ LIST_HEAD(pgd_list);
+@@ -232,10 +273,27 @@ void vmalloc_sync_all(void)
+       for (address = VMALLOC_START & PMD_MASK;
+            address >= TASK_SIZE && address < FIXADDR_TOP;
+            address += PMD_SIZE) {
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++              unsigned long cpu;
++#else
+               struct page *page;
++#endif
+               spin_lock(&pgd_lock);
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++              for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
++                      pgd_t *pgd = get_cpu_pgd(cpu, user);
++                      pmd_t *ret;
++
++                      ret = vmalloc_sync_one(pgd, address);
++                      if (!ret)
++                              break;
++                      pgd = get_cpu_pgd(cpu, kernel);
++#else
+               list_for_each_entry(page, &pgd_list, lru) {
++                      pgd_t *pgd;
+                       spinlock_t *pgt_lock;
+                       pmd_t *ret;
+@@ -243,8 +301,14 @@ void vmalloc_sync_all(void)
+                       pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+                       spin_lock(pgt_lock);
+-                      ret = vmalloc_sync_one(page_address(page), address);
++                      pgd = page_address(page);
++#endif
++
++                      ret = vmalloc_sync_one(pgd, address);
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+                       spin_unlock(pgt_lock);
++#endif
+                       if (!ret)
+                               break;
+@@ -278,6 +342,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
+        * an interrupt in the middle of a task switch..
+        */
+       pgd_paddr = read_cr3();
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++      BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
++      vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
++#endif
++
+       pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
+       if (!pmd_k)
+               return -1;
+@@ -373,11 +443,25 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
+        * happen within a race in page table update. In the later
+        * case just flush:
+        */
+-      pgd = pgd_offset(current->active_mm, address);
++
+       pgd_ref = pgd_offset_k(address);
+       if (pgd_none(*pgd_ref))
+               return -1;
++#ifdef CONFIG_PAX_PER_CPU_PGD
++      BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
++      pgd = pgd_offset_cpu(smp_processor_id(), user, address);
++      if (pgd_none(*pgd)) {
++              set_pgd(pgd, *pgd_ref);
++              arch_flush_lazy_mmu_mode();
++      } else {
++              BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
++      }
++      pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
++#else
++      pgd = pgd_offset(current->active_mm, address);
++#endif
++
+       if (pgd_none(*pgd)) {
+               set_pgd(pgd, *pgd_ref);
+               arch_flush_lazy_mmu_mode();
+@@ -543,7 +627,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
+ static int is_errata100(struct pt_regs *regs, unsigned long address)
+ {
+ #ifdef CONFIG_X86_64
+-      if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
++      if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
+               return 1;
+ #endif
+       return 0;
+@@ -570,7 +654,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
+ }
+ static const char nx_warning[] = KERN_CRIT
+-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
++"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
+ static void
+ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+@@ -579,15 +663,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+       if (!oops_may_print())
+               return;
+-      if (error_code & PF_INSTR) {
++      if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
+               unsigned int level;
+               pte_t *pte = lookup_address(address, &level);
+               if (pte && pte_present(*pte) && !pte_exec(*pte))
+-                      printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
++                      printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
+       }
++#ifdef CONFIG_PAX_KERNEXEC
++      if (init_mm.start_code <= address && address < init_mm.end_code) {
++              if (current->signal->curr_ip)
++                      printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
++                                      &current->signal->curr_ip, current->comm, task_pid_nr(current),
++                                      from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
++              else
++                      printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
++                                      from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
++      }
++#endif
++
+       printk(KERN_ALERT "BUG: unable to handle kernel ");
+       if (address < PAGE_SIZE)
+               printk(KERN_CONT "NULL pointer dereference");
+@@ -750,6 +846,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+                               return;
+               }
+ #endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++              if (pax_is_fetch_fault(regs, error_code, address)) {
++
++#ifdef CONFIG_PAX_EMUTRAMP
++                      switch (pax_handle_fetch_fault(regs)) {
++                      case 2:
++                              return;
++                      }
++#endif
++
++                      pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
++                      do_group_exit(SIGKILL);
++              }
++#endif
++
+               /* Kernel addresses are always protection faults: */
+               if (address >= TASK_SIZE)
+                       error_code |= PF_PROT;
+@@ -835,7 +947,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
+       if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
+               printk(KERN_ERR
+       "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
+-                      tsk->comm, tsk->pid, address);
++                      tsk->comm, task_pid_nr(tsk), address);
+               code = BUS_MCEERR_AR;
+       }
+ #endif
+@@ -898,6 +1010,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
+       return 1;
+ }
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
++{
++      pte_t *pte;
++      pmd_t *pmd;
++      spinlock_t *ptl;
++      unsigned char pte_mask;
++
++      if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
++          !(mm->pax_flags & MF_PAX_PAGEEXEC))
++              return 0;
++
++      /* PaX: it's our fault, let's handle it if we can */
++
++      /* PaX: take a look at read faults before acquiring any locks */
++      if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
++              /* instruction fetch attempt from a protected page in user mode */
++              up_read(&mm->mmap_sem);
++
++#ifdef CONFIG_PAX_EMUTRAMP
++              switch (pax_handle_fetch_fault(regs)) {
++              case 2:
++                      return 1;
++              }
++#endif
++
++              pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
++              do_group_exit(SIGKILL);
++      }
++
++      pmd = pax_get_pmd(mm, address);
++      if (unlikely(!pmd))
++              return 0;
++
++      pte = pte_offset_map_lock(mm, pmd, address, &ptl);
++      if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
++              pte_unmap_unlock(pte, ptl);
++              return 0;
++      }
++
++      if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
++              /* write attempt to a protected page in user mode */
++              pte_unmap_unlock(pte, ptl);
++              return 0;
++      }
++
++#ifdef CONFIG_SMP
++      if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
++#else
++      if (likely(address > get_limit(regs->cs)))
++#endif
++      {
++              set_pte(pte, pte_mkread(*pte));
++              __flush_tlb_one(address);
++              pte_unmap_unlock(pte, ptl);
++              up_read(&mm->mmap_sem);
++              return 1;
++      }
++
++      pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
++
++      /*
++       * PaX: fill DTLB with user rights and retry
++       */
++      __asm__ __volatile__ (
++              "orb %2,(%1)\n"
++#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
++/*
++ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
++ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
++ * page fault when examined during a TLB load attempt. this is true not only
++ * for PTEs holding a non-present entry but also present entries that will
++ * raise a page fault (such as those set up by PaX, or the copy-on-write
++ * mechanism). in effect it means that we do *not* need to flush the TLBs
++ * for our target pages since their PTEs are simply not in the TLBs at all.
++
++ * the best thing in omitting it is that we gain around 15-20% speed in the
++ * fast path of the page fault handler and can get rid of tracing since we
++ * can no longer flush unintended entries.
++ */
++              "invlpg (%0)\n"
++#endif
++              __copyuser_seg"testb $0,(%0)\n"
++              "xorb %3,(%1)\n"
++              :
++              : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
++              : "memory", "cc");
++      pte_unmap_unlock(pte, ptl);
++      up_read(&mm->mmap_sem);
++      return 1;
++}
++#endif
++
+ /*
+  * Handle a spurious fault caused by a stale TLB entry.
+  *
+@@ -964,6 +1169,9 @@ int show_unhandled_signals = 1;
+ static inline int
+ access_error(unsigned long error_code, struct vm_area_struct *vma)
+ {
++      if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
++              return 1;
++
+       if (error_code & PF_WRITE) {
+               /* write, present and write, not present: */
+               if (unlikely(!(vma->vm_flags & VM_WRITE)))
+@@ -992,7 +1200,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
+       if (error_code & PF_USER)
+               return false;
+-      if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
++      if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
+               return false;
+       return true;
+@@ -1008,18 +1216,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
+ {
+       struct vm_area_struct *vma;
+       struct task_struct *tsk;
+-      unsigned long address;
+       struct mm_struct *mm;
+       int fault;
+       int write = error_code & PF_WRITE;
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+                                       (write ? FAULT_FLAG_WRITE : 0);
+-      tsk = current;
+-      mm = tsk->mm;
+-
+       /* Get the faulting address: */
+-      address = read_cr2();
++      unsigned long address = read_cr2();
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
++              if (!search_exception_tables(regs->ip)) {
++                      printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
++                      bad_area_nosemaphore(regs, error_code, address);
++                      return;
++              }
++              if (address < pax_user_shadow_base) {
++                      printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
++                      printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
++                      show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
++              } else
++                      address -= pax_user_shadow_base;
++      }
++#endif
++
++      tsk = current;
++      mm = tsk->mm;
+       /*
+        * Detect and handle instructions that would cause a page fault for
+@@ -1080,7 +1303,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
+        * User-mode registers count as a user access even for any
+        * potential system fault or CPU buglet:
+        */
+-      if (user_mode_vm(regs)) {
++      if (user_mode(regs)) {
+               local_irq_enable();
+               error_code |= PF_USER;
+       } else {
+@@ -1142,6 +1365,11 @@ retry:
+               might_sleep();
+       }
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++      if (pax_handle_pageexec_fault(regs, mm, address, error_code))
++              return;
++#endif
++
+       vma = find_vma(mm, address);
+       if (unlikely(!vma)) {
+               bad_area(regs, error_code, address);
+@@ -1153,18 +1381,24 @@ retry:
+               bad_area(regs, error_code, address);
+               return;
+       }
+-      if (error_code & PF_USER) {
+-              /*
+-               * Accessing the stack below %sp is always a bug.
+-               * The large cushion allows instructions like enter
+-               * and pusha to work. ("enter $65535, $31" pushes
+-               * 32 pointers and then decrements %sp by 65535.)
+-               */
+-              if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
+-                      bad_area(regs, error_code, address);
+-                      return;
+-              }
++      /*
++       * Accessing the stack below %sp is always a bug.
++       * The large cushion allows instructions like enter
++       * and pusha to work. ("enter $65535, $31" pushes
++       * 32 pointers and then decrements %sp by 65535.)
++       */
++      if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
++              bad_area(regs, error_code, address);
++              return;
+       }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
++              bad_area(regs, error_code, address);
++              return;
++      }
++#endif
++
+       if (unlikely(expand_stack(vma, address))) {
+               bad_area(regs, error_code, address);
+               return;
+@@ -1230,3 +1464,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
+       __do_page_fault(regs, error_code);
+       exception_exit(prev_state);
+ }
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
++{
++      struct mm_struct *mm = current->mm;
++      unsigned long ip = regs->ip;
++
++      if (v8086_mode(regs))
++              ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
++
++#ifdef CONFIG_PAX_PAGEEXEC
++      if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++              if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
++                      return true;
++              if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
++                      return true;
++              return false;
++      }
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (mm->pax_flags & MF_PAX_SEGMEXEC) {
++              if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
++                      return true;
++              return false;
++      }
++#endif
++
++      return false;
++}
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++static int pax_handle_fetch_fault_32(struct pt_regs *regs)
++{
++      int err;
++
++      do { /* PaX: libffi trampoline emulation */
++              unsigned char mov, jmp;
++              unsigned int addr1, addr2;
++
++#ifdef CONFIG_X86_64
++              if ((regs->ip + 9) >> 32)
++                      break;
++#endif
++
++              err = get_user(mov, (unsigned char __user *)regs->ip);
++              err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
++              err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
++              err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
++
++              if (err)
++                      break;
++
++              if (mov == 0xB8 && jmp == 0xE9) {
++                      regs->ax = addr1;
++                      regs->ip = (unsigned int)(regs->ip + addr2 + 10);
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: gcc trampoline emulation #1 */
++              unsigned char mov1, mov2;
++              unsigned short jmp;
++              unsigned int addr1, addr2;
++
++#ifdef CONFIG_X86_64
++              if ((regs->ip + 11) >> 32)
++                      break;
++#endif
++
++              err = get_user(mov1, (unsigned char __user *)regs->ip);
++              err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
++              err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
++              err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
++              err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
++
++              if (err)
++                      break;
++
++              if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
++                      regs->cx = addr1;
++                      regs->ax = addr2;
++                      regs->ip = addr2;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: gcc trampoline emulation #2 */
++              unsigned char mov, jmp;
++              unsigned int addr1, addr2;
++
++#ifdef CONFIG_X86_64
++              if ((regs->ip + 9) >> 32)
++                      break;
++#endif
++
++              err = get_user(mov, (unsigned char __user *)regs->ip);
++              err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
++              err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
++              err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
++
++              if (err)
++                      break;
++
++              if (mov == 0xB9 && jmp == 0xE9) {
++                      regs->cx = addr1;
++                      regs->ip = (unsigned int)(regs->ip + addr2 + 10);
++                      return 2;
++              }
++      } while (0);
++
++      return 1; /* PaX in action */
++}
++
++#ifdef CONFIG_X86_64
++static int pax_handle_fetch_fault_64(struct pt_regs *regs)
++{
++      int err;
++
++      do { /* PaX: libffi trampoline emulation */
++              unsigned short mov1, mov2, jmp1;
++              unsigned char stcclc, jmp2;
++              unsigned long addr1, addr2;
++
++              err = get_user(mov1, (unsigned short __user *)regs->ip);
++              err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
++              err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
++              err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
++              err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
++              err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
++              err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
++
++              if (err)
++                      break;
++
++              if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
++                      regs->r11 = addr1;
++                      regs->r10 = addr2;
++                      if (stcclc == 0xF8)
++                              regs->flags &= ~X86_EFLAGS_CF;
++                      else
++                              regs->flags |= X86_EFLAGS_CF;
++                      regs->ip = addr1;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: gcc trampoline emulation #1 */
++              unsigned short mov1, mov2, jmp1;
++              unsigned char jmp2;
++              unsigned int addr1;
++              unsigned long addr2;
++
++              err = get_user(mov1, (unsigned short __user *)regs->ip);
++              err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
++              err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
++              err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
++              err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
++              err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
++
++              if (err)
++                      break;
++
++              if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
++                      regs->r11 = addr1;
++                      regs->r10 = addr2;
++                      regs->ip = addr1;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: gcc trampoline emulation #2 */
++              unsigned short mov1, mov2, jmp1;
++              unsigned char jmp2;
++              unsigned long addr1, addr2;
++
++              err = get_user(mov1, (unsigned short __user *)regs->ip);
++              err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
++              err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
++              err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
++              err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
++              err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
++
++              if (err)
++                      break;
++
++              if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
++                      regs->r11 = addr1;
++                      regs->r10 = addr2;
++                      regs->ip = addr1;
++                      return 2;
++              }
++      } while (0);
++
++      return 1; /* PaX in action */
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->ip = fault address)
++ *
++ * returns 1 when task should be killed
++ *         2 when gcc trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++      if (v8086_mode(regs))
++              return 1;
++
++      if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
++              return 1;
++
++#ifdef CONFIG_X86_32
++      return pax_handle_fetch_fault_32(regs);
++#else
++      if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
++              return pax_handle_fetch_fault_32(regs);
++      else
++              return pax_handle_fetch_fault_64(regs);
++#endif
++}
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++      long i;
++
++      printk(KERN_ERR "PAX: bytes at PC: ");
++      for (i = 0; i < 20; i++) {
++              unsigned char c;
++              if (get_user(c, (unsigned char __force_user *)pc+i))
++                      printk(KERN_CONT "?? ");
++              else
++                      printk(KERN_CONT "%02x ", c);
++      }
++      printk("\n");
++
++      printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
++      for (i = -1; i < 80 / (long)sizeof(long); i++) {
++              unsigned long c;
++              if (get_user(c, (unsigned long __force_user *)sp+i)) {
++#ifdef CONFIG_X86_32
++                      printk(KERN_CONT "???????? ");
++#else
++                      if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
++                              printk(KERN_CONT "???????? ???????? ");
++                      else
++                              printk(KERN_CONT "???????????????? ");
++#endif
++              } else {
++#ifdef CONFIG_X86_64
++                      if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
++                              printk(KERN_CONT "%08x ", (unsigned int)c);
++                              printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
++                      } else
++#endif
++                              printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
++              }
++      }
++      printk("\n");
++}
++#endif
++
++/**
++ * probe_kernel_write(): safely attempt to write to a location
++ * @dst: address to write to
++ * @src: pointer to the data that shall be written
++ * @size: size of the data chunk
++ *
++ * Safely write to address @dst from the buffer at @src.  If a kernel fault
++ * happens, handle that and return -EFAULT.
++ */
++long notrace probe_kernel_write(void *dst, const void *src, size_t size)
++{
++      long ret;
++      mm_segment_t old_fs = get_fs();
++
++      set_fs(KERNEL_DS);
++      pagefault_disable();
++      pax_open_kernel();
++      ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
++      pax_close_kernel();
++      pagefault_enable();
++      set_fs(old_fs);
++
++      return ret ? -EFAULT : 0;
++}
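
The probe_kernel_write() helper added above converts a kernel fault during the copy into a plain -EFAULT return instead of an oops, with pax_open_kernel()/pax_close_kernel() bracketing the write so it can also target normally read-only kernel memory. As a rough userspace analogue of the "attempt the write, get an error code instead of a crash" behaviour, the standalone sketch below uses process_vm_writev() on the calling process; this is our own choice of illustration, not kernel code, and it assumes Linux with glibc 2.15 or newer.

/* Standalone userspace sketch: attempt a write to an arbitrary address and
 * get -EFAULT back instead of faulting, loosely mirroring the semantics of
 * probe_kernel_write() above. */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

static long probe_write(void *dst, const void *src, size_t size)
{
        struct iovec local  = { .iov_base = (void *)src, .iov_len = size };
        struct iovec remote = { .iov_base = dst,         .iov_len = size };

        /* Writing into our own address space; a bad dst yields -1/EFAULT
         * rather than a SIGSEGV, which is the property being illustrated. */
        if (process_vm_writev(getpid(), &local, 1, &remote, 1, 0) < 0)
                return -EFAULT;
        return 0;
}

int main(void)
{
        char buf[16] = "old";

        printf("valid dst: %ld\n", probe_write(buf, "new", 4));        /* 0   */
        printf("bogus dst: %ld\n", probe_write((void *)16, "x", 1));   /* -14 */
        return 0;
}
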
+diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
+index dd74e46..7d26398 100644
+--- a/arch/x86/mm/gup.c
++++ b/arch/x86/mm/gup.c
+@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+       addr = start;
+       len = (unsigned long) nr_pages << PAGE_SHIFT;
+       end = start + len;
+-      if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
++      if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+                                       (void __user *)start, len)))
+               return 0;
+diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
+index 252b8f5..4dcfdc1 100644
+--- a/arch/x86/mm/highmem_32.c
++++ b/arch/x86/mm/highmem_32.c
+@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+       idx = type + KM_TYPE_NR*smp_processor_id();
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+       BUG_ON(!pte_none(*(kmap_pte-idx)));
++
++      pax_open_kernel();
+       set_pte(kmap_pte-idx, mk_pte(page, prot));
++      pax_close_kernel();
++
+       arch_flush_lazy_mmu_mode();
+       return (void *)vaddr;
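
Both kmap_atomic_prot() here and kmap_atomic_prot_pfn() further down gain a pax_open_kernel()/pax_close_kernel() bracket around set_pte(), because with KERNEXEC and constified page tables the kernel's own mappings are normally not writable and have to be opened for exactly one update. A loose userspace illustration of that "unprotect, write, reprotect" discipline is mprotect() on a page kept read-only outside the bracket; the helper names below are invented, and this is not how the kernel implements the PaX primitives.

/* Userspace illustration of the open/write/close bracket: the page stays
 * read-only except for the duration of a single intended update. */
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void *page;
static size_t page_size;

static void open_protected(void)
{
        assert(mprotect(page, page_size, PROT_READ | PROT_WRITE) == 0);
}

static void close_protected(void)
{
        assert(mprotect(page, page_size, PROT_READ) == 0);
}

int main(void)
{
        page_size = (size_t)sysconf(_SC_PAGESIZE);
        page = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        assert(page != MAP_FAILED);

        strcpy(page, "initial");
        close_protected();              /* page is read-only from here on  */

        open_protected();               /* cf. pax_open_kernel()           */
        strcpy(page, "updated");        /* the one write we mean to allow  */
        close_protected();              /* cf. pax_close_kernel()          */

        printf("%s\n", (char *)page);   /* prints "updated"                */
        return 0;
}
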
+diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
+index ae1aa71..d9bea75 100644
+--- a/arch/x86/mm/hugetlbpage.c
++++ b/arch/x86/mm/hugetlbpage.c
+@@ -271,23 +271,30 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
+ #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
+               unsigned long addr, unsigned long len,
+-              unsigned long pgoff, unsigned long flags)
++              unsigned long pgoff, unsigned long flags, unsigned long offset)
+ {
+       struct hstate *h = hstate_file(file);
+       struct vm_unmapped_area_info info;
+-
++      
+       info.flags = 0;
+       info.length = len;
+       info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (current->mm->pax_flags & MF_PAX_RANDMMAP)
++              info.low_limit += current->mm->delta_mmap;
++#endif
++
+       info.high_limit = TASK_SIZE;
+       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+       info.align_offset = 0;
++      info.threadstack_offset = offset;
+       return vm_unmapped_area(&info);
+ }
+ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+               unsigned long addr0, unsigned long len,
+-              unsigned long pgoff, unsigned long flags)
++              unsigned long pgoff, unsigned long flags, unsigned long offset)
+ {
+       struct hstate *h = hstate_file(file);
+       struct vm_unmapped_area_info info;
+@@ -299,6 +306,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+       info.high_limit = current->mm->mmap_base;
+       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+       info.align_offset = 0;
++      info.threadstack_offset = offset;
+       addr = vm_unmapped_area(&info);
+       /*
+@@ -311,6 +319,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+               VM_BUG_ON(addr != -ENOMEM);
+               info.flags = 0;
+               info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (current->mm->pax_flags & MF_PAX_RANDMMAP)
++                      info.low_limit += current->mm->delta_mmap;
++#endif
++
+               info.high_limit = TASK_SIZE;
+               addr = vm_unmapped_area(&info);
+       }
+@@ -325,10 +339,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+       struct hstate *h = hstate_file(file);
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
++      unsigned long pax_task_size = TASK_SIZE;
++      unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
+       if (len & ~huge_page_mask(h))
+               return -EINVAL;
+-      if (len > TASK_SIZE)
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (mm->pax_flags & MF_PAX_SEGMEXEC)
++              pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++      pax_task_size -= PAGE_SIZE;
++
++      if (len > pax_task_size)
+               return -ENOMEM;
+       if (flags & MAP_FIXED) {
+@@ -337,19 +361,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+               return addr;
+       }
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+               addr = ALIGN(addr, huge_page_size(h));
+               vma = find_vma(mm, addr);
+-              if (TASK_SIZE - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+       if (mm->get_unmapped_area == arch_get_unmapped_area)
+               return hugetlb_get_unmapped_area_bottomup(file, addr, len,
+-                              pgoff, flags);
++                              pgoff, flags, offset);
+       else
+               return hugetlb_get_unmapped_area_topdown(file, addr, len,
+-                              pgoff, flags);
++                              pgoff, flags, offset);
+ }
+ #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
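
The hugetlbpage.c changes shrink the usable address space by a guard page, shift the bottom-up search floor by the per-mm delta_mmap when MF_PAX_RANDMMAP is set, and thread a thread-stack offset into the gap check. The bookkeeping itself is a few comparisons, replayed below as a standalone sketch; the field name delta_mmap is taken from the patch, every constant is invented, and check_heap_stack_gap() is deliberately left out.

/* Standalone model of the length/limit checks added above. Constants are
 * illustrative only; the kernel derives them from the mm and the hstate. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE          4096UL
#define TASK_SIZE          0xC0000000UL            /* 3 GiB split, illustrative */
#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
#define HUGE_PAGE_SIZE     (2UL * 1024 * 1024)

static long hugetlb_area_checks(unsigned long len, bool randmmap,
                                unsigned long delta_mmap)
{
        unsigned long task_size = TASK_SIZE - PAGE_SIZE;   /* guard page, as in the patch  */
        unsigned long low_limit = TASK_UNMAPPED_BASE;

        if (len & (HUGE_PAGE_SIZE - 1))
                return -22;                                /* -EINVAL: not huge-page sized */
        if (len > task_size)
                return -12;                                /* -ENOMEM: cannot possibly fit */
        if (randmmap)
                low_limit += delta_mmap;                   /* randomised search floor      */

        printf("search [%#lx, %#lx) for %#lx bytes\n", low_limit, task_size, len);
        return 0;
}

int main(void)
{
        hugetlb_area_checks(4 * HUGE_PAGE_SIZE, false, 0);
        hugetlb_area_checks(4 * HUGE_PAGE_SIZE, true, 0x01000000);
        printf("misaligned len -> %ld\n",
               hugetlb_area_checks(4 * HUGE_PAGE_SIZE + 1, false, 0));
        return 0;
}
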
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 1f34e92..c97b98f 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -4,6 +4,7 @@
+ #include <linux/swap.h>
+ #include <linux/memblock.h>
+ #include <linux/bootmem.h>    /* for max_low_pfn */
++#include <linux/tboot.h>
+ #include <asm/cacheflush.h>
+ #include <asm/e820.h>
+@@ -17,6 +18,8 @@
+ #include <asm/proto.h>
+ #include <asm/dma.h>          /* for MAX_DMA_PFN */
+ #include <asm/microcode.h>
++#include <asm/desc.h>
++#include <asm/bios_ebda.h>
+ #include "mm_internal.h"
+@@ -465,7 +468,18 @@ void __init init_mem_mapping(void)
+       early_ioremap_page_table_range_init();
+ #endif
++#ifdef CONFIG_PAX_PER_CPU_PGD
++      clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
++                      swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++                      KERNEL_PGD_PTRS);
++      clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
++                      swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++                      KERNEL_PGD_PTRS);
++      load_cr3(get_cpu_pgd(0, kernel));
++#else
+       load_cr3(swapper_pg_dir);
++#endif
++
+       __flush_tlb_all();
+       early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
+@@ -481,10 +495,40 @@ void __init init_mem_mapping(void)
+  * Access has to be given to non-kernel-ram areas as well, these contain the PCI
+  * mmio resources as well as potential bios/acpi data regions.
+  */
++
++#ifdef CONFIG_GRKERNSEC_KMEM
++static unsigned int ebda_start __read_only;
++static unsigned int ebda_end __read_only;
++#endif
++
+ int devmem_is_allowed(unsigned long pagenr)
+ {
+-      if (pagenr < 256)
++#ifdef CONFIG_GRKERNSEC_KMEM
++      /* allow BDA */
++      if (!pagenr)
+               return 1;
++      /* allow EBDA */
++      if (pagenr >= ebda_start && pagenr < ebda_end)
++              return 1;
++      /* if tboot is in use, allow access to its hardcoded serial log range */
++      if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
++              return 1;
++#else
++      if (!pagenr)
++              return 1;
++#ifdef CONFIG_VM86
++      if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
++              return 1;
++#endif
++#endif
++
++      if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
++              return 1;
++#ifdef CONFIG_GRKERNSEC_KMEM
++      /* throw out everything else below 1MB */
++      if (pagenr <= 256)
++              return 0;
++#endif
+       if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+               return 0;
+       if (!page_is_ram(pagenr))
+@@ -538,8 +582,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
+ #endif
+ }
++#ifdef CONFIG_GRKERNSEC_KMEM
++static inline void gr_init_ebda(void)
++{
++      unsigned int ebda_addr;
++      unsigned int ebda_size = 0;
++
++      ebda_addr = get_bios_ebda();
++      if (ebda_addr) {
++              ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
++              ebda_size <<= 10;
++      }
++      if (ebda_addr && ebda_size) {
++              ebda_start = ebda_addr >> PAGE_SHIFT;
++              ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
++      } else {
++              ebda_start = 0x9f000 >> PAGE_SHIFT;
++              ebda_end = 0xa0000 >> PAGE_SHIFT;
++      }
++}
++#else
++static inline void gr_init_ebda(void) { }
++#endif
++
+ void free_initmem(void)
+ {
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_32
++      /* PaX: limit KERNEL_CS to actual size */
++      unsigned long addr, limit;
++      struct desc_struct d;
++      int cpu;
++#else
++      pgd_t *pgd;
++      pud_t *pud;
++      pmd_t *pmd;
++      unsigned long addr, end;
++#endif
++#endif
++
++      gr_init_ebda();
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_32
++      limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
++      limit = (limit - 1UL) >> PAGE_SHIFT;
++
++      memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
++      for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
++              pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
++              write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
++              write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
++      }
++
++      /* PaX: make KERNEL_CS read-only */
++      addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
++      if (!paravirt_enabled())
++              set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
++/*
++              for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
++                      pgd = pgd_offset_k(addr);
++                      pud = pud_offset(pgd, addr);
++                      pmd = pmd_offset(pud, addr);
++                      set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++              }
++*/
++#ifdef CONFIG_X86_PAE
++      set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
++/*
++      for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
++              pgd = pgd_offset_k(addr);
++              pud = pud_offset(pgd, addr);
++              pmd = pmd_offset(pud, addr);
++              set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
++      }
++*/
++#endif
++
++#ifdef CONFIG_MODULES
++      set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
++#endif
++
++#else
++      /* PaX: make kernel code/rodata read-only, rest non-executable */
++      for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
++              pgd = pgd_offset_k(addr);
++              pud = pud_offset(pgd, addr);
++              pmd = pmd_offset(pud, addr);
++              if (!pmd_present(*pmd))
++                      continue;
++              if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
++                      set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++              else
++                      set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
++      }
++
++      addr = (unsigned long)__va(__pa(__START_KERNEL_map));
++      end = addr + KERNEL_IMAGE_SIZE;
++      for (; addr < end; addr += PMD_SIZE) {
++              pgd = pgd_offset_k(addr);
++              pud = pud_offset(pgd, addr);
++              pmd = pmd_offset(pud, addr);
++              if (!pmd_present(*pmd))
++                      continue;
++              if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
++                      set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++      }
++#endif
++
++      flush_tlb_all();
++#endif
++
+       free_init_pages("unused kernel memory",
+                       (unsigned long)(&__init_begin),
+                       (unsigned long)(&__init_end));
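
Under CONFIG_GRKERNSEC_KMEM the rewritten devmem_is_allowed() no longer exposes the whole first megabyte through /dev/mem: only page 0 (the BDA), the EBDA window located by gr_init_ebda(), the legacy ISA hole and, with tboot active, its serial log pages stay reachable, while every other low page is refused before the usual RAM and exclusive-region checks run. The sketch below is a self-contained model of that page-number gate; the EBDA bounds reuse the fallback values from gr_init_ebda(), and the remaining constants are only there to make the branch order visible.

/* Self-contained model of the hardened devmem_is_allowed() gate. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define EBDA_START_PAGE (0x9f000UL >> PAGE_SHIFT)   /* fallback window from the patch */
#define EBDA_END_PAGE   (0xa0000UL >> PAGE_SHIFT)
#define ISA_START_PAGE  (0xa0000UL >> PAGE_SHIFT)
#define ISA_END_PAGE    (0x100000UL >> PAGE_SHIFT)

static bool devmem_allowed_model(unsigned long pagenr)
{
        if (pagenr == 0)                                         /* BDA              */
                return true;
        if (pagenr >= EBDA_START_PAGE && pagenr < EBDA_END_PAGE) /* EBDA             */
                return true;
        if (pagenr >= ISA_START_PAGE && pagenr < ISA_END_PAGE)   /* legacy ISA hole  */
                return true;
        if (pagenr <= 256)                                       /* rest of low 1 MB */
                return false;
        return true;          /* higher pages fall through to the RAM checks */
}

int main(void)
{
        unsigned long probes[] = { 0, 1, 0x9f, 0xa5, 0x100, 0x200 };

        for (unsigned int i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
                printf("page %#lx -> %s\n", probes[i],
                       devmem_allowed_model(probes[i]) ? "allowed" : "denied");
        return 0;
}
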
+diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
+index 3ac7e31..89611b7 100644
+--- a/arch/x86/mm/init_32.c
++++ b/arch/x86/mm/init_32.c
+@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
+ bool __read_mostly __vmalloc_start_set = false;
+ /*
+- * Creates a middle page table and puts a pointer to it in the
+- * given global directory entry. This only returns the gd entry
+- * in non-PAE compilation mode, since the middle layer is folded.
+- */
+-static pmd_t * __init one_md_table_init(pgd_t *pgd)
+-{
+-      pud_t *pud;
+-      pmd_t *pmd_table;
+-
+-#ifdef CONFIG_X86_PAE
+-      if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
+-              pmd_table = (pmd_t *)alloc_low_page();
+-              paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
+-              set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+-              pud = pud_offset(pgd, 0);
+-              BUG_ON(pmd_table != pmd_offset(pud, 0));
+-
+-              return pmd_table;
+-      }
+-#endif
+-      pud = pud_offset(pgd, 0);
+-      pmd_table = pmd_offset(pud, 0);
+-
+-      return pmd_table;
+-}
+-
+-/*
+  * Create a page table and place a pointer to it in a middle page
+  * directory entry:
+  */
+@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
+               pte_t *page_table = (pte_t *)alloc_low_page();
+               paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++              set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
++#else
+               set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
++#endif
+               BUG_ON(page_table != pte_offset_kernel(pmd, 0));
+       }
+       return pte_offset_kernel(pmd, 0);
+ }
++static pmd_t * __init one_md_table_init(pgd_t *pgd)
++{
++      pud_t *pud;
++      pmd_t *pmd_table;
++
++      pud = pud_offset(pgd, 0);
++      pmd_table = pmd_offset(pud, 0);
++
++      return pmd_table;
++}
++
+ pmd_t * __init populate_extra_pmd(unsigned long vaddr)
+ {
+       int pgd_idx = pgd_index(vaddr);
+@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
+       int pgd_idx, pmd_idx;
+       unsigned long vaddr;
+       pgd_t *pgd;
++      pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte = NULL;
+       unsigned long count = page_table_range_init_count(start, end);
+@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
+       pgd = pgd_base + pgd_idx;
+       for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
+-              pmd = one_md_table_init(pgd);
+-              pmd = pmd + pmd_index(vaddr);
++              pud = pud_offset(pgd, vaddr);
++              pmd = pmd_offset(pud, vaddr);
++
++#ifdef CONFIG_X86_PAE
++              paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
++#endif
++
+               for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
+                                                       pmd++, pmd_idx++) {
+                       pte = page_table_kmap_check(one_page_table_init(pmd),
+@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
+       }
+ }
+-static inline int is_kernel_text(unsigned long addr)
++static inline int is_kernel_text(unsigned long start, unsigned long end)
+ {
+-      if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
+-              return 1;
+-      return 0;
++      if ((start > ktla_ktva((unsigned long)_etext) ||
++           end <= ktla_ktva((unsigned long)_stext)) &&
++          (start > ktla_ktva((unsigned long)_einittext) ||
++           end <= ktla_ktva((unsigned long)_sinittext)) &&
++
++#ifdef CONFIG_ACPI_SLEEP
++          (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
++#endif
++
++          (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
++              return 0;
++      return 1;
+ }
+ /*
+@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
+       unsigned long last_map_addr = end;
+       unsigned long start_pfn, end_pfn;
+       pgd_t *pgd_base = swapper_pg_dir;
+-      int pgd_idx, pmd_idx, pte_ofs;
++      unsigned int pgd_idx, pmd_idx, pte_ofs;
+       unsigned long pfn;
+       pgd_t *pgd;
++      pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+       unsigned pages_2m, pages_4k;
+@@ -291,8 +295,13 @@ repeat:
+       pfn = start_pfn;
+       pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+       pgd = pgd_base + pgd_idx;
+-      for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
+-              pmd = one_md_table_init(pgd);
++      for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
++              pud = pud_offset(pgd, 0);
++              pmd = pmd_offset(pud, 0);
++
++#ifdef CONFIG_X86_PAE
++              paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
++#endif
+               if (pfn >= end_pfn)
+                       continue;
+@@ -304,14 +313,13 @@ repeat:
+ #endif
+               for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
+                    pmd++, pmd_idx++) {
+-                      unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
++                      unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
+                       /*
+                        * Map with big pages if possible, otherwise
+                        * create normal page tables:
+                        */
+                       if (use_pse) {
+-                              unsigned int addr2;
+                               pgprot_t prot = PAGE_KERNEL_LARGE;
+                               /*
+                                * first pass will use the same initial
+@@ -322,11 +330,7 @@ repeat:
+                                                _PAGE_PSE);
+                               pfn &= PMD_MASK >> PAGE_SHIFT;
+-                              addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
+-                                      PAGE_OFFSET + PAGE_SIZE-1;
+-
+-                              if (is_kernel_text(addr) ||
+-                                  is_kernel_text(addr2))
++                              if (is_kernel_text(address, address + PMD_SIZE))
+                                       prot = PAGE_KERNEL_LARGE_EXEC;
+                               pages_2m++;
+@@ -343,7 +347,7 @@ repeat:
+                       pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+                       pte += pte_ofs;
+                       for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
+-                           pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
++                           pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
+                               pgprot_t prot = PAGE_KERNEL;
+                               /*
+                                * first pass will use the same initial
+@@ -351,7 +355,7 @@ repeat:
+                                */
+                               pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
+-                              if (is_kernel_text(addr))
++                              if (is_kernel_text(address, address + PAGE_SIZE))
+                                       prot = PAGE_KERNEL_EXEC;
+                               pages_4k++;
+@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
+               pud = pud_offset(pgd, va);
+               pmd = pmd_offset(pud, va);
+-              if (!pmd_present(*pmd))
++              if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
+                       break;
+               /* should not be large page here */
+@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
+ static void __init pagetable_init(void)
+ {
+-      pgd_t *pgd_base = swapper_pg_dir;
+-
+-      permanent_kmaps_init(pgd_base);
++      permanent_kmaps_init(swapper_pg_dir);
+ }
+-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
++pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
+ EXPORT_SYMBOL_GPL(__supported_pte_mask);
+ /* user-defined highmem size */
+@@ -772,7 +774,7 @@ void __init mem_init(void)
+       after_bootmem = 1;
+       codesize =  (unsigned long) &_etext - (unsigned long) &_text;
+-      datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
++      datasize =  (unsigned long) &_edata - (unsigned long) &_sdata;
+       initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
+       printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
+@@ -813,10 +815,10 @@ void __init mem_init(void)
+               ((unsigned long)&__init_end -
+                (unsigned long)&__init_begin) >> 10,
+-              (unsigned long)&_etext, (unsigned long)&_edata,
+-              ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
++              (unsigned long)&_sdata, (unsigned long)&_edata,
++              ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
+-              (unsigned long)&_text, (unsigned long)&_etext,
++              ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
+               ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
+       /*
+@@ -906,6 +908,7 @@ void set_kernel_text_rw(void)
+       if (!kernel_set_to_readonly)
+               return;
++      start = ktla_ktva(start);
+       pr_debug("Set kernel text: %lx - %lx for read write\n",
+                start, start+size);
+@@ -920,6 +923,7 @@ void set_kernel_text_ro(void)
+       if (!kernel_set_to_readonly)
+               return;
++      start = ktla_ktva(start);
+       pr_debug("Set kernel text: %lx - %lx for read only\n",
+                start, start+size);
+@@ -948,6 +952,7 @@ void mark_rodata_ro(void)
+       unsigned long start = PFN_ALIGN(_text);
+       unsigned long size = PFN_ALIGN(_etext) - start;
++      start = ktla_ktva(start);
+       set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
+       printk(KERN_INFO "Write protecting the kernel text: %luk\n",
+               size >> 10);
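
The new two-argument is_kernel_text(start, end) replaces two point probes per PMD with an interval test: a candidate range counts as kernel text if it intersects the text section, the init text section, the ACPI wakeup trampoline or the low BIOS area. Stripped of the kernel symbols (and of the detail that the kernel's bounds are inclusive at one end), that is the usual two-interval overlap predicate, sketched below with invented half-open ranges.

/* The overlap predicate behind the rewritten is_kernel_text(): a candidate
 * range is "text" if it intersects any protected range. Ranges are invented. */
#include <assert.h>
#include <stdio.h>

struct range { unsigned long start, end; };   /* half-open: [start, end) */

static int overlaps(struct range a, struct range b)
{
        return a.start < b.end && b.start < a.end;
}

static int is_text_model(unsigned long start, unsigned long end,
                         const struct range *prot_ranges, int n)
{
        struct range cand = { start, end };

        for (int i = 0; i < n; i++)
                if (overlaps(cand, prot_ranges[i]))
                        return 1;
        return 0;
}

int main(void)
{
        const struct range prot_ranges[] = {
                { 0xc1000000, 0xc1400000 },   /* "text"      */
                { 0xc1800000, 0xc1900000 },   /* "init text" */
        };

        assert(is_text_model(0xc13ff000, 0xc1400000, prot_ranges, 2) == 1); /* touches text */
        assert(is_text_model(0xc1400000, 0xc1600000, prot_ranges, 2) == 0); /* between them */
        assert(is_text_model(0xc17ff000, 0xc1a00000, prot_ranges, 2) == 1); /* spans init   */
        puts("overlap checks hold");
        return 0;
}
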
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index bb00c46..bf91a67 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
+  * around without checking the pgd every time.
+  */
+-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
++pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
+ EXPORT_SYMBOL_GPL(__supported_pte_mask);
+ int force_personality32;
+@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
+       for (address = start; address <= end; address += PGDIR_SIZE) {
+               const pgd_t *pgd_ref = pgd_offset_k(address);
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++              unsigned long cpu;
++#else
+               struct page *page;
++#endif
+               if (pgd_none(*pgd_ref))
+                       continue;
+               spin_lock(&pgd_lock);
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++              for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
++                      pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
++
++                      if (pgd_none(*pgd))
++                              set_pgd(pgd, *pgd_ref);
++                      else
++                              BUG_ON(pgd_page_vaddr(*pgd)
++                                     != pgd_page_vaddr(*pgd_ref));
++                      pgd = pgd_offset_cpu(cpu, kernel, address);
++#else
+               list_for_each_entry(page, &pgd_list, lru) {
+                       pgd_t *pgd;
+                       spinlock_t *pgt_lock;
+@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
+                       /* the pgt_lock only for Xen */
+                       pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+                       spin_lock(pgt_lock);
++#endif
+                       if (pgd_none(*pgd))
+                               set_pgd(pgd, *pgd_ref);
+@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
+                               BUG_ON(pgd_page_vaddr(*pgd)
+                                      != pgd_page_vaddr(*pgd_ref));
++#ifndef CONFIG_PAX_PER_CPU_PGD
+                       spin_unlock(pgt_lock);
++#endif
++
+               }
+               spin_unlock(&pgd_lock);
+       }
+@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
+ {
+       if (pgd_none(*pgd)) {
+               pud_t *pud = (pud_t *)spp_getpage();
+-              pgd_populate(&init_mm, pgd, pud);
++              pgd_populate_kernel(&init_mm, pgd, pud);
+               if (pud != pud_offset(pgd, 0))
+                       printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
+                              pud, pud_offset(pgd, 0));
+@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
+ {
+       if (pud_none(*pud)) {
+               pmd_t *pmd = (pmd_t *) spp_getpage();
+-              pud_populate(&init_mm, pud, pmd);
++              pud_populate_kernel(&init_mm, pud, pmd);
+               if (pmd != pmd_offset(pud, 0))
+                       printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
+                              pmd, pmd_offset(pud, 0));
+@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
+       pmd = fill_pmd(pud, vaddr);
+       pte = fill_pte(pmd, vaddr);
++      pax_open_kernel();
+       set_pte(pte, new_pte);
++      pax_close_kernel();
+       /*
+        * It's enough to flush this one mapping.
+@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
+               pgd = pgd_offset_k((unsigned long)__va(phys));
+               if (pgd_none(*pgd)) {
+                       pud = (pud_t *) spp_getpage();
+-                      set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
+-                                              _PAGE_USER));
++                      set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
+               }
+               pud = pud_offset(pgd, (unsigned long)__va(phys));
+               if (pud_none(*pud)) {
+                       pmd = (pmd_t *) spp_getpage();
+-                      set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
+-                                              _PAGE_USER));
++                      set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
+               }
+               pmd = pmd_offset(pud, phys);
+               BUG_ON(!pmd_none(*pmd));
+@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
+                                             prot);
+               spin_lock(&init_mm.page_table_lock);
+-              pud_populate(&init_mm, pud, pmd);
++              pud_populate_kernel(&init_mm, pud, pmd);
+               spin_unlock(&init_mm.page_table_lock);
+       }
+       __flush_tlb_all();
+@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
+                                                page_size_mask);
+               spin_lock(&init_mm.page_table_lock);
+-              pgd_populate(&init_mm, pgd, pud);
++              pgd_populate_kernel(&init_mm, pgd, pud);
+               spin_unlock(&init_mm.page_table_lock);
+               pgd_changed = true;
+       }
+@@ -1221,8 +1242,8 @@ int kern_addr_valid(unsigned long addr)
+ static struct vm_area_struct gate_vma = {
+       .vm_start       = VSYSCALL_START,
+       .vm_end         = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
+-      .vm_page_prot   = PAGE_READONLY_EXEC,
+-      .vm_flags       = VM_READ | VM_EXEC
++      .vm_page_prot   = PAGE_READONLY,
++      .vm_flags       = VM_READ
+ };
+ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+@@ -1256,7 +1277,7 @@ int in_gate_area_no_mm(unsigned long addr)
+ const char *arch_vma_name(struct vm_area_struct *vma)
+ {
+-      if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
++      if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
+               return "[vdso]";
+       if (vma == &gate_vma)
+               return "[vsyscall]";
+diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
+index 7b179b4..6bd17777 100644
+--- a/arch/x86/mm/iomap_32.c
++++ b/arch/x86/mm/iomap_32.c
+@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+       type = kmap_atomic_idx_push();
+       idx = type + KM_TYPE_NR * smp_processor_id();
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++
++      pax_open_kernel();
+       set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
++      pax_close_kernel();
++
+       arch_flush_lazy_mmu_mode();
+       return (void *)vaddr;
+diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
+index 9a1e658..da003f3 100644
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+       for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
+               int is_ram = page_is_ram(pfn);
+-              if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
++              if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
+                       return NULL;
+               WARN_ON_ONCE(is_ram);
+       }
+@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
+  *
+  * Caller must ensure there is only one unmapping for the same pointer.
+  */
+-void iounmap(volatile void __iomem *addr)
++void iounmap(const volatile void __iomem *addr)
+ {
+       struct vm_struct *p, *o;
+@@ -310,6 +310,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
+       /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
+       if (page_is_ram(start >> PAGE_SHIFT))
++#ifdef CONFIG_HIGHMEM
++      if ((start >> PAGE_SHIFT) < max_low_pfn)
++#endif
+               return __va(phys);
+       addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
+@@ -322,6 +325,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
+ void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
+ {
+       if (page_is_ram(phys >> PAGE_SHIFT))
++#ifdef CONFIG_HIGHMEM
++      if ((phys >> PAGE_SHIFT) < max_low_pfn)
++#endif
+               return;
+       iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
+@@ -339,7 +345,7 @@ static int __init early_ioremap_debug_setup(char *str)
+ early_param("early_ioremap_debug", early_ioremap_debug_setup);
+ static __initdata int after_paging_init;
+-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
++static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
+ static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+ {
+@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
+               slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
+       pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
+-      memset(bm_pte, 0, sizeof(bm_pte));
+-      pmd_populate_kernel(&init_mm, pmd, bm_pte);
++      pmd_populate_user(&init_mm, pmd, bm_pte);
+       /*
+        * The boot-ioremap range spans multiple pmds, for which
+diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
+index d87dd6d..bf3fa66 100644
+--- a/arch/x86/mm/kmemcheck/kmemcheck.c
++++ b/arch/x86/mm/kmemcheck/kmemcheck.c
+@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
+        * memory (e.g. tracked pages)? For now, we need this to avoid
+        * invoking kmemcheck for PnP BIOS calls.
+        */
+-      if (regs->flags & X86_VM_MASK)
++      if (v8086_mode(regs))
+               return false;
+-      if (regs->cs != __KERNEL_CS)
++      if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
+               return false;
+       pte = kmemcheck_pte_lookup(address);
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+index c1af323..4758dad 100644
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
+  * Leave an at least ~128 MB hole with possible stack randomization.
+  */
+ #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
+-#define MAX_GAP (TASK_SIZE/6*5)
++#define MAX_GAP (pax_task_size/6*5)
+ static int mmap_is_legacy(void)
+ {
+@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
+       return rnd << PAGE_SHIFT;
+ }
+-static unsigned long mmap_base(void)
++static unsigned long mmap_base(struct mm_struct *mm)
+ {
+       unsigned long gap = rlimit(RLIMIT_STACK);
++      unsigned long pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (mm->pax_flags & MF_PAX_SEGMEXEC)
++              pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
+       if (gap < MIN_GAP)
+               gap = MIN_GAP;
+       else if (gap > MAX_GAP)
+               gap = MAX_GAP;
+-      return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
++      return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
+ }
+ /*
+  * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
+  * does, but not when emulating X86_32
+  */
+-unsigned long mmap_legacy_base(void)
++unsigned long mmap_legacy_base(struct mm_struct *mm)
+ {
+-      if (mmap_is_ia32())
++      if (mmap_is_ia32()) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++              if (mm->pax_flags & MF_PAX_SEGMEXEC)
++                      return SEGMEXEC_TASK_UNMAPPED_BASE;
++              else
++#endif
++
+               return TASK_UNMAPPED_BASE;
+-      else
++      } else
+               return TASK_UNMAPPED_BASE + mmap_rnd();
+ }
+@@ -113,11 +126,23 @@ unsigned long mmap_legacy_base(void)
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+       if (mmap_is_legacy()) {
+-              mm->mmap_base = mmap_legacy_base();
++              mm->mmap_base = mmap_legacy_base(mm);
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base += mm->delta_mmap;
++#endif
++
+               mm->get_unmapped_area = arch_get_unmapped_area;
+               mm->unmap_area = arch_unmap_area;
+       } else {
+-              mm->mmap_base = mmap_base();
++              mm->mmap_base = mmap_base(mm);
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+               mm->unmap_area = arch_unmap_area_topdown;
+       }
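
The mmap.c hunks make the layout calculations per-mm: the stack gap is clamped between MIN_GAP and MAX_GAP, subtracted together with the random offset from a task size that SEGMEXEC may have halved, and the RANDMMAP deltas are then applied in arch_pick_mmap_layout(). The core arithmetic of mmap_base(), with stand-in constants and without the SEGMEXEC and delta handling, is replayed below.

/* Stand-in model of mmap_base(): clamp the stack gap, subtract gap and a
 * random offset from the task size, round to a page boundary. Constants
 * are illustrative. */
#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned long mmap_base_model(unsigned long task_size,
                                     unsigned long stack_rlimit,
                                     unsigned long rnd)
{
        unsigned long min_gap = 128UL * 1024 * 1024;   /* ~128 MB floor          */
        unsigned long max_gap = task_size / 6 * 5;     /* MAX_GAP from the patch */
        unsigned long gap = stack_rlimit;

        if (gap < min_gap)
                gap = min_gap;
        else if (gap > max_gap)
                gap = max_gap;

        return PAGE_ALIGN(task_size - gap - rnd);
}

int main(void)
{
        unsigned long task_size = 0xC0000000UL;        /* 3 GiB user split */

        printf("small rlimit: %#lx\n", mmap_base_model(task_size, 8UL << 20, 0x123000));
        printf("huge rlimit:  %#lx\n", mmap_base_model(task_size, ~0UL, 0x123000));
        return 0;
}
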
+diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
+index dc0b727..f612039 100644
+--- a/arch/x86/mm/mmio-mod.c
++++ b/arch/x86/mm/mmio-mod.c
+@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
+               break;
+       default:
+               {
+-                      unsigned char *ip = (unsigned char *)instptr;
++                      unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
+                       my_trace->opcode = MMIO_UNKNOWN_OP;
+                       my_trace->width = 0;
+                       my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
+@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
+ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
+                                                       void __iomem *addr)
+ {
+-      static atomic_t next_id;
++      static atomic_unchecked_t next_id;
+       struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
+       /* These are page-unaligned. */
+       struct mmiotrace_map map = {
+@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
+                       .private = trace
+               },
+               .phys = offset,
+-              .id = atomic_inc_return(&next_id)
++              .id = atomic_inc_return_unchecked(&next_id)
+       };
+       map.map_id = trace->id;
+@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
+       ioremap_trace_core(offset, size, addr);
+ }
+-static void iounmap_trace_core(volatile void __iomem *addr)
++static void iounmap_trace_core(const volatile void __iomem *addr)
+ {
+       struct mmiotrace_map map = {
+               .phys = 0,
+@@ -328,7 +328,7 @@ not_enabled:
+       }
+ }
+-void mmiotrace_iounmap(volatile void __iomem *addr)
++void mmiotrace_iounmap(const volatile void __iomem *addr)
+ {
+       might_sleep();
+       if (is_enabled()) /* recheck and proper locking in *_core() */
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index a71c4e2..301ae44 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -474,7 +474,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
+       return true;
+ }
+-static int __init numa_register_memblks(struct numa_meminfo *mi)
++static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
+ {
+       unsigned long uninitialized_var(pfn_align);
+       int i, nid;
+diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
+index d0b1773..4c3327c 100644
+--- a/arch/x86/mm/pageattr-test.c
++++ b/arch/x86/mm/pageattr-test.c
+@@ -36,7 +36,7 @@ enum {
+ static int pte_testbit(pte_t pte)
+ {
+-      return pte_flags(pte) & _PAGE_UNUSED1;
++      return pte_flags(pte) & _PAGE_CPA_TEST;
+ }
+ struct split_state {
+diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
+index bb32480..75f2f5e 100644
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+        */
+ #ifdef CONFIG_PCI_BIOS
+       if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
+-              pgprot_val(forbidden) |= _PAGE_NX;
++              pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
+ #endif
+       /*
+@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+        * Does not cover __inittext since that is gone later on. On
+        * 64bit we do not enforce !NX on the low mapping
+        */
+-      if (within(address, (unsigned long)_text, (unsigned long)_etext))
+-              pgprot_val(forbidden) |= _PAGE_NX;
++      if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
++              pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
++#ifdef CONFIG_DEBUG_RODATA
+       /*
+        * The .rodata section needs to be read-only. Using the pfn
+        * catches all aliases.
+@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+       if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
+                  __pa_symbol(__end_rodata) >> PAGE_SHIFT))
+               pgprot_val(forbidden) |= _PAGE_RW;
++#endif
+ #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
+       /*
+@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+       }
+ #endif
++#ifdef CONFIG_PAX_KERNEXEC
++      if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
++              pgprot_val(forbidden) |= _PAGE_RW;
++              pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
++      }
++#endif
++
+       prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
+       return prot;
+@@ -400,23 +409,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
+ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
+ {
+       /* change init_mm */
++      pax_open_kernel();
+       set_pte_atomic(kpte, pte);
++
+ #ifdef CONFIG_X86_32
+       if (!SHARED_KERNEL_PMD) {
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++              unsigned long cpu;
++#else
+               struct page *page;
++#endif
++#ifdef CONFIG_PAX_PER_CPU_PGD
++              for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
++                      pgd_t *pgd = get_cpu_pgd(cpu, kernel);
++#else
+               list_for_each_entry(page, &pgd_list, lru) {
+-                      pgd_t *pgd;
++                      pgd_t *pgd = (pgd_t *)page_address(page);
++#endif
++
+                       pud_t *pud;
+                       pmd_t *pmd;
+-                      pgd = (pgd_t *)page_address(page) + pgd_index(address);
++                      pgd += pgd_index(address);
+                       pud = pud_offset(pgd, address);
+                       pmd = pmd_offset(pud, address);
+                       set_pte_atomic((pte_t *)pmd, pte);
+               }
+       }
+ #endif
++      pax_close_kernel();
+ }
+ static int
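
In static_protections() the net effect of the pageattr.c changes is one more source of forbidden bits: any page inside the kernel image (between _text and _sdata, translated through ktla_ktva) loses both the writable bit and the NX bit from any requested protection, and _PAGE_NX is now consistently masked with __supported_pte_mask. The collect-forbidden-bits-then-strip-them pattern is small enough to show on its own; the flag values below are invented and are not the real _PAGE_* encodings.

/* Model of the "collect forbidden bits, then strip them from the requested
 * protection" pattern used by static_protections(). Flag values are made up. */
#include <stdio.h>

#define P_RW 0x1UL    /* stand-in for _PAGE_RW */
#define P_NX 0x2UL    /* stand-in for _PAGE_NX */

static unsigned long supported_mask = P_RW | P_NX;   /* cf. __supported_pte_mask */

static unsigned long static_protections_model(unsigned long prot,
                                              int in_kernel_image,
                                              int in_rodata)
{
        unsigned long forbidden = 0;

        if (in_kernel_image)                       /* KERNEXEC: never writable, never NX */
                forbidden |= P_RW | (P_NX & supported_mask);
        if (in_rodata)                             /* .rodata must stay read-only        */
                forbidden |= P_RW;

        return prot & ~forbidden;
}

int main(void)
{
        unsigned long req = P_RW | P_NX;   /* caller asks for writable, non-executable */

        printf("in kernel image: %#lx\n", static_protections_model(req, 1, 0)); /* 0x0 */
        printf("in rodata:       %#lx\n", static_protections_model(req, 0, 1)); /* 0x2 */
        printf("elsewhere:       %#lx\n", static_protections_model(req, 0, 0)); /* 0x3 */
        return 0;
}
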
+diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
+index 6574388..87e9bef 100644
+--- a/arch/x86/mm/pat.c
++++ b/arch/x86/mm/pat.c
+@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
+       if (!entry) {
+               printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
+-                     current->comm, current->pid, start, end - 1);
++                      current->comm, task_pid_nr(current), start, end - 1);
+               return -EINVAL;
+       }
+@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+       while (cursor < to) {
+               if (!devmem_is_allowed(pfn)) {
+-                      printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
+-                              current->comm, from, to - 1);
++                      printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
++                              current->comm, from, to - 1, cursor);
+                       return 0;
+               }
+               cursor += PAGE_SIZE;
+@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
+       if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
+               printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
+                       "for [mem %#010Lx-%#010Lx]\n",
+-                      current->comm, current->pid,
++                      current->comm, task_pid_nr(current),
+                       cattr_name(flags),
+                       base, (unsigned long long)(base + size-1));
+               return -EINVAL;
+@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
+               flags = lookup_memtype(paddr);
+               if (want_flags != flags) {
+                       printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
+-                              current->comm, current->pid,
++                              current->comm, task_pid_nr(current),
+                               cattr_name(want_flags),
+                               (unsigned long long)paddr,
+                               (unsigned long long)(paddr + size - 1),
+@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
+                       free_memtype(paddr, paddr + size);
+                       printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
+                               " for [mem %#010Lx-%#010Lx], got %s\n",
+-                              current->comm, current->pid,
++                              current->comm, task_pid_nr(current),
+                               cattr_name(want_flags),
+                               (unsigned long long)paddr,
+                               (unsigned long long)(paddr + size - 1),
+diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
+index 415f6c4..d319983 100644
+--- a/arch/x86/mm/pat_rbtree.c
++++ b/arch/x86/mm/pat_rbtree.c
+@@ -160,7 +160,7 @@ success:
+ failure:
+       printk(KERN_INFO "%s:%d conflicting memory types "
+-              "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
++              "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
+               end, cattr_name(found_type), cattr_name(match->type));
+       return -EBUSY;
+ }
+diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
+index 9f0614d..92ae64a 100644
+--- a/arch/x86/mm/pf_in.c
++++ b/arch/x86/mm/pf_in.c
+@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
+       int i;
+       enum reason_type rv = OTHERS;
+-      p = (unsigned char *)ins_addr;
++      p = (unsigned char *)ktla_ktva(ins_addr);
+       p += skip_prefix(p, &prf);
+       p += get_opcode(p, &opcode);
+@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
+       struct prefix_bits prf;
+       int i;
+-      p = (unsigned char *)ins_addr;
++      p = (unsigned char *)ktla_ktva(ins_addr);
+       p += skip_prefix(p, &prf);
+       p += get_opcode(p, &opcode);
+@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
+       struct prefix_bits prf;
+       int i;
+-      p = (unsigned char *)ins_addr;
++      p = (unsigned char *)ktla_ktva(ins_addr);
+       p += skip_prefix(p, &prf);
+       p += get_opcode(p, &opcode);
+@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
+       struct prefix_bits prf;
+       int i;
+-      p = (unsigned char *)ins_addr;
++      p = (unsigned char *)ktla_ktva(ins_addr);
+       p += skip_prefix(p, &prf);
+       p += get_opcode(p, &opcode);
+       for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
+@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
+       struct prefix_bits prf;
+       int i;
+-      p = (unsigned char *)ins_addr;
++      p = (unsigned char *)ktla_ktva(ins_addr);
+       p += skip_prefix(p, &prf);
+       p += get_opcode(p, &opcode);
+       for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
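
Each decoder entry point in pf_in.c now translates the instruction address with ktla_ktva() before reading bytes, because under KERNEXEC the faulting address and the address at which the kernel text is actually readable are different aliases of the same bytes. The toy model below only demonstrates that translate-before-dereference step; the displacement, the buffer and the helper name are all invented.

/* Toy model of an address alias: the same bytes are reachable through a
 * shifted "linear" cookie, and the decoder must translate it back into a
 * real pointer before reading opcode bytes. The displacement is arbitrary. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static unsigned char image[64];         /* stands in for the kernel text   */
#define ALIAS_DELTA 0x1000UL            /* invented ktla/ktva displacement */

static unsigned char *ktla_ktva_model(uintptr_t linear)
{
        return image + (linear - ALIAS_DELTA);
}

int main(void)
{
        memcpy(image, "\x89\xd8\xc3", 3);             /* mov %ebx,%eax ; ret        */

        uintptr_t fault_ip = ALIAS_DELTA;             /* plays the role of ins_addr */
        unsigned char *p = ktla_ktva_model(fault_ip); /* translate, then decode     */

        assert(p[0] == 0x89 && p[2] == 0xc3);
        printf("opcode byte at translated address: %#x\n", p[0]);
        return 0;
}
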
+diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
+index 17fda6a..f7d54a0 100644
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -91,10 +91,67 @@ static inline void pgd_list_del(pgd_t *pgd)
+       list_del(&page->lru);
+ }
+-#define UNSHARED_PTRS_PER_PGD                         \
+-      (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
++void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
++{
++      unsigned int count = USER_PGD_PTRS;
++      if (!pax_user_shadow_base)
++              return;
++
++      while (count--)
++              *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
++}
++#endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
++{
++      unsigned int count = USER_PGD_PTRS;
++
++      while (count--) {
++              pgd_t pgd;
++
++#ifdef CONFIG_X86_64
++              pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
++#else
++              pgd = *src++;
++#endif
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++              pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
++#endif
++
++              *dst++ = pgd;
++      }
++
++}
++#endif
++
++#ifdef CONFIG_X86_64
++#define pxd_t                         pud_t
++#define pyd_t                         pgd_t
++#define paravirt_release_pxd(pfn)     paravirt_release_pud(pfn)
++#define pxd_free(mm, pud)             pud_free((mm), (pud))
++#define pyd_populate(mm, pgd, pud)    pgd_populate((mm), (pgd), (pud))
++#define pyd_offset(mm, address)               pgd_offset((mm), (address))
++#define PYD_SIZE                      PGDIR_SIZE
++#else
++#define pxd_t                         pmd_t
++#define pyd_t                         pud_t
++#define paravirt_release_pxd(pfn)     paravirt_release_pmd(pfn)
++#define pxd_free(mm, pud)             pmd_free((mm), (pud))
++#define pyd_populate(mm, pgd, pud)    pud_populate((mm), (pgd), (pud))
++#define pyd_offset(mm, address)               pud_offset((mm), (address))
++#define PYD_SIZE                      PUD_SIZE
++#endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
++static inline void pgd_dtor(pgd_t *pgd) {}
++#else
+ static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
+ {
+       BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
+@@ -135,6 +192,7 @@ static void pgd_dtor(pgd_t *pgd)
+       pgd_list_del(pgd);
+       spin_unlock(&pgd_lock);
+ }
++#endif
+ /*
+  * List of all pgd's needed for non-PAE so it can invalidate entries
+@@ -147,7 +205,7 @@ static void pgd_dtor(pgd_t *pgd)
+  * -- nyc
+  */
+-#ifdef CONFIG_X86_PAE
++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
+ /*
+  * In PAE mode, we need to do a cr3 reload (=tlb flush) when
+  * updating the top-level pagetable entries to guarantee the
+@@ -159,7 +217,7 @@ static void pgd_dtor(pgd_t *pgd)
+  * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
+  * and initialize the kernel pmds here.
+  */
+-#define PREALLOCATED_PMDS     UNSHARED_PTRS_PER_PGD
++#define PREALLOCATED_PXDS     (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
+ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
+ {
+@@ -177,36 +235,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
+        */
+       flush_tlb_mm(mm);
+ }
++#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
++#define PREALLOCATED_PXDS     USER_PGD_PTRS
+ #else  /* !CONFIG_X86_PAE */
+ /* No need to prepopulate any pagetable entries in non-PAE modes. */
+-#define PREALLOCATED_PMDS     0
++#define PREALLOCATED_PXDS     0
+ #endif        /* CONFIG_X86_PAE */
+-static void free_pmds(pmd_t *pmds[])
++static void free_pxds(pxd_t *pxds[])
+ {
+       int i;
+-      for(i = 0; i < PREALLOCATED_PMDS; i++)
+-              if (pmds[i])
+-                      free_page((unsigned long)pmds[i]);
++      for(i = 0; i < PREALLOCATED_PXDS; i++)
++              if (pxds[i])
++                      free_page((unsigned long)pxds[i]);
+ }
+-static int preallocate_pmds(pmd_t *pmds[])
++static int preallocate_pxds(pxd_t *pxds[])
+ {
+       int i;
+       bool failed = false;
+-      for(i = 0; i < PREALLOCATED_PMDS; i++) {
+-              pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
+-              if (pmd == NULL)
++      for(i = 0; i < PREALLOCATED_PXDS; i++) {
++              pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
++              if (pxd == NULL)
+                       failed = true;
+-              pmds[i] = pmd;
++              pxds[i] = pxd;
+       }
+       if (failed) {
+-              free_pmds(pmds);
++              free_pxds(pxds);
+               return -ENOMEM;
+       }
+@@ -219,51 +279,55 @@ static int preallocate_pmds(pmd_t *pmds[])
+  * preallocate which never got a corresponding vma will need to be
+  * freed manually.
+  */
+-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
++static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
+ {
+       int i;
+-      for(i = 0; i < PREALLOCATED_PMDS; i++) {
++      for(i = 0; i < PREALLOCATED_PXDS; i++) {
+               pgd_t pgd = pgdp[i];
+               if (pgd_val(pgd) != 0) {
+-                      pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
++                      pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
+-                      pgdp[i] = native_make_pgd(0);
++                      set_pgd(pgdp + i, native_make_pgd(0));
+-                      paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
+-                      pmd_free(mm, pmd);
++                      paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
++                      pxd_free(mm, pxd);
+               }
+       }
+ }
+-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
++static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
+ {
+-      pud_t *pud;
++      pyd_t *pyd;
+       unsigned long addr;
+       int i;
+-      if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
++      if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
+               return;
+-      pud = pud_offset(pgd, 0);
++#ifdef CONFIG_X86_64
++      pyd = pyd_offset(mm, 0L);
++#else
++      pyd = pyd_offset(pgd, 0L);
++#endif
+-      for (addr = i = 0; i < PREALLOCATED_PMDS;
+-           i++, pud++, addr += PUD_SIZE) {
+-              pmd_t *pmd = pmds[i];
++      for (addr = i = 0; i < PREALLOCATED_PXDS;
++           i++, pyd++, addr += PYD_SIZE) {
++              pxd_t *pxd = pxds[i];
+               if (i >= KERNEL_PGD_BOUNDARY)
+-                      memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
+-                             sizeof(pmd_t) * PTRS_PER_PMD);
++                      memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
++                             sizeof(pxd_t) * PTRS_PER_PMD);
+-              pud_populate(mm, pud, pmd);
++              pyd_populate(mm, pyd, pxd);
+       }
+ }
+ pgd_t *pgd_alloc(struct mm_struct *mm)
+ {
+       pgd_t *pgd;
+-      pmd_t *pmds[PREALLOCATED_PMDS];
++      pxd_t *pxds[PREALLOCATED_PXDS];
+       pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
+@@ -272,11 +336,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+       mm->pgd = pgd;
+-      if (preallocate_pmds(pmds) != 0)
++      if (preallocate_pxds(pxds) != 0)
+               goto out_free_pgd;
+       if (paravirt_pgd_alloc(mm) != 0)
+-              goto out_free_pmds;
++              goto out_free_pxds;
+       /*
+        * Make sure that pre-populating the pmds is atomic with
+@@ -286,14 +350,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+       spin_lock(&pgd_lock);
+       pgd_ctor(mm, pgd);
+-      pgd_prepopulate_pmd(mm, pgd, pmds);
++      pgd_prepopulate_pxd(mm, pgd, pxds);
+       spin_unlock(&pgd_lock);
+       return pgd;
+-out_free_pmds:
+-      free_pmds(pmds);
++out_free_pxds:
++      free_pxds(pxds);
+ out_free_pgd:
+       free_page((unsigned long)pgd);
+ out:
+@@ -302,7 +366,7 @@ out:
+ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+ {
+-      pgd_mop_up_pmds(mm, pgd);
++      pgd_mop_up_pxds(mm, pgd);
+       pgd_dtor(pgd);
+       paravirt_pgd_free(mm, pgd);
+       free_page((unsigned long)pgd);
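
Behind the pxd_t/pyd_t renaming, pgd_alloc() keeps its original shape: preallocate a fixed array of lower-level tables, free the whole array and fail if any single allocation did not succeed, then populate the new pgd under pgd_lock. The macros merely point that machinery at pud/pgd on x86_64 (covering the per-CPU user PGD range) instead of pmd/pud on 32-bit PAE. The all-or-nothing preallocation idiom itself is shown standalone below, with malloc standing in for __get_free_page() and an arbitrary table count.

/* Generic "preallocate N tables or free them all" idiom, mirroring
 * preallocate_pxds()/free_pxds() above. */
#include <stdio.h>
#include <stdlib.h>

#define NTABLES     4
#define TABLE_BYTES 4096

static void free_tables(void *tables[NTABLES])
{
        for (int i = 0; i < NTABLES; i++)
                free(tables[i]);                 /* free(NULL) is a no-op */
}

static int preallocate_tables(void *tables[NTABLES])
{
        int failed = 0;

        for (int i = 0; i < NTABLES; i++) {
                tables[i] = calloc(1, TABLE_BYTES);
                if (!tables[i])
                        failed = 1;              /* keep going, remember the failure   */
        }
        if (failed) {
                free_tables(tables);             /* all-or-nothing: release everything */
                return -12;                      /* -ENOMEM */
        }
        return 0;
}

int main(void)
{
        void *tables[NTABLES] = { 0 };

        if (preallocate_tables(tables) == 0) {
                puts("all tables preallocated");
                free_tables(tables);
        }
        return 0;
}
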
+diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
+index a69bcb8..19068ab 100644
+--- a/arch/x86/mm/pgtable_32.c
++++ b/arch/x86/mm/pgtable_32.c
+@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
+               return;
+       }
+       pte = pte_offset_kernel(pmd, vaddr);
++
++      pax_open_kernel();
+       if (pte_val(pteval))
+               set_pte_at(&init_mm, vaddr, pte, pteval);
+       else
+               pte_clear(&init_mm, vaddr, pte);
++      pax_close_kernel();
+       /*
+        * It's enough to flush this one mapping.
+diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
+index e666cbb..61788c45 100644
+--- a/arch/x86/mm/physaddr.c
++++ b/arch/x86/mm/physaddr.c
+@@ -10,7 +10,7 @@
+ #ifdef CONFIG_X86_64
+ #ifdef CONFIG_DEBUG_VIRTUAL
+-unsigned long __phys_addr(unsigned long x)
++unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
+ {
+       unsigned long y = x - __START_KERNEL_map;
+@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
+ #else
+ #ifdef CONFIG_DEBUG_VIRTUAL
+-unsigned long __phys_addr(unsigned long x)
++unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
+ {
+       unsigned long phys_addr = x - PAGE_OFFSET;
+       /* VMALLOC_* aren't constants  */
+diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
+index 410531d..0f16030 100644
+--- a/arch/x86/mm/setup_nx.c
++++ b/arch/x86/mm/setup_nx.c
+@@ -5,8 +5,10 @@
+ #include <asm/pgtable.h>
+ #include <asm/proto.h>
++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ static int disable_nx __cpuinitdata;
++#ifndef CONFIG_PAX_PAGEEXEC
+ /*
+  * noexec = on|off
+  *
+@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
+       return 0;
+ }
+ early_param("noexec", noexec_setup);
++#endif
++
++#endif
+ void __cpuinit x86_configure_nx(void)
+ {
++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+       if (cpu_has_nx && !disable_nx)
+               __supported_pte_mask |= _PAGE_NX;
+       else
++#endif
+               __supported_pte_mask &= ~_PAGE_NX;
+ }
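
After this change the NX handling in setup_nx.c is compiled out entirely on builds that cannot express it, and x86_configure_nx() only adds _PAGE_NX to __supported_pte_mask when the build is 64-bit or PAE, the CPU advertises NX and it has not been disabled; every other combination funnels into the mask-clear branch. The decision reduces to a guarded bit-mask update, modelled below (the bit position matches x86's NX bit, the rest is simplified, and the noexec= parameter handling is folded into a single flag).

/* Simplified model of x86_configure_nx(): NX is enabled in the supported
 * PTE mask only when both the build and the CPU can use it. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_NX_BIT (1ULL << 63)

static unsigned long long configure_nx_model(bool build_has_nx, bool cpu_has_nx,
                                             bool disable_nx, unsigned long long mask)
{
        if (build_has_nx && cpu_has_nx && !disable_nx)
                mask |= PAGE_NX_BIT;
        else
                mask &= ~PAGE_NX_BIT;
        return mask;
}

int main(void)
{
        printf("%#llx\n", configure_nx_model(true,  true,  false, 0));           /* NX on  */
        printf("%#llx\n", configure_nx_model(true,  true,  true,  PAGE_NX_BIT)); /* NX off */
        printf("%#llx\n", configure_nx_model(false, true,  false, PAGE_NX_BIT)); /* NX off */
        return 0;
}
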
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 282375f..e03a98f 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -48,7 +48,11 @@ void leave_mm(int cpu)
+               BUG();
+       if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
+               cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+               load_cr3(swapper_pg_dir);
++#endif
++
+       }
+ }
+ EXPORT_SYMBOL_GPL(leave_mm);
+diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
+new file mode 100644
+index 0000000..dace51c
+--- /dev/null
++++ b/arch/x86/mm/uderef_64.c
+@@ -0,0 +1,37 @@
++#include <linux/mm.h>
++#include <asm/pgtable.h>
++#include <asm/uaccess.h>
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++/* PaX: due to the special call convention these functions must
++ * - remain leaf functions under all configurations,
++ * - never be called directly, only dereferenced from the wrappers.
++ */
++void __pax_open_userland(void)
++{
++      unsigned int cpu;
++
++      if (unlikely(!segment_eq(get_fs(), USER_DS)))
++              return;
++
++      cpu = raw_get_cpu();
++      BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
++      write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
++      raw_put_cpu_no_resched();
++}
++EXPORT_SYMBOL(__pax_open_userland);
++
++void __pax_close_userland(void)
++{
++      unsigned int cpu;
++
++      if (unlikely(!segment_eq(get_fs(), USER_DS)))
++              return;
++
++      cpu = raw_get_cpu();
++      BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
++      write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
++      raw_put_cpu_no_resched();
++}
++EXPORT_SYMBOL(__pax_close_userland);
++#endif
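
__pax_open_userland()/__pax_close_userland() above switch between the per-CPU kernel and user page tables by rewriting CR3 with a PCID tag in the low bits and the no-flush bit on top. A minimal stand-alone sketch of how those CR3 fields fit together; the DEMO_PCID_* values are assumptions for illustration only, not taken from the patch.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define DEMO_PAGE_MASK    (~0xfffULL)     /* 4 KiB pages */
#define DEMO_PCID_USER    1ULL            /* assumed user PCID value */
#define DEMO_PCID_NOFLUSH (1ULL << 63)    /* CR3 bit 63: keep TLB entries for the new PCID */

int main(void)
{
    uint64_t user_pgd_pa = 0x123456000ULL;  /* stand-in for __pa(get_cpu_pgd(cpu, user)) */

    /* what __pax_open_userland() writes: page-aligned pgd | PCID | no-flush */
    uint64_t cr3 = user_pgd_pa | DEMO_PCID_USER | DEMO_PCID_NOFLUSH;

    /* the low 12 bits carry the PCID -- the field the BUG_ON checks extract with & ~PAGE_MASK */
    assert((cr3 & ~DEMO_PAGE_MASK) == DEMO_PCID_USER);

    /* the page-aligned part (minus control bit 63) is the pgd's physical address */
    assert((cr3 & DEMO_PAGE_MASK & ~DEMO_PCID_NOFLUSH) == user_pgd_pa);

    printf("cr3 = %#018llx, pcid = %llu\n",
           (unsigned long long)cr3, (unsigned long long)(cr3 & ~DEMO_PAGE_MASK));
    return 0;
}
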
+diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
+index 877b9a1..a8ecf42 100644
+--- a/arch/x86/net/bpf_jit.S
++++ b/arch/x86/net/bpf_jit.S
+@@ -9,6 +9,7 @@
+  */
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
++#include <asm/alternative-asm.h>
+ /*
+  * Calling convention :
+@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
+       jle     bpf_slow_path_word
+       mov     (SKBDATA,%rsi),%eax
+       bswap   %eax                    /* ntohl() */
++      pax_force_retaddr
+       ret
+ sk_load_half:
+@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
+       jle     bpf_slow_path_half
+       movzwl  (SKBDATA,%rsi),%eax
+       rol     $8,%ax                  # ntohs()
++      pax_force_retaddr
+       ret
+ sk_load_byte:
+@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
+       cmp     %esi,%r9d   /* if (offset >= hlen) goto bpf_slow_path_byte */
+       jle     bpf_slow_path_byte
+       movzbl  (SKBDATA,%rsi),%eax
++      pax_force_retaddr
+       ret
+ /**
+@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
+       movzbl  (SKBDATA,%rsi),%ebx
+       and     $15,%bl
+       shl     $2,%bl
++      pax_force_retaddr
+       ret
+ /* rsi contains offset and can be scratched */
+@@ -109,6 +114,7 @@ bpf_slow_path_word:
+       js      bpf_error
+       mov     -12(%rbp),%eax
+       bswap   %eax
++      pax_force_retaddr
+       ret
+ bpf_slow_path_half:
+@@ -117,12 +123,14 @@ bpf_slow_path_half:
+       mov     -12(%rbp),%ax
+       rol     $8,%ax
+       movzwl  %ax,%eax
++      pax_force_retaddr
+       ret
+ bpf_slow_path_byte:
+       bpf_slow_path_common(1)
+       js      bpf_error
+       movzbl  -12(%rbp),%eax
++      pax_force_retaddr
+       ret
+ bpf_slow_path_byte_msh:
+@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
+       and     $15,%al
+       shl     $2,%al
+       xchg    %eax,%ebx
++      pax_force_retaddr
+       ret
+ #define sk_negative_common(SIZE)                              \
+@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
+       sk_negative_common(4)
+       mov     (%rax), %eax
+       bswap   %eax
++      pax_force_retaddr
+       ret
+ bpf_slow_path_half_neg:
+@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
+       mov     (%rax),%ax
+       rol     $8,%ax
+       movzwl  %ax,%eax
++      pax_force_retaddr
+       ret
+ bpf_slow_path_byte_neg:
+@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
+       .globl  sk_load_byte_negative_offset
+       sk_negative_common(1)
+       movzbl  (%rax), %eax
++      pax_force_retaddr
+       ret
+ bpf_slow_path_byte_msh_neg:
+@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
+       and     $15,%al
+       shl     $2,%al
+       xchg    %eax,%ebx
++      pax_force_retaddr
+       ret
+ bpf_error:
+@@ -197,4 +210,5 @@ bpf_error:
+       xor             %eax,%eax
+       mov             -8(%rbp),%rbx
+       leaveq
++      pax_force_retaddr
+       ret
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index f66b540..3e88dfb 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -12,6 +12,7 @@
+ #include <linux/netdevice.h>
+ #include <linux/filter.h>
+ #include <linux/if_vlan.h>
++#include <linux/random.h>
+ /*
+  * Conventions :
+@@ -49,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
+       return ptr + len;
+ }
++#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
++#define MAX_INSTR_CODE_SIZE 96
++#else
++#define MAX_INSTR_CODE_SIZE 64
++#endif
++
+ #define EMIT(bytes, len)      do { prog = emit_code(prog, bytes, len); } while (0)
+ #define EMIT1(b1)             EMIT(b1, 1)
+ #define EMIT2(b1, b2)         EMIT((b1) + ((b2) << 8), 2)
+ #define EMIT3(b1, b2, b3)     EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
+ #define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
++
++#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
++/* original constant will appear in ecx */
++#define DILUTE_CONST_SEQUENCE(_off, _key)     \
++do {                                          \
++      /* mov ecx, randkey */                  \
++      EMIT1(0xb9);                            \
++      EMIT(_key, 4);                          \
++      /* xor ecx, randkey ^ off */            \
++      EMIT2(0x81, 0xf1);                      \
++      EMIT((_key) ^ (_off), 4);               \
++} while (0)
++
++#define EMIT1_off32(b1, _off)                                                         \
++do {                                                                                  \
++      switch (b1) {                                                                   \
++              case 0x05: /* add eax, imm32 */                                         \
++              case 0x2d: /* sub eax, imm32 */                                         \
++              case 0x25: /* and eax, imm32 */                                         \
++              case 0x0d: /* or eax, imm32 */                                          \
++              case 0xb8: /* mov eax, imm32 */                                         \
++              case 0x35: /* xor eax, imm32 */                                         \
++              case 0x3d: /* cmp eax, imm32 */                                         \
++              case 0xa9: /* test eax, imm32 */                                        \
++                      DILUTE_CONST_SEQUENCE(_off, randkey);                           \
++                      EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
++                      break;                                                          \
++              case 0xbb: /* mov ebx, imm32 */                                         \
++                      DILUTE_CONST_SEQUENCE(_off, randkey);                           \
++                      /* mov ebx, ecx */                                              \
++                      EMIT2(0x89, 0xcb);                                              \
++                      break;                                                          \
++              case 0xbe: /* mov esi, imm32 */                                         \
++                      DILUTE_CONST_SEQUENCE(_off, randkey);                           \
++                      /* mov esi, ecx */                                              \
++                      EMIT2(0x89, 0xce);                                              \
++                      break;                                                          \
++              case 0xe8: /* call rel imm32, always to known funcs */                  \
++                      EMIT1(b1);                                                      \
++                      EMIT(_off, 4);                                                  \
++                      break;                                                          \
++              case 0xe9: /* jmp rel imm32 */                                          \
++                      EMIT1(b1);                                                      \
++                      EMIT(_off, 4);                                                  \
++                      /* prevent fall-through, we're not called if off = 0 */         \
++                      EMIT(0xcccccccc, 4);                                            \
++                      EMIT(0xcccccccc, 4);                                            \
++                      break;                                                          \
++              default:                                                                \
++                      BUILD_BUG();                                                    \
++      }                                                                               \
++} while (0)
++
++#define EMIT2_off32(b1, b2, _off)                                     \
++do {                                                                  \
++      if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */  \
++              EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */         \
++              EMIT(randkey, 4);                                       \
++              EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */     \
++              EMIT((_off) - randkey, 4);                              \
++      } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
++              DILUTE_CONST_SEQUENCE(_off, randkey);                   \
++              /* imul eax, ecx */                                     \
++              EMIT3(0x0f, 0xaf, 0xc1);                                \
++      } else {                                                        \
++              BUILD_BUG();                                            \
++      }                                                               \
++} while (0)
++#else
+ #define EMIT1_off32(b1, off)  do { EMIT1(b1); EMIT(off, 4);} while (0)
++#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
++#endif
+ #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
+ #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
+@@ -90,6 +168,24 @@ do {                                                                       \
+ #define X86_JBE 0x76
+ #define X86_JA  0x77
++#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
++#define APPEND_FLOW_VERIFY()  \
++do {                          \
++      /* mov ecx, randkey */  \
++      EMIT1(0xb9);            \
++      EMIT(randkey, 4);       \
++      /* cmp ecx, randkey */  \
++      EMIT2(0x81, 0xf9);      \
++      EMIT(randkey, 4);       \
++      /* jz after 8 int 3s */ \
++      EMIT2(0x74, 0x08);      \
++      EMIT(0xcccccccc, 4);    \
++      EMIT(0xcccccccc, 4);    \
++} while (0)
++#else
++#define APPEND_FLOW_VERIFY() do { } while (0)
++#endif
++
+ #define EMIT_COND_JMP(op, offset)                             \
+ do {                                                          \
+       if (is_near(offset))                                    \
+@@ -97,6 +193,7 @@ do {                                                                \
+       else {                                                  \
+               EMIT2(0x0f, op + 0x10);                         \
+               EMIT(offset, 4); /* jxx .+off32 */              \
++              APPEND_FLOW_VERIFY();                           \
+       }                                                       \
+ } while (0)
+@@ -121,6 +218,11 @@ static inline void bpf_flush_icache(void *start, void *end)
+       set_fs(old_fs);
+ }
++struct bpf_jit_work {
++      struct work_struct work;
++      void *image;
++};
++
+ #define CHOOSE_LOAD_FUNC(K, func) \
+       ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
+@@ -146,7 +248,7 @@ static int pkt_type_offset(void)
+ void bpf_jit_compile(struct sk_filter *fp)
+ {
+-      u8 temp[64];
++      u8 temp[MAX_INSTR_CODE_SIZE];
+       u8 *prog;
+       unsigned int proglen, oldproglen = 0;
+       int ilen, i;
+@@ -159,6 +261,9 @@ void bpf_jit_compile(struct sk_filter *fp)
+       unsigned int *addrs;
+       const struct sock_filter *filter = fp->insns;
+       int flen = fp->len;
++#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
++      unsigned int randkey;
++#endif
+       if (!bpf_jit_enable)
+               return;
+@@ -167,11 +272,19 @@ void bpf_jit_compile(struct sk_filter *fp)
+       if (addrs == NULL)
+               return;
++      fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
++      if (!fp->work)
++              goto out;
++
++#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
++      randkey = get_random_int();
++#endif
++
+       /* Before first pass, make a rough estimation of addrs[]
+-       * each bpf instruction is translated to less than 64 bytes
++       * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
+        */
+       for (proglen = 0, i = 0; i < flen; i++) {
+-              proglen += 64;
++              proglen += MAX_INSTR_CODE_SIZE;
+               addrs[i] = proglen;
+       }
+       cleanup_addr = proglen; /* epilogue address */
+@@ -282,10 +395,8 @@ void bpf_jit_compile(struct sk_filter *fp)
+                       case BPF_S_ALU_MUL_K: /* A *= K */
+                               if (is_imm8(K))
+                                       EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
+-                              else {
+-                                      EMIT2(0x69, 0xc0);              /* imul imm32,%eax */
+-                                      EMIT(K, 4);
+-                              }
++                              else
++                                      EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
+                               break;
+                       case BPF_S_ALU_DIV_X: /* A /= X; */
+                               seen |= SEEN_XREG;
+@@ -325,13 +436,23 @@ void bpf_jit_compile(struct sk_filter *fp)
+                               break;
+                       case BPF_S_ALU_MOD_K: /* A %= K; */
+                               EMIT2(0x31, 0xd2);      /* xor %edx,%edx */
++#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
++                              DILUTE_CONST_SEQUENCE(K, randkey);
++#else
+                               EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
++#endif
+                               EMIT2(0xf7, 0xf1);      /* div %ecx */
+                               EMIT2(0x89, 0xd0);      /* mov %edx,%eax */
+                               break;
+                       case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
++#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
++                              DILUTE_CONST_SEQUENCE(K, randkey);
++                              // imul rax, rcx
++                              EMIT4(0x48, 0x0f, 0xaf, 0xc1);
++#else
+                               EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
+                               EMIT(K, 4);
++#endif
+                               EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
+                               break;
+                       case BPF_S_ALU_AND_X:
+@@ -602,8 +723,7 @@ common_load_ind:           seen |= SEEN_DATAREF | SEEN_XREG;
+                                       if (is_imm8(K)) {
+                                               EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
+                                       } else {
+-                                              EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
+-                                              EMIT(K, 4);
++                                              EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
+                                       }
+                               } else {
+                                       EMIT2(0x89,0xde); /* mov %ebx,%esi */
+@@ -686,17 +806,18 @@ cond_branch:                     f_offset = addrs[i + filter[i].jf] - addrs[i];
+                               break;
+                       default:
+                               /* hmm, too complex filter, give up with jit compiler */
+-                              goto out;
++                              goto error;
+                       }
+                       ilen = prog - temp;
+                       if (image) {
+                               if (unlikely(proglen + ilen > oldproglen)) {
+                                       pr_err("bpb_jit_compile fatal error\n");
+-                                      kfree(addrs);
+-                                      module_free(NULL, image);
+-                                      return;
++                                      module_free_exec(NULL, image);
++                                      goto error;
+                               }
++                              pax_open_kernel();
+                               memcpy(image + proglen, temp, ilen);
++                              pax_close_kernel();
+                       }
+                       proglen += ilen;
+                       addrs[i] = proglen;
+@@ -717,11 +838,9 @@ cond_branch:                      f_offset = addrs[i + filter[i].jf] - addrs[i];
+                       break;
+               }
+               if (proglen == oldproglen) {
+-                      image = module_alloc(max_t(unsigned int,
+-                                                 proglen,
+-                                                 sizeof(struct work_struct)));
++                      image = module_alloc_exec(proglen);
+                       if (!image)
+-                              goto out;
++                              goto error;
+               }
+               oldproglen = proglen;
+       }
+@@ -732,7 +851,10 @@ cond_branch:                      f_offset = addrs[i + filter[i].jf] - addrs[i];
+       if (image) {
+               bpf_flush_icache(image, image + proglen);
+               fp->bpf_func = (void *)image;
+-      }
++      } else
++error:
++              kfree(fp->work);
++
+ out:
+       kfree(addrs);
+       return;
+@@ -740,18 +862,20 @@ out:
+ static void jit_free_defer(struct work_struct *arg)
+ {
+-      module_free(NULL, arg);
++      module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
++      kfree(arg);
+ }
+ /* run from softirq, we must use a work_struct to call
+- * module_free() from process context
++ * module_free_exec() from process context
+  */
+ void bpf_jit_free(struct sk_filter *fp)
+ {
+       if (fp->bpf_func != sk_run_filter) {
+-              struct work_struct *work = (struct work_struct *)fp->bpf_func;
++              struct work_struct *work = &fp->work->work;
+               INIT_WORK(work, jit_free_defer);
++              fp->work->image = fp->bpf_func;
+               schedule_work(work);
+       }
+ }
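
Under CONFIG_GRKERNSEC_JIT_HARDEN, DILUTE_CONST_SEQUENCE above keeps attacker-chosen 32-bit immediates out of the executable JIT buffer: only randkey and randkey ^ K are emitted, and the xor reconstructs K in %ecx at run time. A small stand-alone sketch of that arithmetic, with made-up values (the patch draws randkey from get_random_int()):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

int main(void)
{
    uint32_t K       = 0xdeadbeef;  /* constant taken from the BPF filter */
    uint32_t randkey = 0x13572468;  /* per-compilation secret */

    /* the only immediates that end up in the instruction stream */
    uint32_t imm_mov = randkey;      /* mov ecx, randkey     (0xb9 ...)      */
    uint32_t imm_xor = randkey ^ K;  /* xor ecx, randkey ^ K (0x81 0xf1 ...) */

    /* what the CPU computes when the JITed code runs */
    uint32_t ecx = imm_mov ^ imm_xor;

    assert(ecx == K);                     /* the original constant is recovered */
    assert(imm_mov != K && imm_xor != K); /* ...yet never emitted verbatim for this key */
    printf("emitted %08x and %08x, runtime value %08x\n", imm_mov, imm_xor, ecx);
    return 0;
}
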
+diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
+index d6aa6e8..266395a 100644
+--- a/arch/x86/oprofile/backtrace.c
++++ b/arch/x86/oprofile/backtrace.c
+@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
+       struct stack_frame_ia32 *fp;
+       unsigned long bytes;
+-      bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
++      bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
+       if (bytes != sizeof(bufhead))
+               return NULL;
+-      fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
++      fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
+       oprofile_add_trace(bufhead[0].return_address);
+@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
+       struct stack_frame bufhead[2];
+       unsigned long bytes;
+-      bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
++      bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
+       if (bytes != sizeof(bufhead))
+               return NULL;
+@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
+ {
+       struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
+-      if (!user_mode_vm(regs)) {
++      if (!user_mode(regs)) {
+               unsigned long stack = kernel_stack_pointer(regs);
+               if (depth)
+                       dump_trace(NULL, regs, (unsigned long *)stack, 0,
+diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
+index 48768df..ba9143c 100644
+--- a/arch/x86/oprofile/nmi_int.c
++++ b/arch/x86/oprofile/nmi_int.c
+@@ -23,6 +23,7 @@
+ #include <asm/nmi.h>
+ #include <asm/msr.h>
+ #include <asm/apic.h>
++#include <asm/pgtable.h>
+ #include "op_counter.h"
+ #include "op_x86_model.h"
+@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
+       if (ret)
+               return ret;
+-      if (!model->num_virt_counters)
+-              model->num_virt_counters = model->num_counters;
++      if (!model->num_virt_counters) {
++              pax_open_kernel();
++              *(unsigned int *)&model->num_virt_counters = model->num_counters;
++              pax_close_kernel();
++      }
+       mux_init(ops);
+diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
+index b2b9443..be58856 100644
+--- a/arch/x86/oprofile/op_model_amd.c
++++ b/arch/x86/oprofile/op_model_amd.c
+@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
+               num_counters = AMD64_NUM_COUNTERS;
+       }
+-      op_amd_spec.num_counters = num_counters;
+-      op_amd_spec.num_controls = num_counters;
+-      op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
++      pax_open_kernel();
++      *(unsigned int *)&op_amd_spec.num_counters = num_counters;
++      *(unsigned int *)&op_amd_spec.num_controls = num_counters;
++      *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
++      pax_close_kernel();
+       return 0;
+ }
+diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
+index d90528e..0127e2b 100644
+--- a/arch/x86/oprofile/op_model_ppro.c
++++ b/arch/x86/oprofile/op_model_ppro.c
+@@ -19,6 +19,7 @@
+ #include <asm/msr.h>
+ #include <asm/apic.h>
+ #include <asm/nmi.h>
++#include <asm/pgtable.h>
+ #include "op_x86_model.h"
+ #include "op_counter.h"
+@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
+       num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
+-      op_arch_perfmon_spec.num_counters = num_counters;
+-      op_arch_perfmon_spec.num_controls = num_counters;
++      pax_open_kernel();
++      *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
++      *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
++      pax_close_kernel();
+ }
+ static int arch_perfmon_init(struct oprofile_operations *ignore)
+diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
+index 71e8a67..6a313bb 100644
+--- a/arch/x86/oprofile/op_x86_model.h
++++ b/arch/x86/oprofile/op_x86_model.h
+@@ -52,7 +52,7 @@ struct op_x86_model_spec {
+       void            (*switch_ctrl)(struct op_x86_model_spec const *model,
+                                      struct op_msrs const * const msrs);
+ #endif
+-};
++} __do_const;
+ struct op_counter_config;
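
The oprofile hunks above mark the model descriptors read-only (__do_const on struct op_x86_model_spec) and then update their counter fields through pax_open_kernel()/pax_close_kernel(), which briefly lift the write protection around the assignment. What follows is only a loose user-space analogy of that open/write/close pattern, built on mprotect(); the kernel mechanism itself is different.

#include <stdio.h>
#include <assert.h>
#include <unistd.h>
#include <sys/mman.h>

struct ops { unsigned int num_counters; unsigned int num_controls; };

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);

    /* give the structure its own page so its protection can be changed */
    struct ops *spec = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    assert(spec != MAP_FAILED);
    spec->num_counters = 4;

    /* "constify": the page becomes read-only, like a __do_const object */
    assert(mprotect(spec, pagesz, PROT_READ) == 0);

    /* open ... write ... close, mirroring pax_open_kernel()/pax_close_kernel() */
    assert(mprotect(spec, pagesz, PROT_READ | PROT_WRITE) == 0);   /* "open"  */
    spec->num_controls = spec->num_counters;                       /* update  */
    assert(mprotect(spec, pagesz, PROT_READ) == 0);                /* "close" */

    printf("num_controls = %u\n", spec->num_controls);
    munmap(spec, pagesz);
    return 0;
}
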
+diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
+index e9e6ed5..e47ae67 100644
+--- a/arch/x86/pci/amd_bus.c
++++ b/arch/x86/pci/amd_bus.c
+@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
++static struct notifier_block amd_cpu_notifier = {
+       .notifier_call  = amd_cpu_notify,
+ };
+diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
+index 372e9b8..e775a6c 100644
+--- a/arch/x86/pci/irq.c
++++ b/arch/x86/pci/irq.c
+@@ -50,7 +50,7 @@ struct irq_router {
+ struct irq_router_handler {
+       u16 vendor;
+       int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
+-};
++} __do_const;
+ int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
+ void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
+@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
+       return 0;
+ }
+-static __initdata struct irq_router_handler pirq_routers[] = {
++static __initconst const struct irq_router_handler pirq_routers[] = {
+       { PCI_VENDOR_ID_INTEL, intel_router_probe },
+       { PCI_VENDOR_ID_AL, ali_router_probe },
+       { PCI_VENDOR_ID_ITE, ite_router_probe },
+@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
+ static void __init pirq_find_router(struct irq_router *r)
+ {
+       struct irq_routing_table *rt = pirq_table;
+-      struct irq_router_handler *h;
++      const struct irq_router_handler *h;
+ #ifdef CONFIG_PCI_BIOS
+       if (!rt->signature) {
+@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
+       return 0;
+ }
+-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
++static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
+       {
+               .callback = fix_broken_hp_bios_irq9,
+               .ident = "HP Pavilion N5400 Series Laptop",
+diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
+index 6eb18c4..20d83de 100644
+--- a/arch/x86/pci/mrst.c
++++ b/arch/x86/pci/mrst.c
+@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
+       printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
+       pci_mmcfg_late_init();
+       pcibios_enable_irq = mrst_pci_irq_enable;
+-      pci_root_ops = pci_mrst_ops;
++      pax_open_kernel();
++      memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
++      pax_close_kernel();
+       pci_soc_mode = 1;
+       /* Continue with standard init */
+       return 1;
+diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
+index c77b24a..c979855 100644
+--- a/arch/x86/pci/pcbios.c
++++ b/arch/x86/pci/pcbios.c
+@@ -79,7 +79,7 @@ union bios32 {
+ static struct {
+       unsigned long address;
+       unsigned short segment;
+-} bios32_indirect = { 0, __KERNEL_CS };
++} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
+ /*
+  * Returns the entry point for the given service, NULL on error
+@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
+       unsigned long length;           /* %ecx */
+       unsigned long entry;            /* %edx */
+       unsigned long flags;
++      struct desc_struct d, *gdt;
+       local_irq_save(flags);
+-      __asm__("lcall *(%%edi); cld"
++
++      gdt = get_cpu_gdt_table(smp_processor_id());
++
++      pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
++      write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
++      pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
++      write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
++
++      __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
+               : "=a" (return_code),
+                 "=b" (address),
+                 "=c" (length),
+                 "=d" (entry)
+               : "0" (service),
+                 "1" (0),
+-                "D" (&bios32_indirect));
++                "D" (&bios32_indirect),
++                "r"(__PCIBIOS_DS)
++              : "memory");
++
++      pax_open_kernel();
++      gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
++      gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
++      gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
++      gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
++      pax_close_kernel();
++
+       local_irq_restore(flags);
+       switch (return_code) {
+-              case 0:
+-                      return address + entry;
+-              case 0x80:      /* Not present */
+-                      printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
+-                      return 0;
+-              default: /* Shouldn't happen */
+-                      printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
+-                              service, return_code);
++      case 0: {
++              int cpu;
++              unsigned char flags;
++
++              printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
++              if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
++                      printk(KERN_WARNING "bios32_service: not valid\n");
+                       return 0;
++              }
++              address = address + PAGE_OFFSET;
++              length += 16UL; /* some BIOSs underreport this... */
++              flags = 4;
++              if (length >= 64*1024*1024) {
++                      length >>= PAGE_SHIFT;
++                      flags |= 8;
++              }
++
++              for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
++                      gdt = get_cpu_gdt_table(cpu);
++                      pack_descriptor(&d, address, length, 0x9b, flags);
++                      write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
++                      pack_descriptor(&d, address, length, 0x93, flags);
++                      write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
++              }
++              return entry;
++      }
++      case 0x80:      /* Not present */
++              printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
++              return 0;
++      default: /* Shouldn't happen */
++              printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
++                      service, return_code);
++              return 0;
+       }
+ }
+ static struct {
+       unsigned long address;
+       unsigned short segment;
+-} pci_indirect = { 0, __KERNEL_CS };
++} pci_indirect __read_only = { 0, __PCIBIOS_CS };
+-static int pci_bios_present;
++static int pci_bios_present __read_only;
+ static int check_pcibios(void)
+ {
+@@ -131,11 +174,13 @@ static int check_pcibios(void)
+       unsigned long flags, pcibios_entry;
+       if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
+-              pci_indirect.address = pcibios_entry + PAGE_OFFSET;
++              pci_indirect.address = pcibios_entry;
+               local_irq_save(flags);
+-              __asm__(
+-                      "lcall *(%%edi); cld\n\t"
++              __asm__("movw %w6, %%ds\n\t"
++                      "lcall *%%ss:(%%edi); cld\n\t"
++                      "push %%ss\n\t"
++                      "pop %%ds\n\t"
+                       "jc 1f\n\t"
+                       "xor %%ah, %%ah\n"
+                       "1:"
+@@ -144,7 +189,8 @@ static int check_pcibios(void)
+                         "=b" (ebx),
+                         "=c" (ecx)
+                       : "1" (PCIBIOS_PCI_BIOS_PRESENT),
+-                        "D" (&pci_indirect)
++                        "D" (&pci_indirect),
++                        "r" (__PCIBIOS_DS)
+                       : "memory");
+               local_irq_restore(flags);
+@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
+       switch (len) {
+       case 1:
+-              __asm__("lcall *(%%esi); cld\n\t"
++              __asm__("movw %w6, %%ds\n\t"
++                      "lcall *%%ss:(%%esi); cld\n\t"
++                      "push %%ss\n\t"
++                      "pop %%ds\n\t"
+                       "jc 1f\n\t"
+                       "xor %%ah, %%ah\n"
+                       "1:"
+@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
+                       : "1" (PCIBIOS_READ_CONFIG_BYTE),
+                         "b" (bx),
+                         "D" ((long)reg),
+-                        "S" (&pci_indirect));
++                        "S" (&pci_indirect),
++                        "r" (__PCIBIOS_DS));
+               /*
+                * Zero-extend the result beyond 8 bits, do not trust the
+                * BIOS having done it:
+@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
+               *value &= 0xff;
+               break;
+       case 2:
+-              __asm__("lcall *(%%esi); cld\n\t"
++              __asm__("movw %w6, %%ds\n\t"
++                      "lcall *%%ss:(%%esi); cld\n\t"
++                      "push %%ss\n\t"
++                      "pop %%ds\n\t"
+                       "jc 1f\n\t"
+                       "xor %%ah, %%ah\n"
+                       "1:"
+@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
+                       : "1" (PCIBIOS_READ_CONFIG_WORD),
+                         "b" (bx),
+                         "D" ((long)reg),
+-                        "S" (&pci_indirect));
++                        "S" (&pci_indirect),
++                        "r" (__PCIBIOS_DS));
+               /*
+                * Zero-extend the result beyond 16 bits, do not trust the
+                * BIOS having done it:
+@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
+               *value &= 0xffff;
+               break;
+       case 4:
+-              __asm__("lcall *(%%esi); cld\n\t"
++              __asm__("movw %w6, %%ds\n\t"
++                      "lcall *%%ss:(%%esi); cld\n\t"
++                      "push %%ss\n\t"
++                      "pop %%ds\n\t"
+                       "jc 1f\n\t"
+                       "xor %%ah, %%ah\n"
+                       "1:"
+@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
+                       : "1" (PCIBIOS_READ_CONFIG_DWORD),
+                         "b" (bx),
+                         "D" ((long)reg),
+-                        "S" (&pci_indirect));
++                        "S" (&pci_indirect),
++                        "r" (__PCIBIOS_DS));
+               break;
+       }
+@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
+       switch (len) {
+       case 1:
+-              __asm__("lcall *(%%esi); cld\n\t"
++              __asm__("movw %w6, %%ds\n\t"
++                      "lcall *%%ss:(%%esi); cld\n\t"
++                      "push %%ss\n\t"
++                      "pop %%ds\n\t"
+                       "jc 1f\n\t"
+                       "xor %%ah, %%ah\n"
+                       "1:"
+@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
+                         "c" (value),
+                         "b" (bx),
+                         "D" ((long)reg),
+-                        "S" (&pci_indirect));
++                        "S" (&pci_indirect),
++                        "r" (__PCIBIOS_DS));
+               break;
+       case 2:
+-              __asm__("lcall *(%%esi); cld\n\t"
++              __asm__("movw %w6, %%ds\n\t"
++                      "lcall *%%ss:(%%esi); cld\n\t"
++                      "push %%ss\n\t"
++                      "pop %%ds\n\t"
+                       "jc 1f\n\t"
+                       "xor %%ah, %%ah\n"
+                       "1:"
+@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
+                         "c" (value),
+                         "b" (bx),
+                         "D" ((long)reg),
+-                        "S" (&pci_indirect));
++                        "S" (&pci_indirect),
++                        "r" (__PCIBIOS_DS));
+               break;
+       case 4:
+-              __asm__("lcall *(%%esi); cld\n\t"
++              __asm__("movw %w6, %%ds\n\t"
++                      "lcall *%%ss:(%%esi); cld\n\t"
++                      "push %%ss\n\t"
++                      "pop %%ds\n\t"
+                       "jc 1f\n\t"
+                       "xor %%ah, %%ah\n"
+                       "1:"
+@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
+                         "c" (value),
+                         "b" (bx),
+                         "D" ((long)reg),
+-                        "S" (&pci_indirect));
++                        "S" (&pci_indirect),
++                        "r" (__PCIBIOS_DS));
+               break;
+       }
+@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
+       DBG("PCI: Fetching IRQ routing table... ");
+       __asm__("push %%es\n\t"
++              "movw %w8, %%ds\n\t"
+               "push %%ds\n\t"
+               "pop  %%es\n\t"
+-              "lcall *(%%esi); cld\n\t"
++              "lcall *%%ss:(%%esi); cld\n\t"
+               "pop %%es\n\t"
++              "push %%ss\n\t"
++              "pop %%ds\n"
+               "jc 1f\n\t"
+               "xor %%ah, %%ah\n"
+               "1:"
+@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
+                 "1" (0),
+                 "D" ((long) &opt),
+                 "S" (&pci_indirect),
+-                "m" (opt)
++                "m" (opt),
++                "r" (__PCIBIOS_DS)
+               : "memory");
+       DBG("OK  ret=%d, size=%d, map=%x\n", ret, opt.size, map);
+       if (ret & 0xff00)
+@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
+ {
+       int ret;
+-      __asm__("lcall *(%%esi); cld\n\t"
++      __asm__("movw %w5, %%ds\n\t"
++              "lcall *%%ss:(%%esi); cld\n\t"
++              "push %%ss\n\t"
++              "pop %%ds\n"
+               "jc 1f\n\t"
+               "xor %%ah, %%ah\n"
+               "1:"
+@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
+               : "0" (PCIBIOS_SET_PCI_HW_INT),
+                 "b" ((dev->bus->number << 8) | dev->devfn),
+                 "c" ((irq << 8) | (pin + 10)),
+-                "S" (&pci_indirect));
++                "S" (&pci_indirect),
++                "r" (__PCIBIOS_DS));
+       return !(ret & 0xff00);
+ }
+ EXPORT_SYMBOL(pcibios_set_irq_routing);
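
The PCI BIOS changes above stop reusing __KERNEL_CS for the BIOS32 far calls and instead install dedicated __PCIBIOS_CS/__PCIBIOS_DS GDT entries, e.g. pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC). To make those magic numbers easier to read, here is a stand-alone packer that follows the architectural segment-descriptor layout (it is not the kernel's pack_descriptor() helper):

#include <stdint.h>
#include <stdio.h>

/* limit 15:0 | base 15:0 | base 23:16 | access byte | limit 19:16 | flags | base 31:24 */
static uint64_t pack_seg_desc(uint32_t base, uint32_t limit, uint8_t access, uint8_t flags)
{
    return  (uint64_t)(limit  & 0xffffu)
         | ((uint64_t)(base   & 0xffffu)       << 16)
         | ((uint64_t)((base  >> 16) & 0xffu)  << 32)
         | ((uint64_t)access                   << 40)
         | ((uint64_t)((limit >> 16) & 0xfu)   << 48)
         | ((uint64_t)(flags  & 0xfu)          << 52)
         | ((uint64_t)((base  >> 24) & 0xffu)  << 56);
}

int main(void)
{
    /* base 0, limit 0xFFFFF, access 0x9B (present, ring 0, code, readable,
     * accessed), flags 0xC (32-bit, 4 KiB granularity): a flat 4 GiB code
     * segment; access 0x93 is the matching writable data segment. */
    printf("PCIBIOS CS descriptor: %#018llx\n",
           (unsigned long long)pack_seg_desc(0, 0xFFFFF, 0x9B, 0xC));
    printf("PCIBIOS DS descriptor: %#018llx\n",
           (unsigned long long)pack_seg_desc(0, 0xFFFFF, 0x93, 0xC));
    return 0;
}
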
+diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
+index 40e4469..d915bf9 100644
+--- a/arch/x86/platform/efi/efi_32.c
++++ b/arch/x86/platform/efi/efi_32.c
+@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
+ {
+       struct desc_ptr gdt_descr;
++#ifdef CONFIG_PAX_KERNEXEC
++      struct desc_struct d;
++#endif
++
+       local_irq_save(efi_rt_eflags);
+       load_cr3(initial_page_table);
+       __flush_tlb_all();
++#ifdef CONFIG_PAX_KERNEXEC
++      pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
++      write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
++      pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
++      write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
++#endif
++
+       gdt_descr.address = __pa(get_cpu_gdt_table(0));
+       gdt_descr.size = GDT_SIZE - 1;
+       load_gdt(&gdt_descr);
+@@ -58,11 +69,24 @@ void efi_call_phys_epilog(void)
+ {
+       struct desc_ptr gdt_descr;
++#ifdef CONFIG_PAX_KERNEXEC
++      struct desc_struct d;
++
++      memset(&d, 0, sizeof d);
++      write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
++      write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
++#endif
++
+       gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
+       gdt_descr.size = GDT_SIZE - 1;
+       load_gdt(&gdt_descr);
++#ifdef CONFIG_PAX_PER_CPU_PGD
++      load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
++#else
+       load_cr3(swapper_pg_dir);
++#endif
++
+       __flush_tlb_all();
+       local_irq_restore(efi_rt_eflags);
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index 39a0e7f1..872396e 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -76,6 +76,11 @@ void __init efi_call_phys_prelog(void)
+               vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
+               set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
+       }
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++      load_cr3(swapper_pg_dir);
++#endif
++
+       __flush_tlb_all();
+ }
+@@ -89,6 +94,11 @@ void __init efi_call_phys_epilog(void)
+       for (pgd = 0; pgd < n_pgds; pgd++)
+               set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
+       kfree(save_pgd);
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++      load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
++#endif
++
+       __flush_tlb_all();
+       local_irq_restore(efi_flags);
+       early_code_mapping_set_exec(0);
+diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
+index fbe66e6..eae5e38 100644
+--- a/arch/x86/platform/efi/efi_stub_32.S
++++ b/arch/x86/platform/efi/efi_stub_32.S
+@@ -6,7 +6,9 @@
+  */
+ #include <linux/linkage.h>
++#include <linux/init.h>
+ #include <asm/page_types.h>
++#include <asm/segment.h>
+ /*
+  * efi_call_phys(void *, ...) is a function with variable parameters.
+@@ -20,7 +22,7 @@
+  * service functions will comply with gcc calling convention, too.
+  */
+-.text
++__INIT
+ ENTRY(efi_call_phys)
+       /*
+        * 0. The function can only be called in Linux kernel. So CS has been
+@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
+        * The mapping of lower virtual memory has been created in prelog and
+        * epilog.
+        */
+-      movl    $1f, %edx
+-      subl    $__PAGE_OFFSET, %edx
+-      jmp     *%edx
++#ifdef CONFIG_PAX_KERNEXEC
++      movl    $(__KERNEXEC_EFI_DS), %edx
++      mov     %edx, %ds
++      mov     %edx, %es
++      mov     %edx, %ss
++      addl    $2f,(1f)
++      ljmp    *(1f)
++
++__INITDATA
++1:    .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
++.previous
++
++2:
++      subl    $2b,(1b)
++#else
++      jmp     1f-__PAGE_OFFSET
+ 1:
++#endif
+       /*
+        * 2. Now on the top of stack is the return
+@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
+        * parameter 2, ..., param n. To make things easy, we save the return
+        * address of efi_call_phys in a global variable.
+        */
+-      popl    %edx
+-      movl    %edx, saved_return_addr
+-      /* get the function pointer into ECX*/
+-      popl    %ecx
+-      movl    %ecx, efi_rt_function_ptr
+-      movl    $2f, %edx
+-      subl    $__PAGE_OFFSET, %edx
+-      pushl   %edx
++      popl    (saved_return_addr)
++      popl    (efi_rt_function_ptr)
+       /*
+        * 3. Clear PG bit in %CR0.
+@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
+       /*
+        * 5. Call the physical function.
+        */
+-      jmp     *%ecx
++      call    *(efi_rt_function_ptr-__PAGE_OFFSET)
+-2:
+       /*
+        * 6. After EFI runtime service returns, control will return to
+        * following instruction. We'd better readjust stack pointer first.
+@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
+       movl    %cr0, %edx
+       orl     $0x80000000, %edx
+       movl    %edx, %cr0
+-      jmp     1f
+-1:
++
+       /*
+        * 8. Now restore the virtual mode from flat mode by
+        * adding EIP with PAGE_OFFSET.
+        */
+-      movl    $1f, %edx
+-      jmp     *%edx
++#ifdef CONFIG_PAX_KERNEXEC
++      movl    $(__KERNEL_DS), %edx
++      mov     %edx, %ds
++      mov     %edx, %es
++      mov     %edx, %ss
++      ljmp    $(__KERNEL_CS),$1f
++#else
++      jmp     1f+__PAGE_OFFSET
++#endif
+ 1:
+       /*
+        * 9. Balance the stack. And because EAX contain the return value,
+        * we'd better not clobber it.
+        */
+-      leal    efi_rt_function_ptr, %edx
+-      movl    (%edx), %ecx
+-      pushl   %ecx
++      pushl   (efi_rt_function_ptr)
+       /*
+-       * 10. Push the saved return address onto the stack and return.
++       * 10. Return to the saved return address.
+        */
+-      leal    saved_return_addr, %edx
+-      movl    (%edx), %ecx
+-      pushl   %ecx
+-      ret
++      jmpl    *(saved_return_addr)
+ ENDPROC(efi_call_phys)
+ .previous
+-.data
++__INITDATA
+ saved_return_addr:
+       .long 0
+ efi_rt_function_ptr:
+diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
+index 4c07cca..2c8427d 100644
+--- a/arch/x86/platform/efi/efi_stub_64.S
++++ b/arch/x86/platform/efi/efi_stub_64.S
+@@ -7,6 +7,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ #define SAVE_XMM                      \
+       mov %rsp, %rax;                 \
+@@ -40,6 +41,7 @@ ENTRY(efi_call0)
+       call *%rdi
+       addq $32, %rsp
+       RESTORE_XMM
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(efi_call0)
+@@ -50,6 +52,7 @@ ENTRY(efi_call1)
+       call *%rdi
+       addq $32, %rsp
+       RESTORE_XMM
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(efi_call1)
+@@ -60,6 +63,7 @@ ENTRY(efi_call2)
+       call *%rdi
+       addq $32, %rsp
+       RESTORE_XMM
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(efi_call2)
+@@ -71,6 +75,7 @@ ENTRY(efi_call3)
+       call *%rdi
+       addq $32, %rsp
+       RESTORE_XMM
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(efi_call3)
+@@ -83,6 +88,7 @@ ENTRY(efi_call4)
+       call *%rdi
+       addq $32, %rsp
+       RESTORE_XMM
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(efi_call4)
+@@ -96,6 +102,7 @@ ENTRY(efi_call5)
+       call *%rdi
+       addq $48, %rsp
+       RESTORE_XMM
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(efi_call5)
+@@ -112,5 +119,6 @@ ENTRY(efi_call6)
+       call *%rdi
+       addq $48, %rsp
+       RESTORE_XMM
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(efi_call6)
+diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
+index a0a0a43..a48e233 100644
+--- a/arch/x86/platform/mrst/mrst.c
++++ b/arch/x86/platform/mrst/mrst.c
+@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
+ EXPORT_SYMBOL_GPL(sfi_mrtc_array);
+ int sfi_mrtc_num;
+-static void mrst_power_off(void)
++static __noreturn void mrst_power_off(void)
+ {
++      BUG();
+ }
+-static void mrst_reboot(void)
++static __noreturn void mrst_reboot(void)
+ {
+       intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
++      BUG();
+ }
+ /* parse all the mtimer info to a static mtimer array */
+diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
+index d6ee929..3637cb5 100644
+--- a/arch/x86/platform/olpc/olpc_dt.c
++++ b/arch/x86/platform/olpc/olpc_dt.c
+@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
+       return res;
+ }
+-static struct of_pdt_ops prom_olpc_ops __initdata = {
++static struct of_pdt_ops prom_olpc_ops __initconst = {
+       .nextprop = olpc_dt_nextprop,
+       .getproplen = olpc_dt_getproplen,
+       .getproperty = olpc_dt_getproperty,
+diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
+index 1cf5b30..fd45732 100644
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -137,11 +137,8 @@ static void do_fpu_end(void)
+ static void fix_processor_context(void)
+ {
+       int cpu = smp_processor_id();
+-      struct tss_struct *t = &per_cpu(init_tss, cpu);
+-#ifdef CONFIG_X86_64
+-      struct desc_struct *desc = get_cpu_gdt_table(cpu);
+-      tss_desc tss;
+-#endif
++      struct tss_struct *t = init_tss + cpu;
++
+       set_tss_desc(cpu, t);   /*
+                                * This just modifies memory; should not be
+                                * necessary. But... This is necessary, because
+@@ -150,10 +147,6 @@ static void fix_processor_context(void)
+                                */
+ #ifdef CONFIG_X86_64
+-      memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
+-      tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
+-      write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
+-
+       syscall_init();                         /* This sets MSR_*STAR and related */
+ #endif
+       load_TR_desc();                         /* This does ltr */
+diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
+index a44f457..9140171 100644
+--- a/arch/x86/realmode/init.c
++++ b/arch/x86/realmode/init.c
+@@ -70,7 +70,13 @@ void __init setup_real_mode(void)
+               __va(real_mode_header->trampoline_header);
+ #ifdef CONFIG_X86_32
+-      trampoline_header->start = __pa_symbol(startup_32_smp);
++      trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
++
++#ifdef CONFIG_PAX_KERNEXEC
++      trampoline_header->start -= LOAD_PHYSICAL_ADDR;
++#endif
++
++      trampoline_header->boot_cs = __BOOT_CS;
+       trampoline_header->gdt_limit = __BOOT_DS + 7;
+       trampoline_header->gdt_base = __pa_symbol(boot_gdt);
+ #else
+@@ -86,7 +92,7 @@ void __init setup_real_mode(void)
+       *trampoline_cr4_features = read_cr4();
+       trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
+-      trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
++      trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
+       trampoline_pgd[511] = init_level4_pgt[511].pgd;
+ #endif
+ }
+diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
+index 8869287..d577672 100644
+--- a/arch/x86/realmode/rm/Makefile
++++ b/arch/x86/realmode/rm/Makefile
+@@ -78,5 +78,8 @@ KBUILD_CFLAGS        := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
+                       $(call cc-option, -fno-unit-at-a-time)) \
+                  $(call cc-option, -fno-stack-protector) \
+                  $(call cc-option, -mpreferred-stack-boundary=2)
++ifdef CONSTIFY_PLUGIN
++KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
++endif
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
+index a28221d..93c40f1 100644
+--- a/arch/x86/realmode/rm/header.S
++++ b/arch/x86/realmode/rm/header.S
+@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
+ #endif
+       /* APM/BIOS reboot */
+       .long   pa_machine_real_restart_asm
+-#ifdef CONFIG_X86_64
++#ifdef CONFIG_X86_32
++      .long   __KERNEL_CS
++#else
+       .long   __KERNEL32_CS
+ #endif
+ END(real_mode_header)
+diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
+index c1b2791..f9e31c7 100644
+--- a/arch/x86/realmode/rm/trampoline_32.S
++++ b/arch/x86/realmode/rm/trampoline_32.S
+@@ -25,6 +25,12 @@
+ #include <asm/page_types.h>
+ #include "realmode.h"
++#ifdef CONFIG_PAX_KERNEXEC
++#define ta(X) (X)
++#else
++#define ta(X) (pa_ ## X)
++#endif
++
+       .text
+       .code16
+@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
+       cli                     # We should be safe anyway
+-      movl    tr_start, %eax  # where we need to go
+-
+       movl    $0xA5A5A5A5, trampoline_status
+                               # write marker for master knows we're running
+@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
+       movw    $1, %dx                 # protected mode (PE) bit
+       lmsw    %dx                     # into protected mode
+-      ljmpl   $__BOOT_CS, $pa_startup_32
++      ljmpl *(trampoline_header)
+       .section ".text32","ax"
+       .code32
+@@ -67,7 +71,7 @@ ENTRY(startup_32)                    # note: also used from wakeup_asm.S
+       .balign 8
+ GLOBAL(trampoline_header)
+       tr_start:               .space  4
+-      tr_gdt_pad:             .space  2
++      tr_boot_cs:             .space  2
+       tr_gdt:                 .space  6
+ END(trampoline_header)
+       
+diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
+index bb360dc..d0fd8f8 100644
+--- a/arch/x86/realmode/rm/trampoline_64.S
++++ b/arch/x86/realmode/rm/trampoline_64.S
+@@ -94,6 +94,7 @@ ENTRY(startup_32)
+       movl    %edx, %gs
+       movl    pa_tr_cr4, %eax
++      andl    $~X86_CR4_PCIDE, %eax
+       movl    %eax, %cr4              # Enable PAE mode
+       # Setup trampoline 4 level pagetables
+@@ -107,7 +108,7 @@ ENTRY(startup_32)
+       wrmsr
+       # Enable paging and in turn activate Long Mode
+-      movl    $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
++      movl    $(X86_CR0_PG | X86_CR0_PE), %eax
+       movl    %eax, %cr0
+       /*
+diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
+index e812034..c747134 100644
+--- a/arch/x86/tools/Makefile
++++ b/arch/x86/tools/Makefile
+@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
+ $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
+-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
++HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
+ hostprogs-y   += relocs
+ relocs-objs     := relocs_32.o relocs_64.o relocs_common.o
+ relocs: $(obj)/relocs
+diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
+index f7bab68..b6d9886 100644
+--- a/arch/x86/tools/relocs.c
++++ b/arch/x86/tools/relocs.c
+@@ -1,5 +1,7 @@
+ /* This is included from relocs_32/64.c */
++#include "../../../include/generated/autoconf.h"
++
+ #define ElfW(type)            _ElfW(ELF_BITS, type)
+ #define _ElfW(bits, type)     __ElfW(bits, type)
+ #define __ElfW(bits, type)    Elf##bits##_##type
+@@ -11,6 +13,7 @@
+ #define Elf_Sym                       ElfW(Sym)
+ static Elf_Ehdr ehdr;
++static Elf_Phdr *phdr;
+ struct relocs {
+       uint32_t        *offset;
+@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
+       }
+ }
++static void read_phdrs(FILE *fp)
++{
++      unsigned int i;
++
++      phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
++      if (!phdr) {
++              die("Unable to allocate %d program headers\n",
++                  ehdr.e_phnum);
++      }
++      if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
++              die("Seek to %d failed: %s\n",
++                      ehdr.e_phoff, strerror(errno));
++      }
++      if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
++              die("Cannot read ELF program headers: %s\n",
++                      strerror(errno));
++      }
++      for(i = 0; i < ehdr.e_phnum; i++) {
++              phdr[i].p_type      = elf_word_to_cpu(phdr[i].p_type);
++              phdr[i].p_offset    = elf_off_to_cpu(phdr[i].p_offset);
++              phdr[i].p_vaddr     = elf_addr_to_cpu(phdr[i].p_vaddr);
++              phdr[i].p_paddr     = elf_addr_to_cpu(phdr[i].p_paddr);
++              phdr[i].p_filesz    = elf_word_to_cpu(phdr[i].p_filesz);
++              phdr[i].p_memsz     = elf_word_to_cpu(phdr[i].p_memsz);
++              phdr[i].p_flags     = elf_word_to_cpu(phdr[i].p_flags);
++              phdr[i].p_align     = elf_word_to_cpu(phdr[i].p_align);
++      }
++
++}
++
+ static void read_shdrs(FILE *fp)
+ {
+-      int i;
++      unsigned int i;
+       Elf_Shdr shdr;
+       secs = calloc(ehdr.e_shnum, sizeof(struct section));
+@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
+ static void read_strtabs(FILE *fp)
+ {
+-      int i;
++      unsigned int i;
+       for (i = 0; i < ehdr.e_shnum; i++) {
+               struct section *sec = &secs[i];
+               if (sec->shdr.sh_type != SHT_STRTAB) {
+@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
+ static void read_symtabs(FILE *fp)
+ {
+-      int i,j;
++      unsigned int i,j;
+       for (i = 0; i < ehdr.e_shnum; i++) {
+               struct section *sec = &secs[i];
+               if (sec->shdr.sh_type != SHT_SYMTAB) {
+@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
+ }
+-static void read_relocs(FILE *fp)
++static void read_relocs(FILE *fp, int use_real_mode)
+ {
+-      int i,j;
++      unsigned int i,j;
++      uint32_t base;
++
+       for (i = 0; i < ehdr.e_shnum; i++) {
+               struct section *sec = &secs[i];
+               if (sec->shdr.sh_type != SHT_REL_TYPE) {
+@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
+                       die("Cannot read symbol table: %s\n",
+                               strerror(errno));
+               }
++              base = 0;
++
++#ifdef CONFIG_X86_32
++              for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
++                      if (phdr[j].p_type != PT_LOAD )
++                              continue;
++                      if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
++                              continue;
++                      base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
++                      break;
++              }
++#endif
++
+               for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
+                       Elf_Rel *rel = &sec->reltab[j];
+-                      rel->r_offset = elf_addr_to_cpu(rel->r_offset);
++                      rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
+                       rel->r_info   = elf_xword_to_cpu(rel->r_info);
+ #if (SHT_REL_TYPE == SHT_RELA)
+                       rel->r_addend = elf_xword_to_cpu(rel->r_addend);
+@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
+ static void print_absolute_symbols(void)
+ {
+-      int i;
++      unsigned int i;
+       const char *format;
+       if (ELF_BITS == 64)
+@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
+       for (i = 0; i < ehdr.e_shnum; i++) {
+               struct section *sec = &secs[i];
+               char *sym_strtab;
+-              int j;
++              unsigned int j;
+               if (sec->shdr.sh_type != SHT_SYMTAB) {
+                       continue;
+@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
+ static void print_absolute_relocs(void)
+ {
+-      int i, printed = 0;
++      unsigned int i, printed = 0;
+       const char *format;
+       if (ELF_BITS == 64)
+@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
+               struct section *sec_applies, *sec_symtab;
+               char *sym_strtab;
+               Elf_Sym *sh_symtab;
+-              int j;
++              unsigned int j;
+               if (sec->shdr.sh_type != SHT_REL_TYPE) {
+                       continue;
+               }
+@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
+ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
+                       Elf_Sym *sym, const char *symname))
+ {
+-      int i;
++      unsigned int i;
+       /* Walk through the relocations */
+       for (i = 0; i < ehdr.e_shnum; i++) {
+               char *sym_strtab;
+               Elf_Sym *sh_symtab;
+               struct section *sec_applies, *sec_symtab;
+-              int j;
++              unsigned int j;
+               struct section *sec = &secs[i];
+               if (sec->shdr.sh_type != SHT_REL_TYPE) {
+@@ -812,6 +860,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
+ {
+       unsigned r_type = ELF32_R_TYPE(rel->r_info);
+       int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
++      char *sym_strtab = sec->link->link->strtab;
++
++      /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
++      if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
++              return 0;
++
++#ifdef CONFIG_PAX_KERNEXEC
++      /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
++      if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
++              return 0;
++      if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
++              return 0;
++      if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
++              return 0;
++      if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
++              return 0;
++#endif
+       switch (r_type) {
+       case R_386_NONE:
+@@ -950,7 +1015,7 @@ static int write32_as_text(uint32_t v, FILE *f)
+ static void emit_relocs(int as_text, int use_real_mode)
+ {
+-      int i;
++      unsigned int i;
+       int (*write_reloc)(uint32_t, FILE *) = write32;
+       int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
+                       const char *symname);
+@@ -1026,10 +1091,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
+ {
+       regex_init(use_real_mode);
+       read_ehdr(fp);
++      read_phdrs(fp);
+       read_shdrs(fp);
+       read_strtabs(fp);
+       read_symtabs(fp);
+-      read_relocs(fp);
++      read_relocs(fp, use_real_mode);
+       if (ELF_BITS == 64)
+               percpu_init();
+       if (show_absolute_syms) {
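
The relocs.c hunks above make the build-time relocation tool read the ELF program headers and, for 32-bit kernels outside real-mode code, add a per-section load bias to every relocation offset: find the PT_LOAD segment whose file range contains the section, then bias by CONFIG_PAGE_OFFSET plus the segment's physical-minus-virtual load address. A minimal user-space sketch of that lookup, with PAGE_OFFSET_ASSUMED standing in for the kernel's CONFIG_PAGE_OFFSET (an assumption of this example, not taken from the patch):

    /* Find the load bias for a section at file offset sh_offset by
     * locating the PT_LOAD segment that contains it. */
    #include <elf.h>
    #include <stddef.h>

    #define PAGE_OFFSET_ASSUMED 0xc0000000UL

    static unsigned long section_load_bias(const Elf32_Phdr *phdr, size_t phnum,
                                           Elf32_Off sh_offset)
    {
            size_t i;

            for (i = 0; i < phnum; i++) {
                    if (phdr[i].p_type != PT_LOAD)
                            continue;
                    if (sh_offset < phdr[i].p_offset ||
                        sh_offset >= phdr[i].p_offset + phdr[i].p_filesz)
                            continue;
                    /* physical load address minus link-time virtual address */
                    return PAGE_OFFSET_ASSUMED + phdr[i].p_paddr - phdr[i].p_vaddr;
            }
            return 0;
    }

The do_reloc32() additions then skip symbols whose values are not ordinary addresses: per-cpu offsets and, under CONFIG_PAX_KERNEXEC, text sections that are relocated implicitly through the KERNEL_CS base, as the in-line comments in the hunk explain.
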
+diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
+index 80ffa5b..a33bd15 100644
+--- a/arch/x86/um/tls_32.c
++++ b/arch/x86/um/tls_32.c
+@@ -260,7 +260,7 @@ out:
+       if (unlikely(task == current &&
+                    !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
+               printk(KERN_ERR "get_tls_entry: task with pid %d got here "
+-                              "without flushed TLS.", current->pid);
++                              "without flushed TLS.", task_pid_nr(current));
+       }
+       return 0;
+diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
+index fd14be1..e3c79c0 100644
+--- a/arch/x86/vdso/Makefile
++++ b/arch/x86/vdso/Makefile
+@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO    $@
+                      -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
+                sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
+-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
++VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ GCOV_PROFILE := n
+ #
+diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
+index 0faad64..39ef157 100644
+--- a/arch/x86/vdso/vdso32-setup.c
++++ b/arch/x86/vdso/vdso32-setup.c
+@@ -25,6 +25,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/vdso.h>
+ #include <asm/proto.h>
++#include <asm/mman.h>
+ enum {
+       VDSO_DISABLED = 0,
+@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
+ void enable_sep_cpu(void)
+ {
+       int cpu = get_cpu();
+-      struct tss_struct *tss = &per_cpu(init_tss, cpu);
++      struct tss_struct *tss = init_tss + cpu;
+       if (!boot_cpu_has(X86_FEATURE_SEP)) {
+               put_cpu();
+@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
+       gate_vma.vm_start = FIXADDR_USER_START;
+       gate_vma.vm_end = FIXADDR_USER_END;
+       gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+-      gate_vma.vm_page_prot = __P101;
++      gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+       return 0;
+ }
+@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+       if (compat)
+               addr = VDSO_HIGH_BASE;
+       else {
+-              addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
++              addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
+               if (IS_ERR_VALUE(addr)) {
+                       ret = addr;
+                       goto up_fail;
+               }
+       }
+-      current->mm->context.vdso = (void *)addr;
++      current->mm->context.vdso = addr;
+       if (compat_uses_vma || !compat) {
+               /*
+@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+       }
+       current_thread_info()->sysenter_return =
+-              VDSO32_SYMBOL(addr, SYSENTER_RETURN);
++              (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
+   up_fail:
+       if (ret)
+-              current->mm->context.vdso = NULL;
++              current->mm->context.vdso = 0;
+       up_write(&mm->mmap_sem);
+@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
+ const char *arch_vma_name(struct vm_area_struct *vma)
+ {
+-      if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
++      if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
+               return "[vdso]";
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
++              return "[vdso]";
++#endif
++
+       return NULL;
+ }
+@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+        * Check to see if the corresponding task was created in compat vdso
+        * mode.
+        */
+-      if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
++      if (mm && mm->context.vdso == VDSO_HIGH_BASE)
+               return &gate_vma;
+       return NULL;
+ }
+diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
+index 431e875..cbb23f3 100644
+--- a/arch/x86/vdso/vma.c
++++ b/arch/x86/vdso/vma.c
+@@ -16,8 +16,6 @@
+ #include <asm/vdso.h>
+ #include <asm/page.h>
+-unsigned int __read_mostly vdso_enabled = 1;
+-
+ extern char vdso_start[], vdso_end[];
+ extern unsigned short vdso_sync_cpuid;
+@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
+        * unaligned here as a result of stack start randomization.
+        */
+       addr = PAGE_ALIGN(addr);
+-      addr = align_vdso_addr(addr);
+       return addr;
+ }
+@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
+                                 unsigned size)
+ {
+       struct mm_struct *mm = current->mm;
+-      unsigned long addr;
++      unsigned long addr = 0;
+       int ret;
+-      if (!vdso_enabled)
+-              return 0;
+-
+       down_write(&mm->mmap_sem);
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       addr = vdso_addr(mm->start_stack, size);
++      addr = align_vdso_addr(addr);
+       addr = get_unmapped_area(NULL, addr, size, 0, 0);
+       if (IS_ERR_VALUE(addr)) {
+               ret = addr;
+               goto up_fail;
+       }
+-      current->mm->context.vdso = (void *)addr;
++      mm->context.vdso = addr;
+       ret = install_special_mapping(mm, addr, size,
+                                     VM_READ|VM_EXEC|
+                                     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+                                     pages);
+-      if (ret) {
+-              current->mm->context.vdso = NULL;
+-              goto up_fail;
+-      }
++      if (ret)
++              mm->context.vdso = 0;
+ up_fail:
+       up_write(&mm->mmap_sem);
+@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+                                     vdsox32_size);
+ }
+ #endif
+-
+-static __init int vdso_setup(char *s)
+-{
+-      vdso_enabled = simple_strtoul(s, NULL, 0);
+-      return 0;
+-}
+-__setup("vdso=", vdso_setup);
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index a492be2..08678da 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
+ struct shared_info xen_dummy_shared_info;
+-void *xen_initial_gdt;
+-
+ RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
+ __read_mostly int xen_have_vector_callback;
+ EXPORT_SYMBOL_GPL(xen_have_vector_callback);
+@@ -542,8 +540,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
+ {
+       unsigned long va = dtr->address;
+       unsigned int size = dtr->size + 1;
+-      unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+-      unsigned long frames[pages];
++      unsigned long frames[65536 / PAGE_SIZE];
+       int f;
+       /*
+@@ -591,8 +588,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
+ {
+       unsigned long va = dtr->address;
+       unsigned int size = dtr->size + 1;
+-      unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+-      unsigned long frames[pages];
++      unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
+       int f;
+       /*
+@@ -600,7 +596,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
+        * 8-byte entries, or 16 4k pages..
+        */
+-      BUG_ON(size > 65536);
++      BUG_ON(size > GDT_SIZE);
+       BUG_ON(va & ~PAGE_MASK);
+       for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
+@@ -985,7 +981,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
+         return 0;
+ }
+-static void set_xen_basic_apic_ops(void)
++static void __init set_xen_basic_apic_ops(void)
+ {
+       apic->read = xen_apic_read;
+       apic->write = xen_apic_write;
+@@ -1290,30 +1286,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
+ #endif
+ };
+-static void xen_reboot(int reason)
++static __noreturn void xen_reboot(int reason)
+ {
+       struct sched_shutdown r = { .reason = reason };
+-      if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
+-              BUG();
++      HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
++      BUG();
+ }
+-static void xen_restart(char *msg)
++static __noreturn void xen_restart(char *msg)
+ {
+       xen_reboot(SHUTDOWN_reboot);
+ }
+-static void xen_emergency_restart(void)
++static __noreturn void xen_emergency_restart(void)
+ {
+       xen_reboot(SHUTDOWN_reboot);
+ }
+-static void xen_machine_halt(void)
++static __noreturn void xen_machine_halt(void)
+ {
+       xen_reboot(SHUTDOWN_poweroff);
+ }
+-static void xen_machine_power_off(void)
++static __noreturn void xen_machine_power_off(void)
+ {
+       if (pm_power_off)
+               pm_power_off();
+@@ -1464,7 +1460,17 @@ asmlinkage void __init xen_start_kernel(void)
+       __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+       /* Work out if we support NX */
+-      x86_configure_nx();
++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
++      if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
++          (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
++              unsigned l, h;
++
++              __supported_pte_mask |= _PAGE_NX;
++              rdmsr(MSR_EFER, l, h);
++              l |= EFER_NX;
++              wrmsr(MSR_EFER, l, h);
++      }
++#endif
+       xen_setup_features();
+@@ -1495,13 +1501,6 @@ asmlinkage void __init xen_start_kernel(void)
+       machine_ops = xen_machine_ops;
+-      /*
+-       * The only reliable way to retain the initial address of the
+-       * percpu gdt_page is to remember it here, so we can go and
+-       * mark it RW later, when the initial percpu area is freed.
+-       */
+-      xen_initial_gdt = &per_cpu(gdt_page, 0);
+-
+       xen_smp_init();
+ #ifdef CONFIG_ACPI_NUMA
+@@ -1700,7 +1699,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
++static struct notifier_block xen_hvm_cpu_notifier = {
+       .notifier_call  = xen_hvm_cpu_notify,
+ };
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index fdc3ba2..3daee39 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -1894,6 +1894,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+       /* L3_k[510] -> level2_kernel_pgt
+        * L3_i[511] -> level2_fixmap_pgt */
+       convert_pfn_mfn(level3_kernel_pgt);
++      convert_pfn_mfn(level3_vmalloc_start_pgt);
++      convert_pfn_mfn(level3_vmalloc_end_pgt);
++      convert_pfn_mfn(level3_vmemmap_pgt);
+       /* We get [511][511] and have Xen's version of level2_kernel_pgt */
+       l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
+@@ -1923,8 +1926,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+       set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
+       set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
+       set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
++      set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
++      set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
++      set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
+       set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
+       set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
++      set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
+       set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+       set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+@@ -2108,6 +2115,7 @@ static void __init xen_post_allocator_init(void)
+       pv_mmu_ops.set_pud = xen_set_pud;
+ #if PAGETABLE_LEVELS == 4
+       pv_mmu_ops.set_pgd = xen_set_pgd;
++      pv_mmu_ops.set_pgd_batched = xen_set_pgd;
+ #endif
+       /* This will work as long as patching hasn't happened yet
+@@ -2186,6 +2194,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
+       .pud_val = PV_CALLEE_SAVE(xen_pud_val),
+       .make_pud = PV_CALLEE_SAVE(xen_make_pud),
+       .set_pgd = xen_set_pgd_hyper,
++      .set_pgd_batched = xen_set_pgd_hyper,
+       .alloc_pud = xen_alloc_pmd_init,
+       .release_pud = xen_release_pmd_init,
+diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
+index d99cae8..18401e1 100644
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -240,11 +240,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
+ {
+       BUG_ON(smp_processor_id() != 0);
+       native_smp_prepare_boot_cpu();
+-
+-      /* We've switched to the "real" per-cpu gdt, so make sure the
+-         old memory can be recycled */
+-      make_lowmem_page_readwrite(xen_initial_gdt);
+-
+       xen_filter_cpu_maps();
+       xen_setup_vcpu_info_placement();
+ }
+@@ -314,7 +309,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
+       ctxt->user_regs.ss = __KERNEL_DS;
+ #ifdef CONFIG_X86_32
+       ctxt->user_regs.fs = __KERNEL_PERCPU;
+-      ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
++      savesegment(gs, ctxt->user_regs.gs);
+ #else
+       ctxt->gs_base_kernel = per_cpu_offset(cpu);
+ #endif
+@@ -324,8 +319,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
+       {
+               ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
+-              ctxt->user_regs.ds = __USER_DS;
+-              ctxt->user_regs.es = __USER_DS;
++              ctxt->user_regs.ds = __KERNEL_DS;
++              ctxt->user_regs.es = __KERNEL_DS;
+               xen_copy_trap_info(ctxt->trap_ctxt);
+@@ -370,13 +365,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
+       int rc;
+       per_cpu(current_task, cpu) = idle;
++      per_cpu(current_tinfo, cpu) = &idle->tinfo;
+ #ifdef CONFIG_X86_32
+       irq_ctx_init(cpu);
+ #else
+       clear_tsk_thread_flag(idle, TIF_FORK);
+-      per_cpu(kernel_stack, cpu) =
+-              (unsigned long)task_stack_page(idle) -
+-              KERNEL_STACK_OFFSET + THREAD_SIZE;
++      per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
+ #endif
+       xen_setup_runstate_info(cpu);
+       xen_setup_timer(cpu);
+@@ -651,7 +645,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
+ void __init xen_smp_init(void)
+ {
+-      smp_ops = xen_smp_ops;
++      memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
+       xen_fill_possible_map();
+       xen_init_spinlocks();
+ }
+diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
+index 33ca6e4..0ded929 100644
+--- a/arch/x86/xen/xen-asm_32.S
++++ b/arch/x86/xen/xen-asm_32.S
+@@ -84,14 +84,14 @@ ENTRY(xen_iret)
+       ESP_OFFSET=4    # bytes pushed onto stack
+       /*
+-       * Store vcpu_info pointer for easy access.  Do it this way to
+-       * avoid having to reload %fs
++       * Store vcpu_info pointer for easy access.
+        */
+ #ifdef CONFIG_SMP
+-      GET_THREAD_INFO(%eax)
+-      movl %ss:TI_cpu(%eax), %eax
+-      movl %ss:__per_cpu_offset(,%eax,4), %eax
+-      mov %ss:xen_vcpu(%eax), %eax
++      push %fs
++      mov $(__KERNEL_PERCPU), %eax
++      mov %eax, %fs
++      mov PER_CPU_VAR(xen_vcpu), %eax
++      pop %fs
+ #else
+       movl %ss:xen_vcpu, %eax
+ #endif
+diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
+index 7faed58..ba4427c 100644
+--- a/arch/x86/xen/xen-head.S
++++ b/arch/x86/xen/xen-head.S
+@@ -19,6 +19,17 @@ ENTRY(startup_xen)
+ #ifdef CONFIG_X86_32
+       mov %esi,xen_start_info
+       mov $init_thread_union+THREAD_SIZE,%esp
++#ifdef CONFIG_SMP
++      movl $cpu_gdt_table,%edi
++      movl $__per_cpu_load,%eax
++      movw %ax,__KERNEL_PERCPU + 2(%edi)
++      rorl $16,%eax
++      movb %al,__KERNEL_PERCPU + 4(%edi)
++      movb %ah,__KERNEL_PERCPU + 7(%edi)
++      movl $__per_cpu_end - 1,%eax
++      subl $__per_cpu_start,%eax
++      movw %ax,__KERNEL_PERCPU + 0(%edi)
++#endif
+ #else
+       mov %rsi,xen_start_info
+       mov $init_thread_union+THREAD_SIZE,%rsp
+diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
+index a95b417..b6dbd0b 100644
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -10,8 +10,6 @@
+ extern const char xen_hypervisor_callback[];
+ extern const char xen_failsafe_callback[];
+-extern void *xen_initial_gdt;
+-
+ struct trap_info;
+ void xen_copy_trap_info(struct trap_info *traps);
+diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
+index 525bd3d..ef888b1 100644
+--- a/arch/xtensa/variants/dc232b/include/variant/core.h
++++ b/arch/xtensa/variants/dc232b/include/variant/core.h
+@@ -119,9 +119,9 @@
+   ----------------------------------------------------------------------*/
+ #define XCHAL_ICACHE_LINESIZE         32      /* I-cache line size in bytes */
+-#define XCHAL_DCACHE_LINESIZE         32      /* D-cache line size in bytes */
+ #define XCHAL_ICACHE_LINEWIDTH                5       /* log2(I line size in bytes) */
+ #define XCHAL_DCACHE_LINEWIDTH                5       /* log2(D line size in bytes) */
++#define XCHAL_DCACHE_LINESIZE         (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH)   /* D-cache line size in bytes */
+ #define XCHAL_ICACHE_SIZE             16384   /* I-cache size in bytes or 0 */
+ #define XCHAL_DCACHE_SIZE             16384   /* D-cache size in bytes or 0 */
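
The dc232b change above, repeated for the fsf and s6000 variants below, derives XCHAL_DCACHE_LINESIZE from XCHAL_DCACHE_LINEWIDTH instead of hard-coding it, using _AC() from <linux/const.h> so the constant carries a UL suffix in C while staying a bare number in assembly. Roughly:

    /* _AC() from <linux/const.h>:
     *   in C:        _AC(1,UL) expands to 1UL
     *   in assembly: _AC(1,UL) expands to 1
     * so (_AC(1,UL) << 5) == 32 reproduces the old dc232b literal while
     * remaining usable from .S files. */
    #include <linux/const.h>

    #define DCACHE_LINEWIDTH 5
    #define DCACHE_LINESIZE  (_AC(1,UL) << DCACHE_LINEWIDTH)

The fsf and s6000 variants use a line width of 4, so the derived value there matches the removed literal of 16 bytes as well.
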
+diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
+index 2f33760..835e50a 100644
+--- a/arch/xtensa/variants/fsf/include/variant/core.h
++++ b/arch/xtensa/variants/fsf/include/variant/core.h
+@@ -11,6 +11,7 @@
+ #ifndef _XTENSA_CORE_H
+ #define _XTENSA_CORE_H
++#include <linux/const.h>
+ /****************************************************************************
+           Parameters Useful for Any Code, USER or PRIVILEGED
+@@ -112,9 +113,9 @@
+   ----------------------------------------------------------------------*/
+ #define XCHAL_ICACHE_LINESIZE         16      /* I-cache line size in bytes */
+-#define XCHAL_DCACHE_LINESIZE         16      /* D-cache line size in bytes */
+ #define XCHAL_ICACHE_LINEWIDTH                4       /* log2(I line size in bytes) */
+ #define XCHAL_DCACHE_LINEWIDTH                4       /* log2(D line size in bytes) */
++#define XCHAL_DCACHE_LINESIZE         (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
+ #define XCHAL_ICACHE_SIZE             8192    /* I-cache size in bytes or 0 */
+ #define XCHAL_DCACHE_SIZE             8192    /* D-cache size in bytes or 0 */
+diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
+index af00795..2bb8105 100644
+--- a/arch/xtensa/variants/s6000/include/variant/core.h
++++ b/arch/xtensa/variants/s6000/include/variant/core.h
+@@ -11,6 +11,7 @@
+ #ifndef _XTENSA_CORE_CONFIGURATION_H
+ #define _XTENSA_CORE_CONFIGURATION_H
++#include <linux/const.h>
+ /****************************************************************************
+           Parameters Useful for Any Code, USER or PRIVILEGED
+@@ -118,9 +119,9 @@
+   ----------------------------------------------------------------------*/
+ #define XCHAL_ICACHE_LINESIZE         16      /* I-cache line size in bytes */
+-#define XCHAL_DCACHE_LINESIZE         16      /* D-cache line size in bytes */
+ #define XCHAL_ICACHE_LINEWIDTH                4       /* log2(I line size in bytes) */
+ #define XCHAL_DCACHE_LINEWIDTH                4       /* log2(D line size in bytes) */
++#define XCHAL_DCACHE_LINESIZE         (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH)   /* D-cache line size in bytes */
+ #define XCHAL_ICACHE_SIZE             32768   /* I-cache size in bytes or 0 */
+ #define XCHAL_DCACHE_SIZE             32768   /* D-cache size in bytes or 0 */
+diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
+index 58916af..eb9dbcf6 100644
+--- a/block/blk-iopoll.c
++++ b/block/blk-iopoll.c
+@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
+ }
+ EXPORT_SYMBOL(blk_iopoll_complete);
+-static void blk_iopoll_softirq(struct softirq_action *h)
++static void blk_iopoll_softirq(void)
+ {
+       struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
+       int rearm = 0, budget = blk_iopoll_budget;
+@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
++static struct notifier_block blk_iopoll_cpu_notifier = {
+       .notifier_call  = blk_iopoll_cpu_notify,
+ };
+diff --git a/block/blk-map.c b/block/blk-map.c
+index 623e1cd..ca1e109 100644
+--- a/block/blk-map.c
++++ b/block/blk-map.c
+@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
+       if (!len || !kbuf)
+               return -EINVAL;
+-      do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
++      do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
+       if (do_copy)
+               bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
+       else
+diff --git a/block/blk-softirq.c b/block/blk-softirq.c
+index 467c8de..f3628c5 100644
+--- a/block/blk-softirq.c
++++ b/block/blk-softirq.c
+@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+  * Softirq action handler - move entries to local list and loop over them
+  * while passing them to the queue registered handler.
+  */
+-static void blk_done_softirq(struct softirq_action *h)
++static void blk_done_softirq(void)
+ {
+       struct list_head *cpu_list, local_list;
+@@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
++static struct notifier_block blk_cpu_notifier = {
+       .notifier_call  = blk_cpu_notify,
+ };
+diff --git a/block/bsg.c b/block/bsg.c
+index 420a5a9..23834aa 100644
+--- a/block/bsg.c
++++ b/block/bsg.c
+@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
+                               struct sg_io_v4 *hdr, struct bsg_device *bd,
+                               fmode_t has_write_perm)
+ {
++      unsigned char tmpcmd[sizeof(rq->__cmd)];
++      unsigned char *cmdptr;
++
+       if (hdr->request_len > BLK_MAX_CDB) {
+               rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
+               if (!rq->cmd)
+                       return -ENOMEM;
+-      }
++              cmdptr = rq->cmd;
++      } else
++              cmdptr = tmpcmd;
+-      if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
++      if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
+                          hdr->request_len))
+               return -EFAULT;
++      if (cmdptr != rq->cmd)
++              memcpy(rq->cmd, cmdptr, hdr->request_len);
++
+       if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
+               if (blk_verify_command(rq->cmd, has_write_perm))
+                       return -EPERM;
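
The blk_fill_sgv4_hdr_rq() change above, like the scsi_ioctl.c hunks further down, stops copy_from_user() from writing straight into the request's inline command array: short commands are bounced through a stack buffer sized exactly like that array and copied into place afterwards, while oversized commands still land in the separately allocated rq->cmd buffer. A sketch of the idiom with invented names (struct req, CMD_INLINE_LEN and fetch_cmd are placeholders, not from the patch):

    #include <linux/uaccess.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    #define CMD_INLINE_LEN 16

    struct req {
            unsigned char *cmd;                  /* points at __cmd or a kmalloc'd buffer */
            unsigned char __cmd[CMD_INLINE_LEN];
    };

    static int fetch_cmd(struct req *rq, const void __user *ucmd, unsigned int len)
    {
            unsigned char tmpcmd[sizeof(rq->__cmd)];
            unsigned char *cmdptr;

            if (rq->cmd != rq->__cmd)
                    cmdptr = rq->cmd;       /* separately allocated: copy directly */
            else if (len <= sizeof(tmpcmd))
                    cmdptr = tmpcmd;        /* inline array: bounce via the stack */
            else
                    return -EINVAL;

            if (copy_from_user(cmdptr, ucmd, len))
                    return -EFAULT;
            if (cmdptr != rq->cmd)
                    memcpy(rq->cmd, cmdptr, len);
            return 0;
    }

Presumably the point is that the user copy then targets an exact-size stack object rather than a small field buried inside a larger heap structure, which keeps hardened usercopy-style bounds checking straightforward.
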
+diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
+index 7c668c8..db3521c 100644
+--- a/block/compat_ioctl.c
++++ b/block/compat_ioctl.c
+@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
+               err |= __get_user(f->spec1, &uf->spec1);
+               err |= __get_user(f->fmt_gap, &uf->fmt_gap);
+               err |= __get_user(name, &uf->name);
+-              f->name = compat_ptr(name);
++              f->name = (void __force_kernel *)compat_ptr(name);
+               if (err) {
+                       err = -EFAULT;
+                       goto out;
+diff --git a/block/genhd.c b/block/genhd.c
+index cdeb527..10aa34db 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
+ /*
+  * Register device numbers dev..(dev+range-1)
+- * range must be nonzero
++ * Noop if @range is zero.
+  * The hash chain is sorted on range, so that subranges can override.
+  */
+ void blk_register_region(dev_t devt, unsigned long range, struct module *module,
+                        struct kobject *(*probe)(dev_t, int *, void *),
+                        int (*lock)(dev_t, void *), void *data)
+ {
+-      kobj_map(bdev_map, devt, range, module, probe, lock, data);
++      if (range)
++              kobj_map(bdev_map, devt, range, module, probe, lock, data);
+ }
+ EXPORT_SYMBOL(blk_register_region);
++/* undo blk_register_region(), noop if @range is zero */
+ void blk_unregister_region(dev_t devt, unsigned long range)
+ {
+-      kobj_unmap(bdev_map, devt, range);
++      if (range)
++              kobj_unmap(bdev_map, devt, range);
+ }
+ EXPORT_SYMBOL(blk_unregister_region);
+diff --git a/block/partitions/efi.c b/block/partitions/efi.c
+index c85fc89..51e690b 100644
+--- a/block/partitions/efi.c
++++ b/block/partitions/efi.c
+@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
+       if (!gpt)
+               return NULL;
++      if (!le32_to_cpu(gpt->num_partition_entries))
++              return NULL;
++      pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
++      if (!pte)
++              return NULL;
++
+       count = le32_to_cpu(gpt->num_partition_entries) *
+                 le32_to_cpu(gpt->sizeof_partition_entry);
+-      if (!count)
+-              return NULL;
+-      pte = kmalloc(count, GFP_KERNEL);
+-      if (!pte)
+-              return NULL;
+-
+       if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
+                      (u8 *) pte,
+                    count) < count) {
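
The alloc_read_gpt_entries() change above reorders the checks so a zero num_partition_entries bails out immediately, and replaces kmalloc() of an open-coded num_partition_entries * sizeof_partition_entry product with kcalloc(), which fails cleanly when values read from the on-disk GPT header would overflow the multiplication. For comparison, a minimal sketch:

    #include <linux/slab.h>
    #include <linux/types.h>

    static void *alloc_entries(u32 nr, u32 entry_size)
    {
            /* An open-coded product can wrap on overflow and yield a buffer
             * far smaller than what the caller later reads into it:
             *     return kmalloc(nr * entry_size, GFP_KERNEL);
             * kcalloc() checks the multiplication and returns NULL instead. */
            return kcalloc(nr, entry_size, GFP_KERNEL);
    }
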
+diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
+index a5ffcc9..3cedc9c 100644
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -224,8 +224,20 @@ EXPORT_SYMBOL(blk_verify_command);
+ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
+                            struct sg_io_hdr *hdr, fmode_t mode)
+ {
+-      if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
++      unsigned char tmpcmd[sizeof(rq->__cmd)];
++      unsigned char *cmdptr;
++
++      if (rq->cmd != rq->__cmd)
++              cmdptr = rq->cmd;
++      else
++              cmdptr = tmpcmd;
++
++      if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
+               return -EFAULT;
++
++      if (cmdptr != rq->cmd)
++              memcpy(rq->cmd, cmdptr, hdr->cmd_len);
++
+       if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
+               return -EPERM;
+@@ -434,6 +446,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
+       int err;
+       unsigned int in_len, out_len, bytes, opcode, cmdlen;
+       char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
++      unsigned char tmpcmd[sizeof(rq->__cmd)];
++      unsigned char *cmdptr;
+       if (!sic)
+               return -EINVAL;
+@@ -467,9 +481,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
+        */
+       err = -EFAULT;
+       rq->cmd_len = cmdlen;
+-      if (copy_from_user(rq->cmd, sic->data, cmdlen))
++
++      if (rq->cmd != rq->__cmd)
++              cmdptr = rq->cmd;
++      else
++              cmdptr = tmpcmd;
++
++      if (copy_from_user(cmdptr, sic->data, cmdlen))
+               goto error;
++      if (rq->cmd != cmdptr)
++              memcpy(rq->cmd, cmdptr, cmdlen);
++
+       if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
+               goto error;
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index 7bdd61b..afec999 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
+ struct cryptd_blkcipher_request_ctx {
+       crypto_completion_t complete;
+-};
++} __no_const;
+ struct cryptd_hash_ctx {
+       struct crypto_shash *child;
+@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
+ struct cryptd_aead_request_ctx {
+       crypto_completion_t complete;
+-};
++} __no_const;
+ static void cryptd_queue_worker(struct work_struct *work);
+diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
+index b2c99dc..476c9fb 100644
+--- a/crypto/pcrypt.c
++++ b/crypto/pcrypt.c
+@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
+       int ret;
+       pinst->kobj.kset = pcrypt_kset;
+-      ret = kobject_add(&pinst->kobj, NULL, name);
++      ret = kobject_add(&pinst->kobj, NULL, "%s", name);
+       if (!ret)
+               kobject_uevent(&pinst->kobj, KOBJ_ADD);
+@@ -455,8 +455,8 @@ static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
+       get_online_cpus();
+-      pcrypt->wq = alloc_workqueue(name,
+-                                   WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
++      pcrypt->wq = alloc_workqueue("%s",
++                                   WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1, name);
+       if (!pcrypt->wq)
+               goto err;
+diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
+index f220d64..d359ad6 100644
+--- a/drivers/acpi/apei/apei-internal.h
++++ b/drivers/acpi/apei/apei-internal.h
+@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
+ struct apei_exec_ins_type {
+       u32 flags;
+       apei_exec_ins_func_t run;
+-};
++} __do_const;
+ struct apei_exec_context {
+       u32 ip;
+diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
+index 33dc6a0..4b24b47 100644
+--- a/drivers/acpi/apei/cper.c
++++ b/drivers/acpi/apei/cper.c
+@@ -39,12 +39,12 @@
+  */
+ u64 cper_next_record_id(void)
+ {
+-      static atomic64_t seq;
++      static atomic64_unchecked_t seq;
+-      if (!atomic64_read(&seq))
+-              atomic64_set(&seq, ((u64)get_seconds()) << 32);
++      if (!atomic64_read_unchecked(&seq))
++              atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
+-      return atomic64_inc_return(&seq);
++      return atomic64_inc_return_unchecked(&seq);
+ }
+ EXPORT_SYMBOL_GPL(cper_next_record_id);
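
cper_next_record_id() above moves its sequence counter to the patch's atomic64_unchecked_t type, and the same *_unchecked helpers appear on the ATM device statistics in the hunks below. The convention in this patch set is that counters which may legitimately wrap (record IDs, statistics) use the unchecked variants, while ordinary atomic_t/atomic64_t reference counts gain overflow detection under PAX_REFCOUNT. On a tree without the patch, a compatibility shim along these lines would keep such callers building (a sketch only; the patch itself defines these types elsewhere):

    #include <linux/atomic.h>

    #ifndef CONFIG_PAX_REFCOUNT
    typedef atomic64_t atomic64_unchecked_t;
    #define atomic64_read_unchecked(v)              atomic64_read(v)
    #define atomic64_set_unchecked(v, i)            atomic64_set((v), (i))
    #define atomic64_inc_return_unchecked(v)        atomic64_inc_return(v)
    #endif
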
+diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
+index be60399..778b33e8 100644
+--- a/drivers/acpi/bgrt.c
++++ b/drivers/acpi/bgrt.c
+@@ -87,8 +87,10 @@ static int __init bgrt_init(void)
+               return -ENODEV;
+       sysfs_bin_attr_init(&image_attr);
+-      image_attr.private = bgrt_image;
+-      image_attr.size = bgrt_image_size;
++      pax_open_kernel();
++      *(void **)&image_attr.private = bgrt_image;
++      *(size_t *)&image_attr.size = bgrt_image_size;
++      pax_close_kernel();
+       bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
+       if (!bgrt_kobj)
+diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
+index cb96296..b81293b 100644
+--- a/drivers/acpi/blacklist.c
++++ b/drivers/acpi/blacklist.c
+@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
+       u32 is_critical_error;
+ };
+-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
++static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
+ /*
+  * POLICY: If *anything* doesn't work, put it on the blacklist.
+@@ -193,7 +193,7 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
+       return 0;
+ }
+-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
++static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
+       {
+       .callback = dmi_disable_osi_vista,
+       .ident = "Fujitsu Siemens",
+diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
+index 7586544..636a2f0 100644
+--- a/drivers/acpi/ec_sys.c
++++ b/drivers/acpi/ec_sys.c
+@@ -12,6 +12,7 @@
+ #include <linux/acpi.h>
+ #include <linux/debugfs.h>
+ #include <linux/module.h>
++#include <linux/uaccess.h>
+ #include "internal.h"
+ MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
+@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
+        * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
+        */
+       unsigned int size = EC_SPACE_SIZE;
+-      u8 *data = (u8 *) buf;
++      u8 data;
+       loff_t init_off = *off;
+       int err = 0;
+@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
+               size = count;
+       while (size) {
+-              err = ec_read(*off, &data[*off - init_off]);
++              err = ec_read(*off, &data);
+               if (err)
+                       return err;
++              if (put_user(data, &buf[*off - init_off]))
++                      return -EFAULT;
+               *off += 1;
+               size--;
+       }
+@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
+       unsigned int size = count;
+       loff_t init_off = *off;
+-      u8 *data = (u8 *) buf;
+       int err = 0;
+       if (*off >= EC_SPACE_SIZE)
+@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
+       }
+       while (size) {
+-              u8 byte_write = data[*off - init_off];
++              u8 byte_write;
++              if (get_user(byte_write, &buf[*off - init_off]))
++                      return -EFAULT;
+               err = ec_write(*off, byte_write);
+               if (err)
+                       return err;
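
The acpi_ec_read_io()/acpi_ec_write_io() changes above fix a direct dereference of a user-space pointer: the old code cast the __user buffer to u8 * and indexed it from kernel context, whereas the new code moves one byte at a time with get_user()/put_user(). The read path reduces to a loop like the following sketch, where read_hw_byte() merely stands in for ec_read():

    #include <linux/uaccess.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    static int copy_regs_to_user(char __user *buf, loff_t off, size_t count,
                                 int (*read_hw_byte)(loff_t off, u8 *val))
    {
            size_t i;

            for (i = 0; i < count; i++) {
                    u8 val;
                    int err = read_hw_byte(off + i, &val);

                    if (err)
                            return err;
                    /* put_user() writes to user space with fault handling */
                    if (put_user(val, &buf[i]))
                            return -EFAULT;
            }
            return 0;
    }
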
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index eb133c7..f571552 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -994,7 +994,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
+ {
+       int i, count = CPUIDLE_DRIVER_STATE_START;
+       struct acpi_processor_cx *cx;
+-      struct cpuidle_state *state;
++      cpuidle_state_no_const *state;
+       struct cpuidle_driver *drv = &acpi_idle_driver;
+       if (!pr->flags.power_setup_done)
+diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
+index fcae5fa..e9f71ea 100644
+--- a/drivers/acpi/sysfs.c
++++ b/drivers/acpi/sysfs.c
+@@ -423,11 +423,11 @@ static u32 num_counters;
+ static struct attribute **all_attrs;
+ static u32 acpi_gpe_count;
+-static struct attribute_group interrupt_stats_attr_group = {
++static attribute_group_no_const interrupt_stats_attr_group = {
+       .name = "interrupts",
+ };
+-static struct kobj_attribute *counter_attrs;
++static kobj_attribute_no_const *counter_attrs;
+ static void delete_gpe_attr_array(void)
+ {
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 7b9bdd8..37638ca 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -1230,7 +1230,7 @@ int ahci_kick_engine(struct ata_port *ap)
+ }
+ EXPORT_SYMBOL_GPL(ahci_kick_engine);
+-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
++static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
+                               struct ata_taskfile *tf, int is_cmd, u16 flags,
+                               unsigned long timeout_msec)
+ {
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index adf002a..39bb8f9 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4792,7 +4792,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
+       struct ata_port *ap;
+       unsigned int tag;
+-      WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
++      BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+       ap = qc->ap;
+       qc->flags = 0;
+@@ -4808,7 +4808,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
+       struct ata_port *ap;
+       struct ata_link *link;
+-      WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
++      BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+       WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
+       ap = qc->ap;
+       link = qc->dev->link;
+@@ -5926,6 +5926,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
+               return;
+       spin_lock(&lock);
++      pax_open_kernel();
+       for (cur = ops->inherits; cur; cur = cur->inherits) {
+               void **inherit = (void **)cur;
+@@ -5939,8 +5940,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
+               if (IS_ERR(*pp))
+                       *pp = NULL;
+-      ops->inherits = NULL;
++      *(struct ata_port_operations **)&ops->inherits = NULL;
++      pax_close_kernel();
+       spin_unlock(&lock);
+ }
+diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
+index 7638121..357a965 100644
+--- a/drivers/ata/pata_arasan_cf.c
++++ b/drivers/ata/pata_arasan_cf.c
+@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
+       /* Handle platform specific quirks */
+       if (quirk) {
+               if (quirk & CF_BROKEN_PIO) {
+-                      ap->ops->set_piomode = NULL;
++                      pax_open_kernel();
++                      *(void **)&ap->ops->set_piomode = NULL;
++                      pax_close_kernel();
+                       ap->pio_mask = 0;
+               }
+               if (quirk & CF_BROKEN_MWDMA)
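
Several hunks above (bgrt.c, libata-core.c, pata_arasan_cf.c) share one pattern: operations structures that this patch set makes const or read-only after init are updated by writing through a cast inside a pax_open_kernel()/pax_close_kernel() window, which temporarily permits stores to that memory. In outline, assuming the patched tree's helpers (struct ops, global_ops and install_handler are invented for the sketch):

    struct ops {
            void (*handler)(void);
    };

    static const struct ops global_ops;             /* placed in read-only data */

    static void install_handler(void (*fn)(void))
    {
            pax_open_kernel();                      /* make the page writable */
            *(void **)&global_ops.handler = fn;     /* write through a cast */
            pax_close_kernel();                     /* seal it again */
    }

Without the patch these helpers do not exist, and the plain assignments the hunks remove are the normal form.
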
+diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
+index f9b983a..887b9d8 100644
+--- a/drivers/atm/adummy.c
++++ b/drivers/atm/adummy.c
+@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
+               vcc->pop(vcc, skb);
+       else
+               dev_kfree_skb_any(skb);
+-      atomic_inc(&vcc->stats->tx);
++      atomic_inc_unchecked(&vcc->stats->tx);
+       return 0;
+ }
+diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
+index 77a7480d..05cde58 100644
+--- a/drivers/atm/ambassador.c
++++ b/drivers/atm/ambassador.c
+@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
+   PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
+   
+   // VC layer stats
+-  atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++  atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+   
+   // free the descriptor
+   kfree (tx_descr);
+@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
+         dump_skb ("<<<", vc, skb);
+         
+         // VC layer stats
+-        atomic_inc(&atm_vcc->stats->rx);
++        atomic_inc_unchecked(&atm_vcc->stats->rx);
+         __net_timestamp(skb);
+         // end of our responsibility
+         atm_vcc->push (atm_vcc, skb);
+@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
+       } else {
+               PRINTK (KERN_INFO, "dropped over-size frame");
+       // should we count this?
+-      atomic_inc(&atm_vcc->stats->rx_drop);
++      atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+       }
+       
+     } else {
+@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
+   }
+   
+   if (check_area (skb->data, skb->len)) {
+-    atomic_inc(&atm_vcc->stats->tx_err);
++    atomic_inc_unchecked(&atm_vcc->stats->tx_err);
+     return -ENOMEM; // ?
+   }
+   
+diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
+index 0e3f8f9..765a7a5 100644
+--- a/drivers/atm/atmtcp.c
++++ b/drivers/atm/atmtcp.c
+@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
+               if (vcc->pop) vcc->pop(vcc,skb);
+               else dev_kfree_skb(skb);
+               if (dev_data) return 0;
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               return -ENOLINK;
+       }
+       size = skb->len+sizeof(struct atmtcp_hdr);
+@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
+       if (!new_skb) {
+               if (vcc->pop) vcc->pop(vcc,skb);
+               else dev_kfree_skb(skb);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               return -ENOBUFS;
+       }
+       hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
+@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
+       if (vcc->pop) vcc->pop(vcc,skb);
+       else dev_kfree_skb(skb);
+       out_vcc->push(out_vcc,new_skb);
+-      atomic_inc(&vcc->stats->tx);
+-      atomic_inc(&out_vcc->stats->rx);
++      atomic_inc_unchecked(&vcc->stats->tx);
++      atomic_inc_unchecked(&out_vcc->stats->rx);
+       return 0;
+ }
+@@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
+       out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
+       read_unlock(&vcc_sklist_lock);
+       if (!out_vcc) {
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               goto done;
+       }
+       skb_pull(skb,sizeof(struct atmtcp_hdr));
+@@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
+       __net_timestamp(new_skb);
+       skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
+       out_vcc->push(out_vcc,new_skb);
+-      atomic_inc(&vcc->stats->tx);
+-      atomic_inc(&out_vcc->stats->rx);
++      atomic_inc_unchecked(&vcc->stats->tx);
++      atomic_inc_unchecked(&out_vcc->stats->rx);
+ done:
+       if (vcc->pop) vcc->pop(vcc,skb);
+       else dev_kfree_skb(skb);
+diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
+index b1955ba..b179940 100644
+--- a/drivers/atm/eni.c
++++ b/drivers/atm/eni.c
+@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
+               DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
+                   vcc->dev->number);
+               length = 0;
+-              atomic_inc(&vcc->stats->rx_err);
++              atomic_inc_unchecked(&vcc->stats->rx_err);
+       }
+       else {
+               length = ATM_CELL_SIZE-1; /* no HEC */
+@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
+                           size);
+               }
+               eff = length = 0;
+-              atomic_inc(&vcc->stats->rx_err);
++              atomic_inc_unchecked(&vcc->stats->rx_err);
+       }
+       else {
+               size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
+@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
+                           "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
+                           vcc->dev->number,vcc->vci,length,size << 2,descr);
+                       length = eff = 0;
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+               }
+       }
+       skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
+@@ -767,7 +767,7 @@ rx_dequeued++;
+                       vcc->push(vcc,skb);
+                       pushed++;
+               }
+-              atomic_inc(&vcc->stats->rx);
++              atomic_inc_unchecked(&vcc->stats->rx);
+       }
+       wake_up(&eni_dev->rx_wait);
+ }
+@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
+                   PCI_DMA_TODEVICE);
+               if (vcc->pop) vcc->pop(vcc,skb);
+               else dev_kfree_skb_irq(skb);
+-              atomic_inc(&vcc->stats->tx);
++              atomic_inc_unchecked(&vcc->stats->tx);
+               wake_up(&eni_dev->tx_wait);
+ dma_complete++;
+       }
+diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
+index b41c948..a002b17 100644
+--- a/drivers/atm/firestream.c
++++ b/drivers/atm/firestream.c
+@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
+                               }
+                       }
+-                      atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++                      atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+                       fs_dprintk (FS_DEBUG_TXMEM, "i");
+                       fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
+@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
+ #endif
+                               skb_put (skb, qe->p1 & 0xffff); 
+                               ATM_SKB(skb)->vcc = atm_vcc;
+-                              atomic_inc(&atm_vcc->stats->rx);
++                              atomic_inc_unchecked(&atm_vcc->stats->rx);
+                               __net_timestamp(skb);
+                               fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
+                               atm_vcc->push (atm_vcc, skb);
+@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
+                               kfree (pe);
+                       }
+                       if (atm_vcc)
+-                              atomic_inc(&atm_vcc->stats->rx_drop);
++                              atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+                       break;
+               case 0x1f: /*  Reassembly abort: no buffers. */
+                       /* Silently increment error counter. */
+                       if (atm_vcc)
+-                              atomic_inc(&atm_vcc->stats->rx_drop);
++                              atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+                       break;
+               default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
+                       printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n", 
+diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
+index 204814e..cede831 100644
+--- a/drivers/atm/fore200e.c
++++ b/drivers/atm/fore200e.c
+@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
+ #endif
+               /* check error condition */
+               if (*entry->status & STATUS_ERROR)
+-                  atomic_inc(&vcc->stats->tx_err);
++                  atomic_inc_unchecked(&vcc->stats->tx_err);
+               else
+-                  atomic_inc(&vcc->stats->tx);
++                  atomic_inc_unchecked(&vcc->stats->tx);
+           }
+       }
+@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
+     if (skb == NULL) {
+       DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
+-      atomic_inc(&vcc->stats->rx_drop);
++      atomic_inc_unchecked(&vcc->stats->rx_drop);
+       return -ENOMEM;
+     } 
+@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
+       dev_kfree_skb_any(skb);
+-      atomic_inc(&vcc->stats->rx_drop);
++      atomic_inc_unchecked(&vcc->stats->rx_drop);
+       return -ENOMEM;
+     }
+     ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
+     vcc->push(vcc, skb);
+-    atomic_inc(&vcc->stats->rx);
++    atomic_inc_unchecked(&vcc->stats->rx);
+     ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
+@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
+               DPRINTK(2, "damaged PDU on %d.%d.%d\n",
+                       fore200e->atm_dev->number,
+                       entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
+-              atomic_inc(&vcc->stats->rx_err);
++              atomic_inc_unchecked(&vcc->stats->rx_err);
+           }
+       }
+@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
+               goto retry_here;
+           }
+-          atomic_inc(&vcc->stats->tx_err);
++          atomic_inc_unchecked(&vcc->stats->tx_err);
+           fore200e->tx_sat++;
+           DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
+diff --git a/drivers/atm/he.c b/drivers/atm/he.c
+index 507362a..a845e57 100644
+--- a/drivers/atm/he.c
++++ b/drivers/atm/he.c
+@@ -1698,7 +1698,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
+               if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
+                       hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
+-                              atomic_inc(&vcc->stats->rx_drop);
++                              atomic_inc_unchecked(&vcc->stats->rx_drop);
+                       goto return_host_buffers;
+               }
+@@ -1725,7 +1725,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
+                               RBRQ_LEN_ERR(he_dev->rbrq_head)
+                                                       ? "LEN_ERR" : "",
+                                                       vcc->vpi, vcc->vci);
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+                       goto return_host_buffers;
+               }
+@@ -1777,7 +1777,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
+               vcc->push(vcc, skb);
+               spin_lock(&he_dev->global_lock);
+-              atomic_inc(&vcc->stats->rx);
++              atomic_inc_unchecked(&vcc->stats->rx);
+ return_host_buffers:
+               ++pdus_assembled;
+@@ -2103,7 +2103,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
+                                       tpd->vcc->pop(tpd->vcc, tpd->skb);
+                               else
+                                       dev_kfree_skb_any(tpd->skb);
+-                              atomic_inc(&tpd->vcc->stats->tx_err);
++                              atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
+                       }
+                       pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
+                       return;
+@@ -2515,7 +2515,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
+                       vcc->pop(vcc, skb);
+               else
+                       dev_kfree_skb_any(skb);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               return -EINVAL;
+       }
+@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
+                       vcc->pop(vcc, skb);
+               else
+                       dev_kfree_skb_any(skb);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               return -EINVAL;
+       }
+ #endif
+@@ -2538,7 +2538,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
+                       vcc->pop(vcc, skb);
+               else
+                       dev_kfree_skb_any(skb);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               spin_unlock_irqrestore(&he_dev->global_lock, flags);
+               return -ENOMEM;
+       }
+@@ -2580,7 +2580,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
+                                       vcc->pop(vcc, skb);
+                               else
+                                       dev_kfree_skb_any(skb);
+-                              atomic_inc(&vcc->stats->tx_err);
++                              atomic_inc_unchecked(&vcc->stats->tx_err);
+                               spin_unlock_irqrestore(&he_dev->global_lock, flags);
+                               return -ENOMEM;
+                       }
+@@ -2611,7 +2611,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
+       __enqueue_tpd(he_dev, tpd, cid);
+       spin_unlock_irqrestore(&he_dev->global_lock, flags);
+-      atomic_inc(&vcc->stats->tx);
++      atomic_inc_unchecked(&vcc->stats->tx);
+       return 0;
+ }
+diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
+index 1dc0519..1aadaf7 100644
+--- a/drivers/atm/horizon.c
++++ b/drivers/atm/horizon.c
+@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
+       {
+         struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
+         // VC layer stats
+-        atomic_inc(&vcc->stats->rx);
++        atomic_inc_unchecked(&vcc->stats->rx);
+         __net_timestamp(skb);
+         // end of our responsibility
+         vcc->push (vcc, skb);
+@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
+       dev->tx_iovec = NULL;
+       
+       // VC layer stats
+-      atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++      atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+       
+       // free the skb
+       hrz_kfree_skb (skb);
+diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
+index 272f009..a18ba55 100644
+--- a/drivers/atm/idt77252.c
++++ b/drivers/atm/idt77252.c
+@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
+               else
+                       dev_kfree_skb(skb);
+-              atomic_inc(&vcc->stats->tx);
++              atomic_inc_unchecked(&vcc->stats->tx);
+       }
+       atomic_dec(&scq->used);
+@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+                       if ((sb = dev_alloc_skb(64)) == NULL) {
+                               printk("%s: Can't allocate buffers for aal0.\n",
+                                      card->name);
+-                              atomic_add(i, &vcc->stats->rx_drop);
++                              atomic_add_unchecked(i, &vcc->stats->rx_drop);
+                               break;
+                       }
+                       if (!atm_charge(vcc, sb->truesize)) {
+                               RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
+                                        card->name);
+-                              atomic_add(i - 1, &vcc->stats->rx_drop);
++                              atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
+                               dev_kfree_skb(sb);
+                               break;
+                       }
+@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+                       ATM_SKB(sb)->vcc = vcc;
+                       __net_timestamp(sb);
+                       vcc->push(vcc, sb);
+-                      atomic_inc(&vcc->stats->rx);
++                      atomic_inc_unchecked(&vcc->stats->rx);
+                       cell += ATM_CELL_PAYLOAD;
+               }
+@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+                                "(CDC: %08x)\n",
+                                card->name, len, rpp->len, readl(SAR_REG_CDC));
+                       recycle_rx_pool_skb(card, rpp);
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+                       return;
+               }
+               if (stat & SAR_RSQE_CRC) {
+                       RXPRINTK("%s: AAL5 CRC error.\n", card->name);
+                       recycle_rx_pool_skb(card, rpp);
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+                       return;
+               }
+               if (skb_queue_len(&rpp->queue) > 1) {
+@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+                               RXPRINTK("%s: Can't alloc RX skb.\n",
+                                        card->name);
+                               recycle_rx_pool_skb(card, rpp);
+-                              atomic_inc(&vcc->stats->rx_err);
++                              atomic_inc_unchecked(&vcc->stats->rx_err);
+                               return;
+                       }
+                       if (!atm_charge(vcc, skb->truesize)) {
+@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+                       __net_timestamp(skb);
+                       vcc->push(vcc, skb);
+-                      atomic_inc(&vcc->stats->rx);
++                      atomic_inc_unchecked(&vcc->stats->rx);
+                       return;
+               }
+@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+               __net_timestamp(skb);
+               vcc->push(vcc, skb);
+-              atomic_inc(&vcc->stats->rx);
++              atomic_inc_unchecked(&vcc->stats->rx);
+               if (skb->truesize > SAR_FB_SIZE_3)
+                       add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
+@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
+               if (vcc->qos.aal != ATM_AAL0) {
+                       RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
+                               card->name, vpi, vci);
+-                      atomic_inc(&vcc->stats->rx_drop);
++                      atomic_inc_unchecked(&vcc->stats->rx_drop);
+                       goto drop;
+               }
+       
+               if ((sb = dev_alloc_skb(64)) == NULL) {
+                       printk("%s: Can't allocate buffers for AAL0.\n",
+                              card->name);
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+                       goto drop;
+               }
+@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
+               ATM_SKB(sb)->vcc = vcc;
+               __net_timestamp(sb);
+               vcc->push(vcc, sb);
+-              atomic_inc(&vcc->stats->rx);
++              atomic_inc_unchecked(&vcc->stats->rx);
+ drop:
+               skb_pull(queue, 64);
+@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
+       if (vc == NULL) {
+               printk("%s: NULL connection in send().\n", card->name);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               dev_kfree_skb(skb);
+               return -EINVAL;
+       }
+       if (!test_bit(VCF_TX, &vc->flags)) {
+               printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               dev_kfree_skb(skb);
+               return -EINVAL;
+       }
+@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
+               break;
+       default:
+               printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               dev_kfree_skb(skb);
+               return -EINVAL;
+       }
+       if (skb_shinfo(skb)->nr_frags != 0) {
+               printk("%s: No scatter-gather yet.\n", card->name);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               dev_kfree_skb(skb);
+               return -EINVAL;
+       }
+@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
+       err = queue_skb(card, vc, skb, oam);
+       if (err) {
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               dev_kfree_skb(skb);
+               return err;
+       }
+@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
+       skb = dev_alloc_skb(64);
+       if (!skb) {
+               printk("%s: Out of memory in send_oam().\n", card->name);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               return -ENOMEM;
+       }
+       atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
+index 4217f29..88f547a 100644
+--- a/drivers/atm/iphase.c
++++ b/drivers/atm/iphase.c
+@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
+       status = (u_short) (buf_desc_ptr->desc_mode);  
+       if (status & (RX_CER | RX_PTE | RX_OFL))  
+       {  
+-                atomic_inc(&vcc->stats->rx_err);
++                atomic_inc_unchecked(&vcc->stats->rx_err);
+               IF_ERR(printk("IA: bad packet, dropping it");)  
+                 if (status & RX_CER) { 
+                     IF_ERR(printk(" cause: packet CRC error\n");)
+@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
+       len = dma_addr - buf_addr;  
+         if (len > iadev->rx_buf_sz) {
+            printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
+-           atomic_inc(&vcc->stats->rx_err);
++           atomic_inc_unchecked(&vcc->stats->rx_err);
+          goto out_free_desc;
+         }
+                 
+@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
+           ia_vcc = INPH_IA_VCC(vcc);
+           if (ia_vcc == NULL)
+           {
+-             atomic_inc(&vcc->stats->rx_err);
++             atomic_inc_unchecked(&vcc->stats->rx_err);
+              atm_return(vcc, skb->truesize);
+              dev_kfree_skb_any(skb);
+              goto INCR_DLE;
+@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
+           if ((length > iadev->rx_buf_sz) || (length > 
+                               (skb->len - sizeof(struct cpcs_trailer))))
+           {
+-             atomic_inc(&vcc->stats->rx_err);
++             atomic_inc_unchecked(&vcc->stats->rx_err);
+              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
+                                                             length, skb->len);)
+              atm_return(vcc, skb->truesize);
+@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
+         IF_RX(printk("rx_dle_intr: skb push");)  
+         vcc->push(vcc,skb);  
+-        atomic_inc(&vcc->stats->rx);
++        atomic_inc_unchecked(&vcc->stats->rx);
+           iadev->rx_pkt_cnt++;
+       }  
+ INCR_DLE:
+@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
+          {
+              struct k_sonet_stats *stats;
+              stats = &PRIV(_ia_dev[board])->sonet_stats;
+-             printk("section_bip: %d\n", atomic_read(&stats->section_bip));
+-             printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
+-             printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
+-             printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
+-             printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
+-             printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
+-             printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
+-             printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
+-             printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
++             printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
++             printk("line_bip   : %d\n", atomic_read_unchecked(&stats->line_bip));
++             printk("path_bip   : %d\n", atomic_read_unchecked(&stats->path_bip));
++             printk("line_febe  : %d\n", atomic_read_unchecked(&stats->line_febe));
++             printk("path_febe  : %d\n", atomic_read_unchecked(&stats->path_febe));
++             printk("corr_hcs   : %d\n", atomic_read_unchecked(&stats->corr_hcs));
++             printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
++             printk("tx_cells   : %d\n", atomic_read_unchecked(&stats->tx_cells));
++             printk("rx_cells   : %d\n", atomic_read_unchecked(&stats->rx_cells));
+          }
+             ia_cmds.status = 0;
+             break;
+@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
+       if ((desc == 0) || (desc > iadev->num_tx_desc))  
+       {  
+               IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) 
+-                atomic_inc(&vcc->stats->tx);
++                atomic_inc_unchecked(&vcc->stats->tx);
+               if (vcc->pop)   
+                   vcc->pop(vcc, skb);   
+               else  
+@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
+         ATM_DESC(skb) = vcc->vci;
+         skb_queue_tail(&iadev->tx_dma_q, skb);
+-        atomic_inc(&vcc->stats->tx);
++        atomic_inc_unchecked(&vcc->stats->tx);
+         iadev->tx_pkt_cnt++;
+       /* Increment transaction counter */  
+       writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
+         
+ #if 0        
+         /* add flow control logic */ 
+-        if (atomic_read(&vcc->stats->tx) % 20 == 0) {
++        if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
+           if (iavcc->vc_desc_cnt > 10) {
+              vcc->tx_quota =  vcc->tx_quota * 3 / 4;
+             printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
+diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
+index fa7d701..1e404c7 100644
+--- a/drivers/atm/lanai.c
++++ b/drivers/atm/lanai.c
+@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
+       vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
+       lanai_endtx(lanai, lvcc);
+       lanai_free_skb(lvcc->tx.atmvcc, skb);
+-      atomic_inc(&lvcc->tx.atmvcc->stats->tx);
++      atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
+ }
+ /* Try to fill the buffer - don't call unless there is backlog */
+@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
+       ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
+       __net_timestamp(skb);
+       lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
+-      atomic_inc(&lvcc->rx.atmvcc->stats->rx);
++      atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
+     out:
+       lvcc->rx.buf.ptr = end;
+       cardvcc_write(lvcc, endptr, vcc_rxreadptr);
+@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
+               DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
+                   "vcc %d\n", lanai->number, (unsigned int) s, vci);
+               lanai->stats.service_rxnotaal5++;
+-              atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++              atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+               return 0;
+       }
+       if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
+@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
+               int bytes;
+               read_unlock(&vcc_sklist_lock);
+               DPRINTK("got trashed rx pdu on vci %d\n", vci);
+-              atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++              atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+               lvcc->stats.x.aal5.service_trash++;
+               bytes = (SERVICE_GET_END(s) * 16) -
+                   (((unsigned long) lvcc->rx.buf.ptr) -
+@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
+       }
+       if (s & SERVICE_STREAM) {
+               read_unlock(&vcc_sklist_lock);
+-              atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++              atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+               lvcc->stats.x.aal5.service_stream++;
+               printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
+                   "PDU on VCI %d!\n", lanai->number, vci);
+@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
+               return 0;
+       }
+       DPRINTK("got rx crc error on vci %d\n", vci);
+-      atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++      atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+       lvcc->stats.x.aal5.service_rxcrc++;
+       lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
+       cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
+diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
+index 6587dc2..149833d 100644
+--- a/drivers/atm/nicstar.c
++++ b/drivers/atm/nicstar.c
+@@ -1641,7 +1641,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
+       if ((vc = (vc_map *) vcc->dev_data) == NULL) {
+               printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
+                      card->index);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               dev_kfree_skb_any(skb);
+               return -EINVAL;
+       }
+@@ -1649,7 +1649,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
+       if (!vc->tx) {
+               printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
+                      card->index);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               dev_kfree_skb_any(skb);
+               return -EINVAL;
+       }
+@@ -1657,14 +1657,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
+       if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
+               printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
+                      card->index);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               dev_kfree_skb_any(skb);
+               return -EINVAL;
+       }
+       if (skb_shinfo(skb)->nr_frags != 0) {
+               printk("nicstar%d: No scatter-gather yet.\n", card->index);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               dev_kfree_skb_any(skb);
+               return -EINVAL;
+       }
+@@ -1712,11 +1712,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
+       }
+       if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               dev_kfree_skb_any(skb);
+               return -EIO;
+       }
+-      atomic_inc(&vcc->stats->tx);
++      atomic_inc_unchecked(&vcc->stats->tx);
+       return 0;
+ }
+@@ -2033,14 +2033,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                               printk
+                                   ("nicstar%d: Can't allocate buffers for aal0.\n",
+                                    card->index);
+-                              atomic_add(i, &vcc->stats->rx_drop);
++                              atomic_add_unchecked(i, &vcc->stats->rx_drop);
+                               break;
+                       }
+                       if (!atm_charge(vcc, sb->truesize)) {
+                               RXPRINTK
+                                   ("nicstar%d: atm_charge() dropped aal0 packets.\n",
+                                    card->index);
+-                              atomic_add(i - 1, &vcc->stats->rx_drop);        /* already increased by 1 */
++                              atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);      /* already increased by 1 */
+                               dev_kfree_skb_any(sb);
+                               break;
+                       }
+@@ -2055,7 +2055,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                       ATM_SKB(sb)->vcc = vcc;
+                       __net_timestamp(sb);
+                       vcc->push(vcc, sb);
+-                      atomic_inc(&vcc->stats->rx);
++                      atomic_inc_unchecked(&vcc->stats->rx);
+                       cell += ATM_CELL_PAYLOAD;
+               }
+@@ -2072,7 +2072,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                       if (iovb == NULL) {
+                               printk("nicstar%d: Out of iovec buffers.\n",
+                                      card->index);
+-                              atomic_inc(&vcc->stats->rx_drop);
++                              atomic_inc_unchecked(&vcc->stats->rx_drop);
+                               recycle_rx_buf(card, skb);
+                               return;
+                       }
+@@ -2096,7 +2096,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                  small or large buffer itself. */
+       } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
+               printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
+-              atomic_inc(&vcc->stats->rx_err);
++              atomic_inc_unchecked(&vcc->stats->rx_err);
+               recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+                                     NS_MAX_IOVECS);
+               NS_PRV_IOVCNT(iovb) = 0;
+@@ -2116,7 +2116,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                           ("nicstar%d: Expected a small buffer, and this is not one.\n",
+                            card->index);
+                       which_list(card, skb);
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+                       recycle_rx_buf(card, skb);
+                       vc->rx_iov = NULL;
+                       recycle_iov_buf(card, iovb);
+@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                           ("nicstar%d: Expected a large buffer, and this is not one.\n",
+                            card->index);
+                       which_list(card, skb);
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+                       recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+                                             NS_PRV_IOVCNT(iovb));
+                       vc->rx_iov = NULL;
+@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                               printk(" - PDU size mismatch.\n");
+                       else
+                               printk(".\n");
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+                       recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+                                             NS_PRV_IOVCNT(iovb));
+                       vc->rx_iov = NULL;
+@@ -2166,7 +2166,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                       /* skb points to a small buffer */
+                       if (!atm_charge(vcc, skb->truesize)) {
+                               push_rxbufs(card, skb);
+-                              atomic_inc(&vcc->stats->rx_drop);
++                              atomic_inc_unchecked(&vcc->stats->rx_drop);
+                       } else {
+                               skb_put(skb, len);
+                               dequeue_sm_buf(card, skb);
+@@ -2176,7 +2176,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                               ATM_SKB(skb)->vcc = vcc;
+                               __net_timestamp(skb);
+                               vcc->push(vcc, skb);
+-                              atomic_inc(&vcc->stats->rx);
++                              atomic_inc_unchecked(&vcc->stats->rx);
+                       }
+               } else if (NS_PRV_IOVCNT(iovb) == 2) {  /* One small plus one large buffer */
+                       struct sk_buff *sb;
+@@ -2187,7 +2187,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                       if (len <= NS_SMBUFSIZE) {
+                               if (!atm_charge(vcc, sb->truesize)) {
+                                       push_rxbufs(card, sb);
+-                                      atomic_inc(&vcc->stats->rx_drop);
++                                      atomic_inc_unchecked(&vcc->stats->rx_drop);
+                               } else {
+                                       skb_put(sb, len);
+                                       dequeue_sm_buf(card, sb);
+@@ -2197,7 +2197,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                                       ATM_SKB(sb)->vcc = vcc;
+                                       __net_timestamp(sb);
+                                       vcc->push(vcc, sb);
+-                                      atomic_inc(&vcc->stats->rx);
++                                      atomic_inc_unchecked(&vcc->stats->rx);
+                               }
+                               push_rxbufs(card, skb);
+@@ -2206,7 +2206,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                               if (!atm_charge(vcc, skb->truesize)) {
+                                       push_rxbufs(card, skb);
+-                                      atomic_inc(&vcc->stats->rx_drop);
++                                      atomic_inc_unchecked(&vcc->stats->rx_drop);
+                               } else {
+                                       dequeue_lg_buf(card, skb);
+ #ifdef NS_USE_DESTRUCTORS
+@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                                       ATM_SKB(skb)->vcc = vcc;
+                                       __net_timestamp(skb);
+                                       vcc->push(vcc, skb);
+-                                      atomic_inc(&vcc->stats->rx);
++                                      atomic_inc_unchecked(&vcc->stats->rx);
+                               }
+                               push_rxbufs(card, sb);
+@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                                       printk
+                                           ("nicstar%d: Out of huge buffers.\n",
+                                            card->index);
+-                                      atomic_inc(&vcc->stats->rx_drop);
++                                      atomic_inc_unchecked(&vcc->stats->rx_drop);
+                                       recycle_iovec_rx_bufs(card,
+                                                             (struct iovec *)
+                                                             iovb->data,
+@@ -2291,7 +2291,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                                       card->hbpool.count++;
+                               } else
+                                       dev_kfree_skb_any(hb);
+-                              atomic_inc(&vcc->stats->rx_drop);
++                              atomic_inc_unchecked(&vcc->stats->rx_drop);
+                       } else {
+                               /* Copy the small buffer to the huge buffer */
+                               sb = (struct sk_buff *)iov->iov_base;
+@@ -2328,7 +2328,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+ #endif /* NS_USE_DESTRUCTORS */
+                               __net_timestamp(hb);
+                               vcc->push(vcc, hb);
+-                              atomic_inc(&vcc->stats->rx);
++                              atomic_inc_unchecked(&vcc->stats->rx);
+                       }
+               }
+diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
+index 32784d1..4a8434a 100644
+--- a/drivers/atm/solos-pci.c
++++ b/drivers/atm/solos-pci.c
+@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
+                               }
+                               atm_charge(vcc, skb->truesize);
+                               vcc->push(vcc, skb);
+-                              atomic_inc(&vcc->stats->rx);
++                              atomic_inc_unchecked(&vcc->stats->rx);
+                               break;
+                       case PKT_STATUS:
+@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
+                       vcc = SKB_CB(oldskb)->vcc;
+                       if (vcc) {
+-                              atomic_inc(&vcc->stats->tx);
++                              atomic_inc_unchecked(&vcc->stats->tx);
+                               solos_pop(vcc, oldskb);
+                       } else {
+                               dev_kfree_skb_irq(oldskb);
+diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
+index 0215934..ce9f5b1 100644
+--- a/drivers/atm/suni.c
++++ b/drivers/atm/suni.c
+@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
+ #define ADD_LIMITED(s,v) \
+-    atomic_add((v),&stats->s); \
+-    if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
++    atomic_add_unchecked((v),&stats->s); \
++    if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
+ static void suni_hz(unsigned long from_timer)
+diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
+index 5120a96..e2572bd 100644
+--- a/drivers/atm/uPD98402.c
++++ b/drivers/atm/uPD98402.c
+@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
+       struct sonet_stats tmp;
+       int error = 0;
+-      atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
++      atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
+       sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
+       if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
+       if (zero && !error) {
+@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
+ #define ADD_LIMITED(s,v) \
+-    { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
+-    if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
+-      atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
++    { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
++    if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
++      atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
+ static void stat_event(struct atm_dev *dev)
+@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
+               if (reason & uPD98402_INT_PFM) stat_event(dev);
+               if (reason & uPD98402_INT_PCO) {
+                       (void) GET(PCOCR); /* clear interrupt cause */
+-                      atomic_add(GET(HECCT),
++                      atomic_add_unchecked(GET(HECCT),
+                           &PRIV(dev)->sonet_stats.uncorr_hcs);
+               }
+               if ((reason & uPD98402_INT_RFO) && 
+@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
+       PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
+         uPD98402_INT_LOS),PIMR); /* enable them */
+       (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
+-      atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
+-      atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
+-      atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
++      atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
++      atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
++      atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
+       return 0;
+ }
+diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
+index 969c3c2..9b72956 100644
+--- a/drivers/atm/zatm.c
++++ b/drivers/atm/zatm.c
+@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
+               }
+               if (!size) {
+                       dev_kfree_skb_irq(skb);
+-                      if (vcc) atomic_inc(&vcc->stats->rx_err);
++                      if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
+                       continue;
+               }
+               if (!atm_charge(vcc,skb->truesize)) {
+@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
+               skb->len = size;
+               ATM_SKB(skb)->vcc = vcc;
+               vcc->push(vcc,skb);
+-              atomic_inc(&vcc->stats->rx);
++              atomic_inc_unchecked(&vcc->stats->rx);
+       }
+       zout(pos & 0xffff,MTA(mbx));
+ #if 0 /* probably a stupid idea */
+@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
+                       skb_queue_head(&zatm_vcc->backlog,skb);
+                       break;
+               }
+-      atomic_inc(&vcc->stats->tx);
++      atomic_inc_unchecked(&vcc->stats->tx);
+       wake_up(&zatm_vcc->tx_wait);
+ }
+diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
+index d78b204..ecc1929 100644
+--- a/drivers/base/attribute_container.c
++++ b/drivers/base/attribute_container.c
+@@ -167,7 +167,7 @@ attribute_container_add_device(struct device *dev,
+               ic->classdev.parent = get_device(dev);
+               ic->classdev.class = cont->class;
+               cont->class->dev_release = attribute_container_release;
+-              dev_set_name(&ic->classdev, dev_name(dev));
++              dev_set_name(&ic->classdev, "%s", dev_name(dev));
+               if (fn)
+                       fn(cont, dev, &ic->classdev);
+               else
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index d414331..b4dd4ba 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -1163,7 +1163,7 @@ int subsys_interface_register(struct subsys_interface *sif)
+               return -EINVAL;
+       mutex_lock(&subsys->p->mutex);
+-      list_add_tail(&sif->node, &subsys->p->interfaces);
++      pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
+       if (sif->add_dev) {
+               subsys_dev_iter_init(&iter, subsys, NULL, NULL);
+               while ((dev = subsys_dev_iter_next(&iter)))
+@@ -1188,7 +1188,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
+       subsys = sif->subsys;
+       mutex_lock(&subsys->p->mutex);
+-      list_del_init(&sif->node);
++      pax_list_del_init((struct list_head *)&sif->node);
+       if (sif->remove_dev) {
+               subsys_dev_iter_init(&iter, subsys, NULL, NULL);
+               while ((dev = subsys_dev_iter_next(&iter)))
+diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
+index 7413d06..79155fa 100644
+--- a/drivers/base/devtmpfs.c
++++ b/drivers/base/devtmpfs.c
+@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
+       if (!thread)
+               return 0;
+-      err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
++      err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
+       if (err)
+               printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
+       else
+@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
+       *err = sys_unshare(CLONE_NEWNS);
+       if (*err)
+               goto out;
+-      *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
++      *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
+       if (*err)
+               goto out;
+-      sys_chdir("/.."); /* will traverse into overmounted root */
+-      sys_chroot(".");
++      sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
++      sys_chroot((char __force_user *)".");
+       complete(&setup_done);
+       while (1) {
+               spin_lock(&req_lock);
+diff --git a/drivers/base/node.c b/drivers/base/node.c
+index 7616a77c..8f57f51 100644
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -626,7 +626,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
+ struct node_attr {
+       struct device_attribute attr;
+       enum node_states state;
+-};
++} __do_const;
+ static ssize_t show_node_state(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 7072404..76dcebd 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1850,7 +1850,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
+ {
+       struct cpuidle_driver *cpuidle_drv;
+       struct gpd_cpu_data *cpu_data;
+-      struct cpuidle_state *idle_state;
++      cpuidle_state_no_const *idle_state;
+       int ret = 0;
+       if (IS_ERR_OR_NULL(genpd) || state < 0)
+@@ -1918,7 +1918,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
+ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+ {
+       struct gpd_cpu_data *cpu_data;
+-      struct cpuidle_state *idle_state;
++      cpuidle_state_no_const *idle_state;
+       int ret = 0;
+       if (IS_ERR_OR_NULL(genpd))
+diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
+index a53ebd2..8f73eeb 100644
+--- a/drivers/base/power/sysfs.c
++++ b/drivers/base/power/sysfs.c
+@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
+                       return -EIO;
+               }
+       }
+-      return sprintf(buf, p);
++      return sprintf(buf, "%s", p);
+ }
+ static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
+diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
+index 79715e7..df06b3b 100644
+--- a/drivers/base/power/wakeup.c
++++ b/drivers/base/power/wakeup.c
+@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
+  * They need to be modified together atomically, so it's better to use one
+  * atomic variable to hold them both.
+  */
+-static atomic_t combined_event_count = ATOMIC_INIT(0);
++static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
+ #define IN_PROGRESS_BITS      (sizeof(int) * 4)
+ #define MAX_IN_PROGRESS               ((1 << IN_PROGRESS_BITS) - 1)
+ static void split_counters(unsigned int *cnt, unsigned int *inpr)
+ {
+-      unsigned int comb = atomic_read(&combined_event_count);
++      unsigned int comb = atomic_read_unchecked(&combined_event_count);
+       *cnt = (comb >> IN_PROGRESS_BITS);
+       *inpr = comb & MAX_IN_PROGRESS;
+@@ -395,7 +395,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
+               ws->start_prevent_time = ws->last_time;
+       /* Increment the counter of events in progress. */
+-      cec = atomic_inc_return(&combined_event_count);
++      cec = atomic_inc_return_unchecked(&combined_event_count);
+       trace_wakeup_source_activate(ws->name, cec);
+ }
+@@ -521,7 +521,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
+        * Increment the counter of registered wakeup events and decrement the
+        * couter of wakeup events in progress simultaneously.
+        */
+-      cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
++      cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
+       trace_wakeup_source_deactivate(ws->name, cec);
+       split_counters(&cnt, &inpr);
+diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
+index e8d11b6..7b1b36f 100644
+--- a/drivers/base/syscore.c
++++ b/drivers/base/syscore.c
+@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
+ void register_syscore_ops(struct syscore_ops *ops)
+ {
+       mutex_lock(&syscore_ops_lock);
+-      list_add_tail(&ops->node, &syscore_ops_list);
++      pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
+       mutex_unlock(&syscore_ops_lock);
+ }
+ EXPORT_SYMBOL_GPL(register_syscore_ops);
+@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
+ void unregister_syscore_ops(struct syscore_ops *ops)
+ {
+       mutex_lock(&syscore_ops_lock);
+-      list_del(&ops->node);
++      pax_list_del((struct list_head *)&ops->node);
+       mutex_unlock(&syscore_ops_lock);
+ }
+ EXPORT_SYMBOL_GPL(unregister_syscore_ops);
+diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
+index 62b6c2c..4a11354 100644
+--- a/drivers/block/cciss.c
++++ b/drivers/block/cciss.c
+@@ -1189,6 +1189,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
+       int err;
+       u32 cp;
++      memset(&arg64, 0, sizeof(arg64));
++
+       err = 0;
+       err |=
+           copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
+@@ -3010,7 +3012,7 @@ static void start_io(ctlr_info_t *h)
+       while (!list_empty(&h->reqQ)) {
+               c = list_entry(h->reqQ.next, CommandList_struct, list);
+               /* can't do anything if fifo is full */
+-              if ((h->access.fifo_full(h))) {
++              if ((h->access->fifo_full(h))) {
+                       dev_warn(&h->pdev->dev, "fifo full\n");
+                       break;
+               }
+@@ -3020,7 +3022,7 @@ static void start_io(ctlr_info_t *h)
+               h->Qdepth--;
+               /* Tell the controller execute command */
+-              h->access.submit_command(h, c);
++              h->access->submit_command(h, c);
+               /* Put job onto the completed Q */
+               addQ(&h->cmpQ, c);
+@@ -3446,17 +3448,17 @@ startio:
+ static inline unsigned long get_next_completion(ctlr_info_t *h)
+ {
+-      return h->access.command_completed(h);
++      return h->access->command_completed(h);
+ }
+ static inline int interrupt_pending(ctlr_info_t *h)
+ {
+-      return h->access.intr_pending(h);
++      return h->access->intr_pending(h);
+ }
+ static inline long interrupt_not_for_us(ctlr_info_t *h)
+ {
+-      return ((h->access.intr_pending(h) == 0) ||
++      return ((h->access->intr_pending(h) == 0) ||
+               (h->interrupts_enabled == 0));
+ }
+@@ -3489,7 +3491,7 @@ static inline u32 next_command(ctlr_info_t *h)
+       u32 a;
+       if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+-              return h->access.command_completed(h);
++              return h->access->command_completed(h);
+       if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+               a = *(h->reply_pool_head); /* Next cmd in ring buffer */
+@@ -4046,7 +4048,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
+               trans_support & CFGTBL_Trans_use_short_tags);
+       /* Change the access methods to the performant access methods */
+-      h->access = SA5_performant_access;
++      h->access = &SA5_performant_access;
+       h->transMethod = CFGTBL_Trans_Performant;
+       return;
+@@ -4319,7 +4321,7 @@ static int cciss_pci_init(ctlr_info_t *h)
+       if (prod_index < 0)
+               return -ENODEV;
+       h->product_name = products[prod_index].product_name;
+-      h->access = *(products[prod_index].access);
++      h->access = products[prod_index].access;
+       if (cciss_board_disabled(h)) {
+               dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
+@@ -5051,7 +5053,7 @@ reinit_after_soft_reset:
+       }
+       /* make sure the board interrupts are off */
+-      h->access.set_intr_mask(h, CCISS_INTR_OFF);
++      h->access->set_intr_mask(h, CCISS_INTR_OFF);
+       rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
+       if (rc)
+               goto clean2;
+@@ -5101,7 +5103,7 @@ reinit_after_soft_reset:
+                * fake ones to scoop up any residual completions.
+                */
+               spin_lock_irqsave(&h->lock, flags);
+-              h->access.set_intr_mask(h, CCISS_INTR_OFF);
++              h->access->set_intr_mask(h, CCISS_INTR_OFF);
+               spin_unlock_irqrestore(&h->lock, flags);
+               free_irq(h->intr[h->intr_mode], h);
+               rc = cciss_request_irq(h, cciss_msix_discard_completions,
+@@ -5121,9 +5123,9 @@ reinit_after_soft_reset:
+               dev_info(&h->pdev->dev, "Board READY.\n");
+               dev_info(&h->pdev->dev,
+                       "Waiting for stale completions to drain.\n");
+-              h->access.set_intr_mask(h, CCISS_INTR_ON);
++              h->access->set_intr_mask(h, CCISS_INTR_ON);
+               msleep(10000);
+-              h->access.set_intr_mask(h, CCISS_INTR_OFF);
++              h->access->set_intr_mask(h, CCISS_INTR_OFF);
+               rc = controller_reset_failed(h->cfgtable);
+               if (rc)
+@@ -5146,7 +5148,7 @@ reinit_after_soft_reset:
+       cciss_scsi_setup(h);
+       /* Turn the interrupts on so we can service requests */
+-      h->access.set_intr_mask(h, CCISS_INTR_ON);
++      h->access->set_intr_mask(h, CCISS_INTR_ON);
+       /* Get the firmware version */
+       inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
+@@ -5218,7 +5220,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
+       kfree(flush_buf);
+       if (return_code != IO_OK)
+               dev_warn(&h->pdev->dev, "Error flushing cache\n");
+-      h->access.set_intr_mask(h, CCISS_INTR_OFF);
++      h->access->set_intr_mask(h, CCISS_INTR_OFF);
+       free_irq(h->intr[h->intr_mode], h);
+ }
+diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
+index 7fda30e..eb5dfe0 100644
+--- a/drivers/block/cciss.h
++++ b/drivers/block/cciss.h
+@@ -101,7 +101,7 @@ struct ctlr_info
+       /* information about each logical volume */
+       drive_info_struct *drv[CISS_MAX_LUN];
+-      struct access_method access;
++      struct access_method *access;
+       /* queue and queue Info */ 
+       struct list_head reqQ;
+diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
+index 639d26b..fd6ad1f 100644
+--- a/drivers/block/cpqarray.c
++++ b/drivers/block/cpqarray.c
+@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
+       if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
+               goto Enomem4;
+       }
+-      hba[i]->access.set_intr_mask(hba[i], 0);
++      hba[i]->access->set_intr_mask(hba[i], 0);
+       if (request_irq(hba[i]->intr, do_ida_intr,
+               IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
+       {
+@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
+       add_timer(&hba[i]->timer);
+       /* Enable IRQ now that spinlock and rate limit timer are set up */
+-      hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
++      hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
+       for(j=0; j<NWD; j++) {
+               struct gendisk *disk = ida_gendisk[i][j];
+@@ -694,7 +694,7 @@ DBGINFO(
+       for(i=0; i<NR_PRODUCTS; i++) {
+               if (board_id == products[i].board_id) {
+                       c->product_name = products[i].product_name;
+-                      c->access = *(products[i].access);
++                      c->access = products[i].access;
+                       break;
+               }
+       }
+@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
+               hba[ctlr]->intr = intr;
+               sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
+               hba[ctlr]->product_name = products[j].product_name;
+-              hba[ctlr]->access = *(products[j].access);
++              hba[ctlr]->access = products[j].access;
+               hba[ctlr]->ctlr = ctlr;
+               hba[ctlr]->board_id = board_id;
+               hba[ctlr]->pci_dev = NULL; /* not PCI */
+@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
+       while((c = h->reqQ) != NULL) {
+               /* Can't do anything if we're busy */
+-              if (h->access.fifo_full(h) == 0)
++              if (h->access->fifo_full(h) == 0)
+                       return;
+               /* Get the first entry from the request Q */
+@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
+               h->Qdepth--;
+       
+               /* Tell the controller to do our bidding */
+-              h->access.submit_command(h, c);
++              h->access->submit_command(h, c);
+               /* Get onto the completion Q */
+               addQ(&h->cmpQ, c);
+@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
+       unsigned long flags;
+       __u32 a,a1;
+-      istat = h->access.intr_pending(h);
++      istat = h->access->intr_pending(h);
+       /* Is this interrupt for us? */
+       if (istat == 0)
+               return IRQ_NONE;
+@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
+        */
+       spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
+       if (istat & FIFO_NOT_EMPTY) {
+-              while((a = h->access.command_completed(h))) {
++              while((a = h->access->command_completed(h))) {
+                       a1 = a; a &= ~3;
+                       if ((c = h->cmpQ) == NULL)
+                       {  
+@@ -1193,6 +1193,7 @@ out_passthru:
+               ida_pci_info_struct pciinfo;
+               if (!arg) return -EINVAL;
++              memset(&pciinfo, 0, sizeof(pciinfo));
+               pciinfo.bus = host->pci_dev->bus->number;
+               pciinfo.dev_fn = host->pci_dev->devfn;
+               pciinfo.board_id = host->board_id;
+@@ -1447,11 +1448,11 @@ static int sendcmd(
+       /*
+        * Disable interrupt
+        */
+-      info_p->access.set_intr_mask(info_p, 0);
++      info_p->access->set_intr_mask(info_p, 0);
+       /* Make sure there is room in the command FIFO */
+       /* Actually it should be completely empty at this time. */
+       for (i = 200000; i > 0; i--) {
+-              temp = info_p->access.fifo_full(info_p);
++              temp = info_p->access->fifo_full(info_p);
+               if (temp != 0) {
+                       break;
+               }
+@@ -1464,7 +1465,7 @@ DBG(
+       /*
+        * Send the cmd
+        */
+-      info_p->access.submit_command(info_p, c);
++      info_p->access->submit_command(info_p, c);
+       complete = pollcomplete(ctlr);
+       
+       pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr, 
+@@ -1547,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
+        * we check the new geometry.  Then turn interrupts back on when
+        * we're done.
+        */
+-      host->access.set_intr_mask(host, 0);
++      host->access->set_intr_mask(host, 0);
+       getgeometry(ctlr);
+-      host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
++      host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
+       for(i=0; i<NWD; i++) {
+               struct gendisk *disk = ida_gendisk[ctlr][i];
+@@ -1589,7 +1590,7 @@ static int pollcomplete(int ctlr)
+       /* Wait (up to 2 seconds) for a command to complete */
+       for (i = 200000; i > 0; i--) {
+-              done = hba[ctlr]->access.command_completed(hba[ctlr]);
++              done = hba[ctlr]->access->command_completed(hba[ctlr]);
+               if (done == 0) {
+                       udelay(10);     /* a short fixed delay */
+               } else
+diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
+index be73e9d..7fbf140 100644
+--- a/drivers/block/cpqarray.h
++++ b/drivers/block/cpqarray.h
+@@ -99,7 +99,7 @@ struct ctlr_info {
+       drv_info_t      drv[NWD];
+       struct proc_dir_entry *proc;
+-      struct access_method access;
++      struct access_method *access;
+       cmdlist_t *reqQ;
+       cmdlist_t *cmpQ;
+diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
+index f943aac..99bfd19 100644
+--- a/drivers/block/drbd/drbd_int.h
++++ b/drivers/block/drbd/drbd_int.h
+@@ -582,7 +582,7 @@ struct drbd_epoch {
+       struct drbd_tconn *tconn;
+       struct list_head list;
+       unsigned int barrier_nr;
+-      atomic_t epoch_size; /* increased on every request added. */
++      atomic_unchecked_t epoch_size; /* increased on every request added. */
+       atomic_t active;     /* increased on every req. added, and dec on every finished. */
+       unsigned long flags;
+ };
+@@ -1021,7 +1021,7 @@ struct drbd_conf {
+       unsigned int al_tr_number;
+       int al_tr_cycle;
+       wait_queue_head_t seq_wait;
+-      atomic_t packet_seq;
++      atomic_unchecked_t packet_seq;
+       unsigned int peer_seq;
+       spinlock_t peer_seq_lock;
+       unsigned int minor;
+@@ -1562,7 +1562,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
+       char __user *uoptval;
+       int err;
+-      uoptval = (char __user __force *)optval;
++      uoptval = (char __force_user *)optval;
+       set_fs(KERNEL_DS);
+       if (level == SOL_SOCKET)
+diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
+index a5dca6a..bb27967 100644
+--- a/drivers/block/drbd/drbd_main.c
++++ b/drivers/block/drbd/drbd_main.c
+@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
+       p->sector = sector;
+       p->block_id = block_id;
+       p->blksize = blksize;
+-      p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
++      p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
+       return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
+ }
+@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
+               return -EIO;
+       p->sector = cpu_to_be64(req->i.sector);
+       p->block_id = (unsigned long)req;
+-      p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
++      p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
+       dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
+       if (mdev->state.conn >= C_SYNC_SOURCE &&
+           mdev->state.conn <= C_PAUSED_SYNC_T)
+@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
+ {
+       struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
+-      if (atomic_read(&tconn->current_epoch->epoch_size) !=  0)
+-              conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
++      if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) !=  0)
++              conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
+       kfree(tconn->current_epoch);
+       idr_destroy(&tconn->volumes);
+diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
+index 4222aff..1f79506 100644
+--- a/drivers/block/drbd/drbd_receiver.c
++++ b/drivers/block/drbd/drbd_receiver.c
+@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_conf *mdev)
+ {
+       int err;
+-      atomic_set(&mdev->packet_seq, 0);
++      atomic_set_unchecked(&mdev->packet_seq, 0);
+       mdev->peer_seq = 0;
+       mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
+@@ -1193,7 +1193,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
+       do {
+               next_epoch = NULL;
+-              epoch_size = atomic_read(&epoch->epoch_size);
++              epoch_size = atomic_read_unchecked(&epoch->epoch_size);
+               switch (ev & ~EV_CLEANUP) {
+               case EV_PUT:
+@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
+                                       rv = FE_DESTROYED;
+                       } else {
+                               epoch->flags = 0;
+-                              atomic_set(&epoch->epoch_size, 0);
++                              atomic_set_unchecked(&epoch->epoch_size, 0);
+                               /* atomic_set(&epoch->active, 0); is already zero */
+                               if (rv == FE_STILL_LIVE)
+                                       rv = FE_RECYCLED;
+@@ -1451,7 +1451,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
+               conn_wait_active_ee_empty(tconn);
+               drbd_flush(tconn);
+-              if (atomic_read(&tconn->current_epoch->epoch_size)) {
++              if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
+                       epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
+                       if (epoch)
+                               break;
+@@ -1464,11 +1464,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
+       }
+       epoch->flags = 0;
+-      atomic_set(&epoch->epoch_size, 0);
++      atomic_set_unchecked(&epoch->epoch_size, 0);
+       atomic_set(&epoch->active, 0);
+       spin_lock(&tconn->epoch_lock);
+-      if (atomic_read(&tconn->current_epoch->epoch_size)) {
++      if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
+               list_add(&epoch->list, &tconn->current_epoch->list);
+               tconn->current_epoch = epoch;
+               tconn->epochs++;
+@@ -2172,7 +2172,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
+               err = wait_for_and_update_peer_seq(mdev, peer_seq);
+               drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
+-              atomic_inc(&tconn->current_epoch->epoch_size);
++              atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
+               err2 = drbd_drain_block(mdev, pi->size);
+               if (!err)
+                       err = err2;
+@@ -2206,7 +2206,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
+       spin_lock(&tconn->epoch_lock);
+       peer_req->epoch = tconn->current_epoch;
+-      atomic_inc(&peer_req->epoch->epoch_size);
++      atomic_inc_unchecked(&peer_req->epoch->epoch_size);
+       atomic_inc(&peer_req->epoch->active);
+       spin_unlock(&tconn->epoch_lock);
+@@ -4347,7 +4347,7 @@ struct data_cmd {
+       int expect_payload;
+       size_t pkt_size;
+       int (*fn)(struct drbd_tconn *, struct packet_info *);
+-};
++} __do_const;
+ static struct data_cmd drbd_cmd_handler[] = {
+       [P_DATA]            = { 1, sizeof(struct p_data), receive_Data },
+@@ -4467,7 +4467,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
+       if (!list_empty(&tconn->current_epoch->list))
+               conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
+       /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
+-      atomic_set(&tconn->current_epoch->epoch_size, 0);
++      atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
+       tconn->send.seen_any_write_yet = false;
+       conn_info(tconn, "Connection closed\n");
+@@ -5223,7 +5223,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
+ struct asender_cmd {
+       size_t pkt_size;
+       int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
+-};
++} __do_const;
+ static struct asender_cmd asender_tbl[] = {
+       [P_PING]            = { 0, got_Ping },
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index d92d50f..a7e9d97 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
+       file_start_write(file);
+       set_fs(get_ds());
+-      bw = file->f_op->write(file, buf, len, &pos);
++      bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
+       set_fs(old_fs);
+       file_end_write(file);
+       if (likely(bw == len))
+diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
+index f5d0ea1..c62380a 100644
+--- a/drivers/block/pktcdvd.c
++++ b/drivers/block/pktcdvd.c
+@@ -84,7 +84,7 @@
+ #define MAX_SPEED 0xffff
+ #define ZONE(sector, pd) (((sector) + (pd)->offset) & \
+-                      ~(sector_t)((pd)->settings.size - 1))
++                      ~(sector_t)((pd)->settings.size - 1UL))
+ static DEFINE_MUTEX(pktcdvd_mutex);
+ static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index 8a3aff7..d7538c2 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
+       ENSURE(reset, CDC_RESET);
+       ENSURE(generic_packet, CDC_GENERIC_PACKET);
+       cdi->mc_flags = 0;
+-      cdo->n_minors = 0;
+         cdi->options = CDO_USE_FFLAGS;
+       
+       if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
+@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
+       else
+               cdi->cdda_method = CDDA_OLD;
+-      if (!cdo->generic_packet)
+-              cdo->generic_packet = cdrom_dummy_generic_packet;
++      if (!cdo->generic_packet) {
++              pax_open_kernel();
++              *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
++              pax_close_kernel();
++      }
+       cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
+       mutex_lock(&cdrom_mutex);
+@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
+       if (cdi->exit)
+               cdi->exit(cdi);
+-      cdi->ops->n_minors--;
+       cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
+ }
+@@ -2107,7 +2108,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
+        */
+       nr = nframes;
+       do {
+-              cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
++              cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
+               if (cgc.buffer)
+                       break;
+@@ -3429,7 +3430,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
+       struct cdrom_device_info *cdi;
+       int ret;
+-      ret = scnprintf(info + *pos, max_size - *pos, header);
++      ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
+       if (!ret)
+               return 1;
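Two small hardenings sit in the cdrom hunks above: the CDDA read buffer is now allocated with kzalloc() so a short or failed device read cannot leave stale heap contents in memory that is later handed to user space, and cdrom_print_info() passes its header through a "%s" conversion rather than using it as a format string (the same pattern as the intel-rng fix further down). A userspace sketch of the allocation change, with calloc() standing in for kzalloc():

    #include <stdlib.h>

    #define CD_FRAMESIZE_RAW 2352   /* raw audio frame size, as in the driver */

    int main(void)
    {
        int nr = 8;
        /* Zeroed up front, so whatever the device does not fill in cannot
         * leak old heap data when the buffer is copied out. */
        unsigned char *buffer = calloc(nr, CD_FRAMESIZE_RAW);

        if (!buffer)
            return 1;

        /* ... the device would fill some or all of the buffer here ... */

        free(buffer);
        return 0;
    }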
+diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
+index 4afcb65..a68a32d 100644
+--- a/drivers/cdrom/gdrom.c
++++ b/drivers/cdrom/gdrom.c
+@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
+       .audio_ioctl            = gdrom_audio_ioctl,
+       .capability             = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
+                                 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
+-      .n_minors               = 1,
+ };
+ static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
+diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
+index 3bb6fa3..34013fb 100644
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
+ config DEVKMEM
+       bool "/dev/kmem virtual device support"
+-      default y
++      default n
++      depends on !GRKERNSEC_KMEM
+       help
+         Say Y here if you want to support the /dev/kmem device. The
+         /dev/kmem device is rarely used, but can be used for certain
+@@ -582,6 +583,7 @@ config DEVPORT
+       bool
+       depends on !M68K
+       depends on ISA || PCI
++      depends on !GRKERNSEC_KMEM
+       default y
+ source "drivers/s390/char/Kconfig"
+diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
+index a48e05b..6bac831 100644
+--- a/drivers/char/agp/compat_ioctl.c
++++ b/drivers/char/agp/compat_ioctl.c
+@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
+                       return -ENOMEM;
+               }
+-              if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
++              if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
+                                  sizeof(*usegment) * ureserve.seg_count)) {
+                       kfree(usegment);
+                       kfree(ksegment);
+diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
+index 2e04433..771f2cc 100644
+--- a/drivers/char/agp/frontend.c
++++ b/drivers/char/agp/frontend.c
+@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
+       if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
+               return -EFAULT;
+-      if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
++      if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
+               return -EFAULT;
+       client = agp_find_client_by_pid(reserve.pid);
+@@ -847,7 +847,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
+               if (segment == NULL)
+                       return -ENOMEM;
+-              if (copy_from_user(segment, (void __user *) reserve.seg_list,
++              if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
+                                  sizeof(struct agp_segment) * reserve.seg_count)) {
+                       kfree(segment);
+                       return -EFAULT;
+diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
+index 4f94375..413694e 100644
+--- a/drivers/char/genrtc.c
++++ b/drivers/char/genrtc.c
+@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
+       switch (cmd) {
+       case RTC_PLL_GET:
++          memset(&pll, 0, sizeof(pll));
+           if (get_rtc_pll(&pll))
+                   return -EINVAL;
+           else
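The one-line genrtc change above zeroes the rtc_pll_info structure before get_rtc_pll() fills it; without that, padding bytes or fields the backend never writes would be copied to user space carrying whatever was left on the kernel stack. A userspace sketch of the pattern, using a hypothetical struct layout:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for struct rtc_pll_info; the exact layout does
     * not matter, only that some bytes (padding, unused fields) may stay
     * unwritten. */
    struct pll_sketch {
        int  ctrl;
        int  value;
        long clock;
    };

    static void fill_partially(struct pll_sketch *pll)
    {
        pll->ctrl = 1;   /* value, clock and any padding are left untouched */
    }

    int main(void)
    {
        struct pll_sketch pll;

        memset(&pll, 0, sizeof(pll));   /* the hardening added in the hunk */
        fill_partially(&pll);
        /* Every byte is now defined before it would be copied out. */
        printf("%d %d %ld\n", pll.ctrl, pll.value, pll.clock);
        return 0;
    }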
+diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
+index d784650..e8bfd69 100644
+--- a/drivers/char/hpet.c
++++ b/drivers/char/hpet.c
+@@ -559,7 +559,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
+ }
+ static int
+-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
++hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
+                 struct hpet_info *info)
+ {
+       struct hpet_timer __iomem *timer;
+diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
+index 86fe45c..c0ea948 100644
+--- a/drivers/char/hw_random/intel-rng.c
++++ b/drivers/char/hw_random/intel-rng.c
+@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
+               if (no_fwh_detect)
+                       return -ENODEV;
+-              printk(warning);
++              printk("%s", warning);
+               return -EBUSY;
+       }
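Printing the warning through a "%s" conversion, as the intel-rng hunk above does, closes a classic format-string hole: a message that ever grows a '%' sequence would otherwise be parsed for conversion specifiers and read arguments that are not there. Minimal userspace analogue (the message is hypothetical; the real driver string has no '%', but the safe form costs nothing and survives future edits):

    #include <stdio.h>

    int main(void)
    {
        const char *warning = "RNG reports 100%s capacity loss";

        /* printf(warning);  -- would try to read a char * argument that
         *                      was never passed                          */
        printf("%s\n", warning);
        return 0;
    }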
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 4445fa1..7c6de37 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -420,7 +420,7 @@ struct ipmi_smi {
+       struct proc_dir_entry *proc_dir;
+       char                  proc_dir_name[10];
+-      atomic_t stats[IPMI_NUM_STATS];
++      atomic_unchecked_t stats[IPMI_NUM_STATS];
+       /*
+        * run_to_completion duplicate of smb_info, smi_info
+@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
+ #define ipmi_inc_stat(intf, stat) \
+-      atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
++      atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
+ #define ipmi_get_stat(intf, stat) \
+-      ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
++      ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
+ static int is_lan_addr(struct ipmi_addr *addr)
+ {
+@@ -2883,7 +2883,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
+       INIT_LIST_HEAD(&intf->cmd_rcvrs);
+       init_waitqueue_head(&intf->waitq);
+       for (i = 0; i < IPMI_NUM_STATS; i++)
+-              atomic_set(&intf->stats[i], 0);
++              atomic_set_unchecked(&intf->stats[i], 0);
+       intf->proc_dir = NULL;
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index af4b23f..79806fc 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -275,7 +275,7 @@ struct smi_info {
+       unsigned char slave_addr;
+       /* Counters and things for the proc filesystem. */
+-      atomic_t stats[SI_NUM_STATS];
++      atomic_unchecked_t stats[SI_NUM_STATS];
+       struct task_struct *thread;
+@@ -284,9 +284,9 @@ struct smi_info {
+ };
+ #define smi_inc_stat(smi, stat) \
+-      atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
++      atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
+ #define smi_get_stat(smi, stat) \
+-      ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
++      ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
+ #define SI_MAX_PARMS 4
+@@ -3258,7 +3258,7 @@ static int try_smi_init(struct smi_info *new_smi)
+       atomic_set(&new_smi->req_events, 0);
+       new_smi->run_to_completion = 0;
+       for (i = 0; i < SI_NUM_STATS; i++)
+-              atomic_set(&new_smi->stats[i], 0);
++              atomic_set_unchecked(&new_smi->stats[i], 0);
+       new_smi->interrupt_disabled = 1;
+       atomic_set(&new_smi->stop_operation, 0);
+diff --git a/drivers/char/mem.c b/drivers/char/mem.c
+index 1ccbe94..6ad651a 100644
+--- a/drivers/char/mem.c
++++ b/drivers/char/mem.c
+@@ -18,6 +18,7 @@
+ #include <linux/raw.h>
+ #include <linux/tty.h>
+ #include <linux/capability.h>
++#include <linux/security.h>
+ #include <linux/ptrace.h>
+ #include <linux/device.h>
+ #include <linux/highmem.h>
+@@ -38,6 +39,10 @@
+ #define DEVPORT_MINOR 4
++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
++extern const struct file_operations grsec_fops;
++#endif
++
+ static inline unsigned long size_inside_page(unsigned long start,
+                                            unsigned long size)
+ {
+@@ -69,9 +74,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+       while (cursor < to) {
+               if (!devmem_is_allowed(pfn)) {
++#ifdef CONFIG_GRKERNSEC_KMEM
++                      gr_handle_mem_readwrite(from, to);
++#else
+                       printk(KERN_INFO
+               "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
+                               current->comm, from, to);
++#endif
+                       return 0;
+               }
+               cursor += PAGE_SIZE;
+@@ -79,6 +88,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+       }
+       return 1;
+ }
++#elif defined(CONFIG_GRKERNSEC_KMEM)
++static inline int range_is_allowed(unsigned long pfn, unsigned long size)
++{
++      return 0;
++}
+ #else
+ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+ {
+@@ -121,6 +135,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
+       while (count > 0) {
+               unsigned long remaining;
++              char *temp;
+               sz = size_inside_page(p, count);
+@@ -136,7 +151,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
+               if (!ptr)
+                       return -EFAULT;
+-              remaining = copy_to_user(buf, ptr, sz);
++#ifdef CONFIG_PAX_USERCOPY
++              temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
++              if (!temp) {
++                      unxlate_dev_mem_ptr(p, ptr);
++                      return -ENOMEM;
++              }
++              memcpy(temp, ptr, sz);
++#else
++              temp = ptr;
++#endif
++
++              remaining = copy_to_user(buf, temp, sz);
++
++#ifdef CONFIG_PAX_USERCOPY
++              kfree(temp);
++#endif
++
+               unxlate_dev_mem_ptr(p, ptr);
+               if (remaining)
+                       return -EFAULT;
+@@ -379,7 +410,7 @@ static ssize_t read_oldmem(struct file *file, char __user *buf,
+               else
+                       csize = count;
+-              rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
++              rc = copy_oldmem_page(pfn, (char __force_kernel *)buf, csize, offset, 1);
+               if (rc < 0)
+                       return rc;
+               buf += csize;
+@@ -399,9 +430,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
+                        size_t count, loff_t *ppos)
+ {
+       unsigned long p = *ppos;
+-      ssize_t low_count, read, sz;
++      ssize_t low_count, read, sz, err = 0;
+       char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
+-      int err = 0;
+       read = 0;
+       if (p < (unsigned long) high_memory) {
+@@ -423,6 +453,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
+               }
+ #endif
+               while (low_count > 0) {
++                      char *temp;
++
+                       sz = size_inside_page(p, low_count);
+                       /*
+@@ -432,7 +464,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
+                        */
+                       kbuf = xlate_dev_kmem_ptr((char *)p);
+-                      if (copy_to_user(buf, kbuf, sz))
++#ifdef CONFIG_PAX_USERCOPY
++                      temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
++                      if (!temp)
++                              return -ENOMEM;
++                      memcpy(temp, kbuf, sz);
++#else
++                      temp = kbuf;
++#endif
++
++                      err = copy_to_user(buf, temp, sz);
++
++#ifdef CONFIG_PAX_USERCOPY
++                      kfree(temp);
++#endif
++
++                      if (err)
+                               return -EFAULT;
+                       buf += sz;
+                       p += sz;
+@@ -869,6 +916,9 @@ static const struct memdev {
+ #ifdef CONFIG_CRASH_DUMP
+       [12] = { "oldmem", 0, &oldmem_fops, NULL },
+ #endif
++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
++      [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
++#endif
+ };
+ static int memory_open(struct inode *inode, struct file *filp)
+@@ -940,7 +990,7 @@ static int __init chr_dev_init(void)
+                       continue;
+               device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
+-                            NULL, devlist[minor].name);
++                            NULL, "%s", devlist[minor].name);
+       }
+       return tty_init();
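The read_mem()/read_kmem() hunks above stage data in a kmalloc()ed bounce buffer when CONFIG_PAX_USERCOPY is enabled: the copy to user space then goes through a heap object whose bounds the USERCOPY checks can verify, instead of straight out of an arbitrary kernel mapping. A rough userspace sketch of the staging pattern, with malloc()/memcpy() standing in for kmalloc()/copy_to_user():

    #include <stdlib.h>
    #include <string.h>

    static int copy_out_via_bounce(char *user_buf, const char *src, size_t sz)
    {
        char *temp = malloc(sz);      /* kmalloc(sz, GFP_KERNEL|GFP_USERCOPY) */

        if (!temp)
            return -1;                /* -ENOMEM in the real code */

        memcpy(temp, src, sz);        /* stage into an object of known size */
        memcpy(user_buf, temp, sz);   /* copy_to_user(), checked against temp */
        free(temp);
        return 0;
    }

    int main(void)
    {
        char src[16] = "device data";
        char dst[16];

        return copy_out_via_bounce(dst, src, sizeof(src));
    }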
+diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c
+index c689697..04e6d6a2 100644
+--- a/drivers/char/mwave/tp3780i.c
++++ b/drivers/char/mwave/tp3780i.c
+@@ -479,6 +479,7 @@ int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities
+       PRINTK_2(TRACE_TP3780I,
+               "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData);
++      memset(pAbilities, 0, sizeof(*pAbilities));
+       /* fill out standard constant fields */
+       pAbilities->instr_per_sec = pBDData->rDspSettings.uIps;
+       pAbilities->data_size = pBDData->rDspSettings.uDStoreSize;
+diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
+index 9df78e2..01ba9ae 100644
+--- a/drivers/char/nvram.c
++++ b/drivers/char/nvram.c
+@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
+       spin_unlock_irq(&rtc_lock);
+-      if (copy_to_user(buf, contents, tmp - contents))
++      if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
+               return -EFAULT;
+       *ppos = i;
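The nvram hunk above adds a length sanity check before copy_to_user(): even if the fill loop ever ran the cursor past the end of the on-stack contents[] buffer, the oversized copy would now be refused with -EFAULT instead of leaking adjacent stack memory. A userspace sketch of the same guard (the buffer size below is a stand-in, not the driver's constant):

    #include <stdio.h>
    #include <string.h>

    #define NVRAM_BYTES 114   /* stand-in for the driver's buffer size */

    static long bounded_copy(char *dst, const char *contents, const char *tmp)
    {
        size_t len = (size_t)(tmp - contents);

        if (len > NVRAM_BYTES)      /* never trust the cursor blindly */
            return -1;              /* -EFAULT in the driver */

        memcpy(dst, contents, len); /* stands in for copy_to_user() */
        return (long)len;
    }

    int main(void)
    {
        char contents[NVRAM_BYTES] = "checksum ok";
        char out[NVRAM_BYTES];

        printf("copied %ld bytes\n",
               bounded_copy(out, contents, contents + 11));
        return 0;
    }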
+diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
+index 5c5cc00..ac9edb7 100644
+--- a/drivers/char/pcmcia/synclink_cs.c
++++ b/drivers/char/pcmcia/synclink_cs.c
+@@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
+-                       __FILE__, __LINE__, info->device_name, port->count);
++                       __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
+-      WARN_ON(!port->count);
++      WARN_ON(!atomic_read(&port->count));
+       if (tty_port_close_start(port, tty, filp) == 0)
+               goto cleanup;
+@@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
+ cleanup:
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
+-                      tty->driver->name, port->count);
++                      tty->driver->name, atomic_read(&port->count));
+ }
+ /* Wait until the transmitter is empty.
+@@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
+-                       __FILE__, __LINE__, tty->driver->name, port->count);
++                       __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
+       /* If port is closing, signal caller to try again */
+       if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
+@@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
+               goto cleanup;
+       }
+       spin_lock(&port->lock);
+-      port->count++;
++      atomic_inc(&port->count);
+       spin_unlock(&port->lock);
+       spin_unlock_irqrestore(&info->netlock, flags);
+-      if (port->count == 1) {
++      if (atomic_read(&port->count) == 1) {
+               /* 1st open on this device, init hardware */
+               retval = startup(info, tty);
+               if (retval < 0)
+@@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
+       unsigned short new_crctype;
+       /* return error if TTY interface open */
+-      if (info->port.count)
++      if (atomic_read(&info->port.count))
+               return -EBUSY;
+       switch (encoding)
+@@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
+       /* arbitrate between network and tty opens */
+       spin_lock_irqsave(&info->netlock, flags);
+-      if (info->port.count != 0 || info->netcount != 0) {
++      if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
+               printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
+               spin_unlock_irqrestore(&info->netlock, flags);
+               return -EBUSY;
+@@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+               printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
+       /* return error if TTY interface open */
+-      if (info->port.count)
++      if (atomic_read(&info->port.count))
+               return -EBUSY;
+       if (cmd != SIOCWANDEV)
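The synclink_cs hunks above convert the tty port's open count from a plain int to an atomic, so the ++/-- and the "first open / still open?" tests stay consistent across the several paths that touch the counter without every one of them having to hold the same lock. A C11 sketch of the pattern, with stdatomic standing in for the kernel's atomic API:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int open_count;   /* statics are zero-initialised */

    static void port_open(void)  { atomic_fetch_add(&open_count, 1); }
    static void port_close(void) { atomic_fetch_sub(&open_count, 1); }

    int main(void)
    {
        port_open();
        if (atomic_load(&open_count) == 1)
            puts("first open: initialise the hardware");

        port_close();
        if (atomic_load(&open_count) == 0)
            puts("last close: shut the hardware down");
        return 0;
    }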
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 35487e8..dac8bd1 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -272,8 +272,13 @@
+ /*
+  * Configuration information
+  */
++#ifdef CONFIG_GRKERNSEC_RANDNET
++#define INPUT_POOL_WORDS 512
++#define OUTPUT_POOL_WORDS 128
++#else
+ #define INPUT_POOL_WORDS 128
+ #define OUTPUT_POOL_WORDS 32
++#endif
+ #define SEC_XFER_SIZE 512
+ #define EXTRACT_SIZE 10
+@@ -313,10 +318,17 @@ static struct poolinfo {
+       int poolwords;
+       int tap1, tap2, tap3, tap4, tap5;
+ } poolinfo_table[] = {
++#ifdef CONFIG_GRKERNSEC_RANDNET
++      /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
++      { 512,  411,    308,    208,    104,    1 },
++      /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
++      { 128,  103,    76,     51,     25,     1 },
++#else
+       /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
+       { 128,  103,    76,     51,     25,     1 },
+       /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
+       { 32,   26,     20,     14,     7,      1 },
++#endif
+ #if 0
+       /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1  -- 115 */
+       { 2048, 1638,   1231,   819,    411,    1 },
+@@ -524,8 +536,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
+               input_rotate += i ? 7 : 14;
+       }
+-      ACCESS_ONCE(r->input_rotate) = input_rotate;
+-      ACCESS_ONCE(r->add_ptr) = i;
++      ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
++      ACCESS_ONCE_RW(r->add_ptr) = i;
+       smp_wmb();
+       if (out)
+@@ -1032,7 +1044,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+               extract_buf(r, tmp);
+               i = min_t(int, nbytes, EXTRACT_SIZE);
+-              if (copy_to_user(buf, tmp, i)) {
++              if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
+                       ret = -EFAULT;
+                       break;
+               }
+@@ -1368,7 +1380,7 @@ EXPORT_SYMBOL(generate_random_uuid);
+ #include <linux/sysctl.h>
+ static int min_read_thresh = 8, min_write_thresh;
+-static int max_read_thresh = INPUT_POOL_WORDS * 32;
++static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
+ static int max_write_thresh = INPUT_POOL_WORDS * 32;
+ static char sysctl_bootid[16];
+@@ -1384,7 +1396,7 @@ static char sysctl_bootid[16];
+ static int proc_do_uuid(ctl_table *table, int write,
+                       void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      ctl_table fake_table;
++      ctl_table_no_const fake_table;
+       unsigned char buf[64], tmp_uuid[16], *uuid;
+       uuid = table->data;
+diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
+index bf2349db..5456d53 100644
+--- a/drivers/char/sonypi.c
++++ b/drivers/char/sonypi.c
+@@ -54,6 +54,7 @@
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
++#include <asm/local.h>
+ #include <linux/sonypi.h>
+@@ -490,7 +491,7 @@ static struct sonypi_device {
+       spinlock_t fifo_lock;
+       wait_queue_head_t fifo_proc_list;
+       struct fasync_struct *fifo_async;
+-      int open_count;
++      local_t open_count;
+       int model;
+       struct input_dev *input_jog_dev;
+       struct input_dev *input_key_dev;
+@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
+ static int sonypi_misc_release(struct inode *inode, struct file *file)
+ {
+       mutex_lock(&sonypi_device.lock);
+-      sonypi_device.open_count--;
++      local_dec(&sonypi_device.open_count);
+       mutex_unlock(&sonypi_device.lock);
+       return 0;
+ }
+@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
+ {
+       mutex_lock(&sonypi_device.lock);
+       /* Flush input queue on first open */
+-      if (!sonypi_device.open_count)
++      if (!local_read(&sonypi_device.open_count))
+               kfifo_reset(&sonypi_device.fifo);
+-      sonypi_device.open_count++;
++      local_inc(&sonypi_device.open_count);
+       mutex_unlock(&sonypi_device.lock);
+       return 0;
+diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
+index 64420b3..5c40b56 100644
+--- a/drivers/char/tpm/tpm_acpi.c
++++ b/drivers/char/tpm/tpm_acpi.c
+@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
+       virt = acpi_os_map_memory(start, len);
+       if (!virt) {
+               kfree(log->bios_event_log);
++              log->bios_event_log = NULL;
+               printk("%s: ERROR - Unable to map memory\n", __func__);
+               return -EIO;
+       }
+-      memcpy_fromio(log->bios_event_log, virt, len);
++      memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
+       acpi_os_unmap_memory(virt, len);
+       return 0;
+diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
+index 84ddc55..1d32f1e 100644
+--- a/drivers/char/tpm/tpm_eventlog.c
++++ b/drivers/char/tpm/tpm_eventlog.c
+@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
+       event = addr;
+       if ((event->event_type == 0 && event->event_size == 0) ||
+-          ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
++          (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
+               return NULL;
+       return addr;
+@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
+               return NULL;
+       if ((event->event_type == 0 && event->event_size == 0) ||
+-          ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
++          (event->event_size >= limit - v - sizeof(struct tcpa_event)))
+               return NULL;
+       (*pos)++;
+@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
+       int i;
+       for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
+-              seq_putc(m, data[i]);
++              if (!seq_putc(m, data[i]))
++                      return -EFAULT;
+       return 0;
+ }
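The tpm_eventlog hunks above rewrite the bounds test so the untrusted, firmware-provided event_size is never added to a pointer before the comparison: "addr + header + event_size >= limit" can wrap around and falsely pass, while "event_size >= limit - addr - header" stays inside the already-validated range. A userspace sketch of the safe form; it assumes addr + header has been checked against limit beforehand, as in the driver:

    #include <stdint.h>
    #include <stdio.h>

    static int event_fits(uintptr_t addr, uintptr_t limit,
                          size_t header, uint32_t event_size)
    {
        /* Caller guarantees addr + header <= limit, so this cannot underflow. */
        return event_size < (uint64_t)(limit - addr - header);
    }

    int main(void)
    {
        uintptr_t base = 0x1000, limit = 0x2000;

        printf("%d\n", event_fits(base, limit, 32, 100));         /* 1: fits    */
        printf("%d\n", event_fits(base, limit, 32, 0xffffffffu)); /* 0: refused */
        return 0;
    }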
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index fc45567..fa2a590 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -682,7 +682,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
+       if (to_user) {
+               ssize_t ret;
+-              ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
++              ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
+               if (ret)
+                       return -EFAULT;
+       } else {
+@@ -785,7 +785,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
+       if (!port_has_data(port) && !port->host_connected)
+               return 0;
+-      return fill_readbuf(port, ubuf, count, true);
++      return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
+ }
+ static int wait_port_writable(struct port *port, bool nonblock)
+diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
+index a33f46f..a720eed 100644
+--- a/drivers/clk/clk-composite.c
++++ b/drivers/clk/clk-composite.c
+@@ -122,7 +122,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
+       struct clk *clk;
+       struct clk_init_data init;
+       struct clk_composite *composite;
+-      struct clk_ops *clk_composite_ops;
++      clk_ops_no_const *clk_composite_ops;
+       composite = kzalloc(sizeof(*composite), GFP_KERNEL);
+       if (!composite) {
+diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
+index bd11315..7f87098 100644
+--- a/drivers/clk/socfpga/clk.c
++++ b/drivers/clk/socfpga/clk.c
+@@ -22,6 +22,7 @@
+ #include <linux/clk-provider.h>
+ #include <linux/io.h>
+ #include <linux/of.h>
++#include <asm/pgtable.h>
+ /* Clock Manager offsets */
+ #define CLKMGR_CTRL    0x0
+@@ -135,8 +136,10 @@ static __init struct clk *socfpga_clk_init(struct device_node *node,
+       if (strcmp(clk_name, "main_pll") || strcmp(clk_name, "periph_pll") ||
+                       strcmp(clk_name, "sdram_pll")) {
+               socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
+-              clk_pll_ops.enable = clk_gate_ops.enable;
+-              clk_pll_ops.disable = clk_gate_ops.disable;
++              pax_open_kernel();
++              *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
++              *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
++              pax_close_kernel();
+       }
+       clk = clk_register(NULL, &socfpga_clk->hw.hw);
+diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
+index a2b2541..bc1e7ff 100644
+--- a/drivers/clocksource/arm_arch_timer.c
++++ b/drivers/clocksource/arm_arch_timer.c
+@@ -264,7 +264,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
++static struct notifier_block arch_timer_cpu_nb = {
+       .notifier_call = arch_timer_cpu_notify,
+ };
+diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
+index 350f493..489479e 100644
+--- a/drivers/clocksource/bcm_kona_timer.c
++++ b/drivers/clocksource/bcm_kona_timer.c
+@@ -199,7 +199,7 @@ static struct irqaction kona_timer_irq = {
+       .handler = kona_timer_interrupt,
+ };
+-static void __init kona_timer_init(void)
++static void __init kona_timer_init(struct device_node *np)
+ {
+       kona_timers_init();
+       kona_timer_clockevents_init();
+diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
+index ade7513..069445f 100644
+--- a/drivers/clocksource/metag_generic.c
++++ b/drivers/clocksource/metag_generic.c
+@@ -169,7 +169,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
++static struct notifier_block arch_timer_cpu_nb = {
+       .notifier_call = arch_timer_cpu_notify,
+ };
+diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
+index edc089e..bc7c0bc 100644
+--- a/drivers/cpufreq/acpi-cpufreq.c
++++ b/drivers/cpufreq/acpi-cpufreq.c
+@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
+       return sprintf(buf, "%u\n", boost_enabled);
+ }
+-static struct global_attr global_boost = __ATTR(boost, 0644,
++static global_attr_no_const global_boost = __ATTR(boost, 0644,
+                                               show_global_boost,
+                                               store_global_boost);
+@@ -705,8 +705,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+       data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
+       per_cpu(acfreq_data, cpu) = data;
+-      if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
+-              acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
++      if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
++              pax_open_kernel();
++              *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
++              pax_close_kernel();
++      }
+       result = acpi_processor_register_performance(data->acpi_data, cpu);
+       if (result)
+@@ -832,7 +835,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+               policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
+               break;
+       case ACPI_ADR_SPACE_FIXED_HARDWARE:
+-              acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
++              pax_open_kernel();
++              *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
++              pax_close_kernel();
+               policy->cur = get_cur_freq_on_cpu(cpu);
+               break;
+       default:
+@@ -843,8 +848,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+       acpi_processor_notify_smm(THIS_MODULE);
+       /* Check for APERF/MPERF support in hardware */
+-      if (boot_cpu_has(X86_FEATURE_APERFMPERF))
+-              acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
++      if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
++              pax_open_kernel();
++              *(void **)&acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
++              pax_close_kernel();
++      }
+       pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
+       for (i = 0; i < perf->state_count; i++)
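Several hunks here and further down (acpi-cpufreq above, the cpufreq core, p4-clockmod, speedstep-centrino, ondemand, efi) wrap one-off writes to driver ops structures in pax_open_kernel()/pax_close_kernel(): under the constification hardening those structures end up in read-only memory, so the few legitimate boot-time assignments must briefly lift write protection and go through an explicit cast. A rough userspace analogue using mprotect(); the names and the page handling are purely illustrative, and error checking is omitted:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct driver_ops {
        unsigned int (*get)(unsigned int cpu);
    };

    static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
    {
        return 1000 + cpu;   /* pretend frequency, in MHz */
    }

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);

        /* Give the ops structure its own page so protection can be toggled. */
        struct driver_ops *ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        memset(ops, 0, sizeof(*ops));
        mprotect(ops, pagesz, PROT_READ);               /* normally read-only */

        mprotect(ops, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel()  */
        *(void **)&ops->get = (void *)get_cur_freq_on_cpu; /* same cast idiom
                                                              as the hunks above */
        mprotect(ops, pagesz, PROT_READ);               /* pax_close_kernel() */

        printf("cpu0: %u MHz\n", ops->get(0));
        return 0;
    }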
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 6485547..477033e 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1854,7 +1854,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __refdata cpufreq_cpu_notifier = {
++static struct notifier_block cpufreq_cpu_notifier = {
+     .notifier_call = cpufreq_cpu_callback,
+ };
+@@ -1886,8 +1886,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+       pr_debug("trying to register driver %s\n", driver_data->name);
+-      if (driver_data->setpolicy)
+-              driver_data->flags |= CPUFREQ_CONST_LOOPS;
++      if (driver_data->setpolicy) {
++              pax_open_kernel();
++              *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
++              pax_close_kernel();
++      }
+       write_lock_irqsave(&cpufreq_driver_lock, flags);
+       if (cpufreq_driver) {
+diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
+index a86ff72..aad2b03 100644
+--- a/drivers/cpufreq/cpufreq_governor.c
++++ b/drivers/cpufreq/cpufreq_governor.c
+@@ -235,7 +235,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+       struct dbs_data *dbs_data;
+       struct od_cpu_dbs_info_s *od_dbs_info = NULL;
+       struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
+-      struct od_ops *od_ops = NULL;
++      const struct od_ops *od_ops = NULL;
+       struct od_dbs_tuners *od_tuners = NULL;
+       struct cs_dbs_tuners *cs_tuners = NULL;
+       struct cpu_dbs_common_info *cpu_cdbs;
+@@ -298,7 +298,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+               if ((cdata->governor == GOV_CONSERVATIVE) &&
+                               (!policy->governor->initialized)) {
+-                      struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
++                      const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
+                       cpufreq_register_notifier(cs_ops->notifier_block,
+                                       CPUFREQ_TRANSITION_NOTIFIER);
+@@ -315,7 +315,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+                       if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
+                               (policy->governor->initialized == 1)) {
+-                              struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
++                              const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
+                               cpufreq_unregister_notifier(cs_ops->notifier_block,
+                                               CPUFREQ_TRANSITION_NOTIFIER);
+diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
+index 0d9e6be..461fd3b 100644
+--- a/drivers/cpufreq/cpufreq_governor.h
++++ b/drivers/cpufreq/cpufreq_governor.h
+@@ -204,7 +204,7 @@ struct common_dbs_data {
+       void (*exit)(struct dbs_data *dbs_data);
+       /* Governor specific ops, see below */
+-      void *gov_ops;
++      const void *gov_ops;
+ };
+ /* Governer Per policy data */
+diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
+index c087347..dad6268 100644
+--- a/drivers/cpufreq/cpufreq_ondemand.c
++++ b/drivers/cpufreq/cpufreq_ondemand.c
+@@ -615,14 +615,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
+               (struct cpufreq_policy *, unsigned int, unsigned int),
+               unsigned int powersave_bias)
+ {
+-      od_ops.powersave_bias_target = f;
++      pax_open_kernel();
++      *(void **)&od_ops.powersave_bias_target = f;
++      pax_close_kernel();
+       od_set_powersave_bias(powersave_bias);
+ }
+ EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
+ void od_unregister_powersave_bias_handler(void)
+ {
+-      od_ops.powersave_bias_target = generic_powersave_bias_target;
++      pax_open_kernel();
++      *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
++      pax_close_kernel();
+       od_set_powersave_bias(0);
+ }
+ EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
+diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
+index bfd6273..e39dd63 100644
+--- a/drivers/cpufreq/cpufreq_stats.c
++++ b/drivers/cpufreq/cpufreq_stats.c
+@@ -365,7 +365,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
+ }
+ /* priority=1 so this will get called before cpufreq_remove_dev */
+-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
++static struct notifier_block cpufreq_stat_cpu_notifier = {
+       .notifier_call = cpufreq_stat_cpu_callback,
+       .priority = 1,
+ };
+diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
+index 421ef37..e708530c 100644
+--- a/drivers/cpufreq/p4-clockmod.c
++++ b/drivers/cpufreq/p4-clockmod.c
+@@ -160,10 +160,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
+               case 0x0F: /* Core Duo */
+               case 0x16: /* Celeron Core */
+               case 0x1C: /* Atom */
+-                      p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
++                      pax_open_kernel();
++                      *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
++                      pax_close_kernel();
+                       return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
+               case 0x0D: /* Pentium M (Dothan) */
+-                      p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
++                      pax_open_kernel();
++                      *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
++                      pax_close_kernel();
+                       /* fall through */
+               case 0x09: /* Pentium M (Banias) */
+                       return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
+@@ -175,7 +179,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
+       /* on P-4s, the TSC runs with constant frequency independent whether
+        * throttling is active or not. */
+-      p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
++      pax_open_kernel();
++      *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
++      pax_close_kernel();
+       if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
+               printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
+diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
+index c71ee14..7c2e183 100644
+--- a/drivers/cpufreq/sparc-us3-cpufreq.c
++++ b/drivers/cpufreq/sparc-us3-cpufreq.c
+@@ -18,14 +18,12 @@
+ #include <asm/head.h>
+ #include <asm/timer.h>
+-static struct cpufreq_driver *cpufreq_us3_driver;
+-
+ struct us3_freq_percpu_info {
+       struct cpufreq_frequency_table table[4];
+ };
+ /* Indexed by cpu number. */
+-static struct us3_freq_percpu_info *us3_freq_table;
++static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
+ /* UltraSPARC-III has three dividers: 1, 2, and 32.  These are controlled
+  * in the Safari config register.
+@@ -186,12 +184,25 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
+ static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
+ {
+-      if (cpufreq_us3_driver)
+-              us3_set_cpu_divider_index(policy, 0);
++      us3_set_cpu_divider_index(policy->cpu, 0);
+       return 0;
+ }
++static int __init us3_freq_init(void);
++static void __exit us3_freq_exit(void);
++
++static struct cpufreq_driver cpufreq_us3_driver = {
++      .init   = us3_freq_cpu_init,
++      .verify = us3_freq_verify,
++      .target = us3_freq_target,
++      .get    = us3_freq_get,
++      .exit   = us3_freq_cpu_exit,
++      .owner  = THIS_MODULE,
++      .name   = "UltraSPARC-III",
++
++};
++
+ static int __init us3_freq_init(void)
+ {
+       unsigned long manuf, impl, ver;
+@@ -208,57 +219,15 @@ static int __init us3_freq_init(void)
+           (impl == CHEETAH_IMPL ||
+            impl == CHEETAH_PLUS_IMPL ||
+            impl == JAGUAR_IMPL ||
+-           impl == PANTHER_IMPL)) {
+-              struct cpufreq_driver *driver;
+-
+-              ret = -ENOMEM;
+-              driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
+-              if (!driver)
+-                      goto err_out;
+-
+-              us3_freq_table = kzalloc(
+-                      (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
+-                      GFP_KERNEL);
+-              if (!us3_freq_table)
+-                      goto err_out;
+-
+-              driver->init = us3_freq_cpu_init;
+-              driver->verify = us3_freq_verify;
+-              driver->target = us3_freq_target;
+-              driver->get = us3_freq_get;
+-              driver->exit = us3_freq_cpu_exit;
+-              driver->owner = THIS_MODULE,
+-              strcpy(driver->name, "UltraSPARC-III");
+-
+-              cpufreq_us3_driver = driver;
+-              ret = cpufreq_register_driver(driver);
+-              if (ret)
+-                      goto err_out;
+-
+-              return 0;
+-
+-err_out:
+-              if (driver) {
+-                      kfree(driver);
+-                      cpufreq_us3_driver = NULL;
+-              }
+-              kfree(us3_freq_table);
+-              us3_freq_table = NULL;
+-              return ret;
+-      }
++           impl == PANTHER_IMPL))
++              return cpufreq_register_driver(&cpufreq_us3_driver);
+       return -ENODEV;
+ }
+ static void __exit us3_freq_exit(void)
+ {
+-      if (cpufreq_us3_driver) {
+-              cpufreq_unregister_driver(cpufreq_us3_driver);
+-              kfree(cpufreq_us3_driver);
+-              cpufreq_us3_driver = NULL;
+-              kfree(us3_freq_table);
+-              us3_freq_table = NULL;
+-      }
++      cpufreq_unregister_driver(&cpufreq_us3_driver);
+ }
+ MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
+diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
+index 618e6f4..e89d915 100644
+--- a/drivers/cpufreq/speedstep-centrino.c
++++ b/drivers/cpufreq/speedstep-centrino.c
+@@ -353,8 +353,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
+           !cpu_has(cpu, X86_FEATURE_EST))
+               return -ENODEV;
+-      if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
+-              centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
++      if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
++              pax_open_kernel();
++              *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
++              pax_close_kernel();
++      }
+       if (policy->cpu != 0)
+               return -ENODEV;
+diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
+index c3a93fe..e808f24 100644
+--- a/drivers/cpuidle/cpuidle.c
++++ b/drivers/cpuidle/cpuidle.c
+@@ -254,7 +254,7 @@ static int poll_idle(struct cpuidle_device *dev,
+ static void poll_idle_init(struct cpuidle_driver *drv)
+ {
+-      struct cpuidle_state *state = &drv->states[0];
++      cpuidle_state_no_const *state = &drv->states[0];
+       snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
+       snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
+diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
+index ea2f8e7..70ac501 100644
+--- a/drivers/cpuidle/governor.c
++++ b/drivers/cpuidle/governor.c
+@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
+       mutex_lock(&cpuidle_lock);
+       if (__cpuidle_find_governor(gov->name) == NULL) {
+               ret = 0;
+-              list_add_tail(&gov->governor_list, &cpuidle_governors);
++              pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
+               if (!cpuidle_curr_governor ||
+                   cpuidle_curr_governor->rating < gov->rating)
+                       cpuidle_switch_governor(gov);
+@@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
+               new_gov = cpuidle_replace_governor(gov->rating);
+               cpuidle_switch_governor(new_gov);
+       }
+-      list_del(&gov->governor_list);
++      pax_list_del((struct list_head *)&gov->governor_list);
+       mutex_unlock(&cpuidle_lock);
+ }
+diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
+index 428754a..8bdf9cc 100644
+--- a/drivers/cpuidle/sysfs.c
++++ b/drivers/cpuidle/sysfs.c
+@@ -131,7 +131,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
+       NULL
+ };
+-static struct attribute_group cpuidle_attr_group = {
++static attribute_group_no_const cpuidle_attr_group = {
+       .attrs = cpuidle_default_attrs,
+       .name = "cpuidle",
+ };
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index 3b36797..db0b0c0 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -477,7 +477,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
+                                               GFP_KERNEL);
+       devfreq->last_stat_updated = jiffies;
+-      dev_set_name(&devfreq->dev, dev_name(dev));
++      dev_set_name(&devfreq->dev, "%s", dev_name(dev));
+       err = device_register(&devfreq->dev);
+       if (err) {
+               put_device(&devfreq->dev);
+@@ -588,7 +588,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
+               goto err_out;
+       }
+-      list_add(&governor->node, &devfreq_governor_list);
++      pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
+       list_for_each_entry(devfreq, &devfreq_list, node) {
+               int ret = 0;
+@@ -676,7 +676,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
+               }
+       }
+-      list_del(&governor->node);
++      pax_list_del((struct list_head *)&governor->node);
+ err_out:
+       mutex_unlock(&devfreq_list_lock);
+diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
+index b70709b..1d8d02a 100644
+--- a/drivers/dma/sh/shdma.c
++++ b/drivers/dma/sh/shdma.c
+@@ -476,7 +476,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
+       return ret;
+ }
+-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
++static struct notifier_block sh_dmae_nmi_notifier = {
+       .notifier_call  = sh_dmae_nmi_handler,
+       /* Run before NMI debug handler and KGDB */
+diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
+index c4d700a..0b57abd 100644
+--- a/drivers/edac/edac_mc_sysfs.c
++++ b/drivers/edac/edac_mc_sysfs.c
+@@ -148,7 +148,7 @@ static const char * const edac_caps[] = {
+ struct dev_ch_attribute {
+       struct device_attribute attr;
+       int channel;
+-};
++} __do_const;
+ #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
+       struct dev_ch_attribute dev_attr_legacy_##_name = \
+@@ -1005,14 +1005,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
+       }
+       if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
++              pax_open_kernel();
+               if (mci->get_sdram_scrub_rate) {
+-                      dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
+-                      dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
++                      *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
++                      *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
+               }
+               if (mci->set_sdram_scrub_rate) {
+-                      dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
+-                      dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
++                      *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
++                      *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
+               }
++              pax_close_kernel();
+               err = device_create_file(&mci->dev,
+                                        &dev_attr_sdram_scrub_rate);
+               if (err) {
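The __do_const annotations scattered through this patch (the drbd command tables earlier, struct dev_ch_attribute in the hunk above, and the edac_pci/dmi/firewire/gpio attribute structs further down) feed grsecurity's constify plugin, which, roughly, forces such ops- and attribute-style structures const so their instances land in read-only memory. In plain C the visible effect is close to simply declaring the instances const, as in this sketch:

    #include <stdio.h>
    #include <sys/types.h>

    struct dev_attr_sketch {
        const char *name;
        ssize_t (*show)(char *buf);
        int channel;
    };

    static ssize_t show_ch0(char *buf) { return sprintf(buf, "ch0\n"); }

    /* const instance: the compiler places it in .rodata, and any later attempt
     * to overwrite the function pointer is rejected at compile time. */
    static const struct dev_attr_sketch dev_attr_ch0 = {
        .name    = "ch0_dimm_label",
        .show    = show_ch0,
        .channel = 0,
    };

    int main(void)
    {
        char buf[16];

        dev_attr_ch0.show(buf);
        fputs(buf, stdout);
        /* dev_attr_ch0.show = NULL;  -- error: assignment of read-only member */
        return 0;
    }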
+diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
+index e8658e4..22746d6 100644
+--- a/drivers/edac/edac_pci_sysfs.c
++++ b/drivers/edac/edac_pci_sysfs.c
+@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1;              /* log PCI parity errors */
+ static int edac_pci_log_npe = 1;      /* log PCI non-parity error errors */
+ static int edac_pci_poll_msec = 1000; /* one second workq period */
+-static atomic_t pci_parity_count = ATOMIC_INIT(0);
+-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
++static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
++static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
+ static struct kobject *edac_pci_top_main_kobj;
+ static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
+@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
+       void *value;
+        ssize_t(*show) (void *, char *);
+        ssize_t(*store) (void *, const char *, size_t);
+-};
++} __do_const;
+ /* Set of show/store abstract level functions for PCI Parity object */
+ static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
+@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+                       edac_printk(KERN_CRIT, EDAC_PCI,
+                               "Signaled System Error on %s\n",
+                               pci_name(dev));
+-                      atomic_inc(&pci_nonparity_count);
++                      atomic_inc_unchecked(&pci_nonparity_count);
+               }
+               if (status & (PCI_STATUS_PARITY)) {
+@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+                               "Master Data Parity Error on %s\n",
+                               pci_name(dev));
+-                      atomic_inc(&pci_parity_count);
++                      atomic_inc_unchecked(&pci_parity_count);
+               }
+               if (status & (PCI_STATUS_DETECTED_PARITY)) {
+@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+                               "Detected Parity Error on %s\n",
+                               pci_name(dev));
+-                      atomic_inc(&pci_parity_count);
++                      atomic_inc_unchecked(&pci_parity_count);
+               }
+       }
+@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+                               edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
+                                       "Signaled System Error on %s\n",
+                                       pci_name(dev));
+-                              atomic_inc(&pci_nonparity_count);
++                              atomic_inc_unchecked(&pci_nonparity_count);
+                       }
+                       if (status & (PCI_STATUS_PARITY)) {
+@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+                                       "Master Data Parity Error on "
+                                       "%s\n", pci_name(dev));
+-                              atomic_inc(&pci_parity_count);
++                              atomic_inc_unchecked(&pci_parity_count);
+                       }
+                       if (status & (PCI_STATUS_DETECTED_PARITY)) {
+@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+                                       "Detected Parity Error on %s\n",
+                                       pci_name(dev));
+-                              atomic_inc(&pci_parity_count);
++                              atomic_inc_unchecked(&pci_parity_count);
+                       }
+               }
+       }
+@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
+       if (!check_pci_errors)
+               return;
+-      before_count = atomic_read(&pci_parity_count);
++      before_count = atomic_read_unchecked(&pci_parity_count);
+       /* scan all PCI devices looking for a Parity Error on devices and
+        * bridges.
+@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
+       /* Only if operator has selected panic on PCI Error */
+       if (edac_pci_get_panic_on_pe()) {
+               /* If the count is different 'after' from 'before' */
+-              if (before_count != atomic_read(&pci_parity_count))
++              if (before_count != atomic_read_unchecked(&pci_parity_count))
+                       panic("EDAC: PCI Parity Error");
+       }
+ }
+diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
+index 51b7e3a..aa8a3e8 100644
+--- a/drivers/edac/mce_amd.h
++++ b/drivers/edac/mce_amd.h
+@@ -77,7 +77,7 @@ struct amd_decoder_ops {
+       bool (*mc0_mce)(u16, u8);
+       bool (*mc1_mce)(u16, u8);
+       bool (*mc2_mce)(u16, u8);
+-};
++} __no_const;
+ void amd_report_gart_errors(bool);
+ void amd_register_ecc_decoder(void (*f)(int, struct mce *));
+diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
+index 57ea7f4..789e3c3 100644
+--- a/drivers/firewire/core-card.c
++++ b/drivers/firewire/core-card.c
+@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
+ void fw_core_remove_card(struct fw_card *card)
+ {
+-      struct fw_card_driver dummy_driver = dummy_driver_template;
++      fw_card_driver_no_const dummy_driver = dummy_driver_template;
+       card->driver->update_phy_reg(card, 4,
+                                    PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
+diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
+index 664a6ff..af13580 100644
+--- a/drivers/firewire/core-device.c
++++ b/drivers/firewire/core-device.c
+@@ -232,7 +232,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
+ struct config_rom_attribute {
+       struct device_attribute attr;
+       u32 key;
+-};
++} __do_const;
+ static ssize_t show_immediate(struct device *dev,
+                             struct device_attribute *dattr, char *buf)
+diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
+index 28a94c7..58da63a 100644
+--- a/drivers/firewire/core-transaction.c
++++ b/drivers/firewire/core-transaction.c
+@@ -38,6 +38,7 @@
+ #include <linux/timer.h>
+ #include <linux/types.h>
+ #include <linux/workqueue.h>
++#include <linux/sched.h>
+ #include <asm/byteorder.h>
+diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
+index 515a42c..5ecf3ba 100644
+--- a/drivers/firewire/core.h
++++ b/drivers/firewire/core.h
+@@ -111,6 +111,7 @@ struct fw_card_driver {
+       int (*stop_iso)(struct fw_iso_context *ctx);
+ };
++typedef struct fw_card_driver __no_const fw_card_driver_no_const;
+ void fw_card_initialize(struct fw_card *card,
+               const struct fw_card_driver *driver, struct device *device);
+diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
+index 94a58a0..f5eba42 100644
+--- a/drivers/firmware/dmi-id.c
++++ b/drivers/firmware/dmi-id.c
+@@ -16,7 +16,7 @@
+ struct dmi_device_attribute{
+       struct device_attribute dev_attr;
+       int field;
+-};
++} __do_const;
+ #define to_dmi_dev_attr(_dev_attr) \
+       container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index b95159b..841ae55 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -497,11 +497,6 @@ void __init dmi_scan_machine(void)
+               }
+       }
+       else {
+-              /*
+-               * no iounmap() for that ioremap(); it would be a no-op, but
+-               * it's so early in setup that sucker gets confused into doing
+-               * what it shouldn't if we actually call it.
+-               */
+               p = dmi_ioremap(0xF0000, 0x10000);
+               if (p == NULL)
+                       goto error;
+@@ -786,7 +781,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
+       if (buf == NULL)
+               return -1;
+-      dmi_table(buf, dmi_len, dmi_num, decode, private_data);
++      dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
+       iounmap(buf);
+       return 0;
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 5145fa3..0d3babd 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -65,14 +65,16 @@ static struct attribute_group efi_subsys_attr_group = {
+ };
+ static struct efivars generic_efivars;
+-static struct efivar_operations generic_ops;
++static efivar_operations_no_const generic_ops __read_only;
+ static int generic_ops_register(void)
+ {
+-      generic_ops.get_variable = efi.get_variable;
+-      generic_ops.set_variable = efi.set_variable;
+-      generic_ops.get_next_variable = efi.get_next_variable;
+-      generic_ops.query_variable_store = efi_query_variable_store;
++      pax_open_kernel();
++      *(void **)&generic_ops.get_variable = efi.get_variable;
++      *(void **)&generic_ops.set_variable = efi.set_variable;
++      *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
++      *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
++      pax_close_kernel();
+       return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
+ }
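Annotation: the efi.c hunk above shows the other half of constification. generic_ops becomes a read-only object, so its members can no longer be assigned directly; they are filled in through cast pointers inside a pax_open_kernel()/pax_close_kernel() window. Below is a minimal userspace model of that pattern, assuming no-op stand-ins for the two macros (in the patch they toggle write protection); efivar_ops, demo_get and demo_set are invented names.

#include <stdio.h>

#define pax_open_kernel()  do { } while (0) /* stand-in: the real macro lifts write protection */
#define pax_close_kernel() do { } while (0) /* stand-in: the real macro restores it */

struct efivar_ops {
	int (*get_variable)(const char *name);
	int (*set_variable)(const char *name, int value);
};

/* in the patched kernel this object is __read_only, so plain member
 * assignment is rejected; init-time writes go through a cast pointer
 * inside the open/close window, mirroring the hunk above */
static struct efivar_ops generic_ops;

static int demo_get(const char *name)
{
	printf("get %s\n", name);
	return 0;
}

static int demo_set(const char *name, int value)
{
	printf("set %s=%d\n", name, value);
	return 0;
}

static void generic_ops_register(void)
{
	pax_open_kernel();
	*(void **)&generic_ops.get_variable = (void *)demo_get;
	*(void **)&generic_ops.set_variable = (void *)demo_set;
	pax_close_kernel();
}

int main(void)
{
	generic_ops_register();
	generic_ops.get_variable("BootOrder");
	generic_ops.set_variable("Timeout", 5);
	return 0;
}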
+diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
+index 8bd1bb6..c48b0c6 100644
+--- a/drivers/firmware/efi/efivars.c
++++ b/drivers/firmware/efi/efivars.c
+@@ -452,7 +452,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
+ static int
+ create_efivars_bin_attributes(void)
+ {
+-      struct bin_attribute *attr;
++      bin_attribute_no_const *attr;
+       int error;
+       /* new_var */
+diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
+index 2a90ba6..07f3733 100644
+--- a/drivers/firmware/google/memconsole.c
++++ b/drivers/firmware/google/memconsole.c
+@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
+       if (!found_memconsole())
+               return -ENODEV;
+-      memconsole_bin_attr.size = memconsole_length;
++      pax_open_kernel();
++      *(size_t *)&memconsole_bin_attr.size = memconsole_length;
++      pax_close_kernel();
+       ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
+diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
+index e16d932..f0206ef 100644
+--- a/drivers/gpio/gpio-ich.c
++++ b/drivers/gpio/gpio-ich.c
+@@ -69,7 +69,7 @@ struct ichx_desc {
+       /* Some chipsets have quirks, let these use their own request/get */
+       int (*request)(struct gpio_chip *chip, unsigned offset);
+       int (*get)(struct gpio_chip *chip, unsigned offset);
+-};
++} __do_const;
+ static struct {
+       spinlock_t lock;
+diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
+index 9902732..64b62dd 100644
+--- a/drivers/gpio/gpio-vr41xx.c
++++ b/drivers/gpio/gpio-vr41xx.c
+@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
+       printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
+              maskl, pendl, maskh, pendh);
+-      atomic_inc(&irq_err_count);
++      atomic_inc_unchecked(&irq_err_count);
+       return -EINVAL;
+ }
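Annotation: atomic_inc_unchecked() here, and in many of the DRM hunks below, is the opt-out side of the PaX reference-counter hardening carried by this patch: ordinary atomic_t operations are instrumented to catch overflow (a likely refcount bug), so pure statistics counters that may legitimately wrap (irq_err_count, vbl_received, the _DRM_STAT_* counts) are switched to the unchecked variant. The sketch below models that split in plain C; the saturation behaviour is illustrative, not the kernel implementation.

#include <limits.h>
#include <stdio.h>

/* checked: refuse to wrap, report the would-be overflow (models atomic_t
 * under REFCOUNT hardening) */
static int checked_inc(int *v)
{
	if (*v == INT_MAX) {
		fprintf(stderr, "refcount overflow detected, value pinned\n");
		return *v;               /* saturate instead of wrapping */
	}
	return ++*v;
}

/* unchecked: plain modular arithmetic, fine for event/statistics counters
 * (models atomic_unchecked_t) */
static unsigned int unchecked_inc(unsigned int *v)
{
	return ++*v;                     /* wraps silently at UINT_MAX */
}

int main(void)
{
	int refs = INT_MAX;
	unsigned int events = UINT_MAX;

	checked_inc(&refs);              /* stays at INT_MAX, prints a warning */
	printf("refs   = %d\n", refs);
	printf("events = %u\n", unchecked_inc(&events)); /* wraps to 0 */
	return 0;
}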
+diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
+index ed1334e..ee0dd42 100644
+--- a/drivers/gpu/drm/drm_crtc_helper.c
++++ b/drivers/gpu/drm/drm_crtc_helper.c
+@@ -321,7 +321,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
+       struct drm_crtc *tmp;
+       int crtc_mask = 1;
+-      WARN(!crtc, "checking null crtc?\n");
++      BUG_ON(!crtc);
+       dev = crtc->dev;
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index 9cc247f..36aa285 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -306,7 +306,7 @@ module_exit(drm_core_exit);
+ /**
+  * Copy and IOCTL return string to user space
+  */
+-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
++static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
+ {
+       int len;
+@@ -376,7 +376,7 @@ long drm_ioctl(struct file *filp,
+       struct drm_file *file_priv = filp->private_data;
+       struct drm_device *dev;
+       const struct drm_ioctl_desc *ioctl = NULL;
+-      drm_ioctl_t *func;
++      drm_ioctl_no_const_t func;
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+       int retcode = -EINVAL;
+       char stack_kdata[128];
+@@ -389,7 +389,7 @@ long drm_ioctl(struct file *filp,
+               return -ENODEV;
+       atomic_inc(&dev->ioctl_count);
+-      atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++      atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
+       ++file_priv->ioctl_count;
+       if ((nr >= DRM_CORE_IOCTL_COUNT) &&
+diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
+index 429e07d..e681a2c 100644
+--- a/drivers/gpu/drm/drm_fops.c
++++ b/drivers/gpu/drm/drm_fops.c
+@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
+       }
+       for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
+-              atomic_set(&dev->counts[i], 0);
++              atomic_set_unchecked(&dev->counts[i], 0);
+       dev->sigdata.lock = NULL;
+@@ -135,7 +135,7 @@ int drm_open(struct inode *inode, struct file *filp)
+       if (drm_device_is_unplugged(dev))
+               return -ENODEV;
+-      if (!dev->open_count++)
++      if (local_inc_return(&dev->open_count) == 1)
+               need_setup = 1;
+       mutex_lock(&dev->struct_mutex);
+       old_imapping = inode->i_mapping;
+@@ -151,7 +151,7 @@ int drm_open(struct inode *inode, struct file *filp)
+       retcode = drm_open_helper(inode, filp, dev);
+       if (retcode)
+               goto err_undo;
+-      atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
++      atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
+       if (need_setup) {
+               retcode = drm_setup(dev);
+               if (retcode)
+@@ -166,7 +166,7 @@ err_undo:
+       iput(container_of(dev->dev_mapping, struct inode, i_data));
+       dev->dev_mapping = old_mapping;
+       mutex_unlock(&dev->struct_mutex);
+-      dev->open_count--;
++      local_dec(&dev->open_count);
+       return retcode;
+ }
+ EXPORT_SYMBOL(drm_open);
+@@ -441,7 +441,7 @@ int drm_release(struct inode *inode, struct file *filp)
+       mutex_lock(&drm_global_mutex);
+-      DRM_DEBUG("open_count = %d\n", dev->open_count);
++      DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
+       if (dev->driver->preclose)
+               dev->driver->preclose(dev, file_priv);
+@@ -450,10 +450,10 @@ int drm_release(struct inode *inode, struct file *filp)
+        * Begin inline drm_release
+        */
+-      DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
++      DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
+                 task_pid_nr(current),
+                 (long)old_encode_dev(file_priv->minor->device),
+-                dev->open_count);
++                local_read(&dev->open_count));
+       /* Release any auth tokens that might point to this file_priv,
+          (do that under the drm_global_mutex) */
+@@ -550,8 +550,8 @@ int drm_release(struct inode *inode, struct file *filp)
+        * End inline drm_release
+        */
+-      atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
+-      if (!--dev->open_count) {
++      atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
++      if (local_dec_and_test(&dev->open_count)) {
+               if (atomic_read(&dev->ioctl_count)) {
+                       DRM_ERROR("Device busy: %d\n",
+                                 atomic_read(&dev->ioctl_count));
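Annotation: across drm_fops.c, drm_stub.c and the switcheroo helpers, dev->open_count changes from a plain int to a local_t, so the "first open" and "last close" decisions (== 1 after increment, dec-and-test on release) are taken atomically instead of via bare ++/--. local_t is kernel-internal; this standalone model uses C11 atomics as a stand-in, and demo_device is an invented type.

#include <stdatomic.h>
#include <stdio.h>

struct demo_device {
	atomic_long open_count;
};

static void demo_open(struct demo_device *dev)
{
	/* local_inc_return(&dev->open_count) == 1  <=>  previous value was 0 */
	if (atomic_fetch_add(&dev->open_count, 1) == 0)
		puts("first open: run one-time setup");
}

static void demo_release(struct demo_device *dev)
{
	/* local_dec_and_test(&dev->open_count)  <=>  previous value was 1 */
	if (atomic_fetch_sub(&dev->open_count, 1) == 1)
		puts("last close: tear the device down");
}

int main(void)
{
	struct demo_device dev = { .open_count = 0 };

	demo_open(&dev);     /* setup runs here    */
	demo_open(&dev);
	demo_release(&dev);
	demo_release(&dev);  /* teardown runs here */
	return 0;
}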
+diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
+index f731116..629842c 100644
+--- a/drivers/gpu/drm/drm_global.c
++++ b/drivers/gpu/drm/drm_global.c
+@@ -36,7 +36,7 @@
+ struct drm_global_item {
+       struct mutex mutex;
+       void *object;
+-      int refcount;
++      atomic_t refcount;
+ };
+ static struct drm_global_item glob[DRM_GLOBAL_NUM];
+@@ -49,7 +49,7 @@ void drm_global_init(void)
+               struct drm_global_item *item = &glob[i];
+               mutex_init(&item->mutex);
+               item->object = NULL;
+-              item->refcount = 0;
++              atomic_set(&item->refcount, 0);
+       }
+ }
+@@ -59,7 +59,7 @@ void drm_global_release(void)
+       for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
+               struct drm_global_item *item = &glob[i];
+               BUG_ON(item->object != NULL);
+-              BUG_ON(item->refcount != 0);
++              BUG_ON(atomic_read(&item->refcount) != 0);
+       }
+ }
+@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
+       void *object;
+       mutex_lock(&item->mutex);
+-      if (item->refcount == 0) {
++      if (atomic_read(&item->refcount) == 0) {
+               item->object = kzalloc(ref->size, GFP_KERNEL);
+               if (unlikely(item->object == NULL)) {
+                       ret = -ENOMEM;
+@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
+                       goto out_err;
+       }
+-      ++item->refcount;
++      atomic_inc(&item->refcount);
+       ref->object = item->object;
+       object = item->object;
+       mutex_unlock(&item->mutex);
+@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
+       struct drm_global_item *item = &glob[ref->global_type];
+       mutex_lock(&item->mutex);
+-      BUG_ON(item->refcount == 0);
++      BUG_ON(atomic_read(&item->refcount) == 0);
+       BUG_ON(ref->object != item->object);
+-      if (--item->refcount == 0) {
++      if (atomic_dec_and_test(&item->refcount)) {
+               ref->release(ref);
+               item->object = NULL;
+       }
+diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
+index d4b20ce..77a8d41 100644
+--- a/drivers/gpu/drm/drm_info.c
++++ b/drivers/gpu/drm/drm_info.c
+@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
+       struct drm_local_map *map;
+       struct drm_map_list *r_list;
+-      /* Hardcoded from _DRM_FRAME_BUFFER,
+-         _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
+-         _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
+-      const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
++      static const char * const types[] = {
++              [_DRM_FRAME_BUFFER] = "FB",
++              [_DRM_REGISTERS] = "REG",
++              [_DRM_SHM] = "SHM",
++              [_DRM_AGP] = "AGP",
++              [_DRM_SCATTER_GATHER] = "SG",
++              [_DRM_CONSISTENT] = "PCI",
++              [_DRM_GEM] = "GEM" };
+       const char *type;
+       int i;
+@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
+               map = r_list->map;
+               if (!map)
+                       continue;
+-              if (map->type < 0 || map->type > 5)
++              if (map->type >= ARRAY_SIZE(types))
+                       type = "??";
+               else
+                       type = types[map->type];
+@@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
+                          vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+                          vma->vm_flags & VM_LOCKED ? 'l' : '-',
+                          vma->vm_flags & VM_IO ? 'i' : '-',
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++                         0);
++#else
+                          vma->vm_pgoff);
++#endif
+ #if defined(__i386__)
+               pgprot = pgprot_val(vma->vm_page_prot);
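Annotation: the drm_info.c hunk replaces a positional string array guarded by a hand-maintained "type > 5" check with a table indexed by the map-type enum itself and a bound taken from ARRAY_SIZE, which is why the new _DRM_GEM entry can be added without touching the range check. A standalone sketch of the same construction, with invented enum values.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum map_type { MAP_FB, MAP_REG, MAP_SHM, MAP_AGP, MAP_SG, MAP_PCI, MAP_GEM };

static const char * const types[] = {
	[MAP_FB]  = "FB",
	[MAP_REG] = "REG",
	[MAP_SHM] = "SHM",
	[MAP_AGP] = "AGP",
	[MAP_SG]  = "SG",
	[MAP_PCI] = "PCI",
	[MAP_GEM] = "GEM",
};

static const char *type_name(unsigned int type)
{
	/* out-of-range (or gap) entries come back as "??", as in the hunk */
	if (type >= ARRAY_SIZE(types) || types[type] == NULL)
		return "??";
	return types[type];
}

int main(void)
{
	printf("%s %s %s\n", type_name(MAP_FB), type_name(MAP_GEM), type_name(42));
	return 0;
}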
+diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
+index 2f4c434..dd12cd2 100644
+--- a/drivers/gpu/drm/drm_ioc32.c
++++ b/drivers/gpu/drm/drm_ioc32.c
+@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
+       request = compat_alloc_user_space(nbytes);
+       if (!access_ok(VERIFY_WRITE, request, nbytes))
+               return -EFAULT;
+-      list = (struct drm_buf_desc *) (request + 1);
++      list = (struct drm_buf_desc __user *) (request + 1);
+       if (__put_user(count, &request->count)
+           || __put_user(list, &request->list))
+@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
+       request = compat_alloc_user_space(nbytes);
+       if (!access_ok(VERIFY_WRITE, request, nbytes))
+               return -EFAULT;
+-      list = (struct drm_buf_pub *) (request + 1);
++      list = (struct drm_buf_pub __user *) (request + 1);
+       if (__put_user(count, &request->count)
+           || __put_user(list, &request->list))
+@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
+       return 0;
+ }
+-drm_ioctl_compat_t *drm_compat_ioctls[] = {
++drm_ioctl_compat_t drm_compat_ioctls[] = {
+       [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
+       [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
+       [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
+@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
+ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+-      drm_ioctl_compat_t *fn;
+       int ret;
+       /* Assume that ioctls without an explicit compat routine will just
+@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+       if (nr >= ARRAY_SIZE(drm_compat_ioctls))
+               return drm_ioctl(filp, cmd, arg);
+-      fn = drm_compat_ioctls[nr];
+-
+-      if (fn != NULL)
+-              ret = (*fn) (filp, cmd, arg);
++      if (drm_compat_ioctls[nr] != NULL)
++              ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
+       else
+               ret = drm_ioctl(filp, cmd, arg);
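Annotation: the compat-ioctl hunks (here and for i915, mga, r128 and radeon further down) turn the handler tables into arrays of function pointers that can be kept constant and drop the intermediate fn variable, dispatching straight through the table with a NULL-slot fallback to drm_ioctl(). A minimal model of that dispatch shape; cmd_handler_t and the handlers are invented.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

typedef long (*cmd_handler_t)(unsigned int cmd, unsigned long arg);

static long handle_version(unsigned int cmd, unsigned long arg) { return 100 + (long)arg; }
static long handle_unique (unsigned int cmd, unsigned long arg) { return 200 + (long)arg; }
static long fallback      (unsigned int cmd, unsigned long arg) { return -1; }

/* a const table of function pointers: nothing can rewrite the handlers at
 * run time, which is what the constification changes elsewhere are after */
static const cmd_handler_t handlers[] = {
	[0] = handle_version,
	[1] = handle_unique,
	/* slot 2 intentionally left NULL */
	[3] = handle_unique,
};

static long dispatch(unsigned int cmd, unsigned long arg)
{
	/* out of range or empty slot: fall back to the generic path */
	if (cmd >= ARRAY_SIZE(handlers) || handlers[cmd] == NULL)
		return fallback(cmd, arg);
	return handlers[cmd](cmd, arg);
}

int main(void)
{
	printf("%ld %ld %ld\n", dispatch(0, 1), dispatch(2, 1), dispatch(9, 1));
	return 0;
}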
+diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
+index e77bd8b..1571b85 100644
+--- a/drivers/gpu/drm/drm_ioctl.c
++++ b/drivers/gpu/drm/drm_ioctl.c
+@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
+                       stats->data[i].value =
+                           (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
+               else
+-                      stats->data[i].value = atomic_read(&dev->counts[i]);
++                      stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
+               stats->data[i].type = dev->types[i];
+       }
+diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
+index d752c96..fe08455 100644
+--- a/drivers/gpu/drm/drm_lock.c
++++ b/drivers/gpu/drm/drm_lock.c
+@@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+               if (drm_lock_take(&master->lock, lock->context)) {
+                       master->lock.file_priv = file_priv;
+                       master->lock.lock_time = jiffies;
+-                      atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
++                      atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
+                       break;  /* Got lock */
+               }
+@@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+               return -EINVAL;
+       }
+-      atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
++      atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
+       if (drm_lock_free(&master->lock, lock->context)) {
+               /* FIXME: Should really bail out here. */
+diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
+index 16f3ec5..b28f9ca 100644
+--- a/drivers/gpu/drm/drm_stub.c
++++ b/drivers/gpu/drm/drm_stub.c
+@@ -501,7 +501,7 @@ void drm_unplug_dev(struct drm_device *dev)
+       drm_device_set_unplugged(dev);
+-      if (dev->open_count == 0) {
++      if (local_read(&dev->open_count) == 0) {
+               drm_put_dev(dev);
+       }
+       mutex_unlock(&drm_global_mutex);
+diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
+index 0229665..f61329c 100644
+--- a/drivers/gpu/drm/drm_sysfs.c
++++ b/drivers/gpu/drm/drm_sysfs.c
+@@ -499,7 +499,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
+ int drm_sysfs_device_add(struct drm_minor *minor)
+ {
+       int err;
+-      char *minor_str;
++      const char *minor_str;
+       minor->kdev.parent = minor->dev->dev;
+diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
+index 004ecdf..db1f6e0 100644
+--- a/drivers/gpu/drm/i810/i810_dma.c
++++ b/drivers/gpu/drm/i810/i810_dma.c
+@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
+                                dma->buflist[vertex->idx],
+                                vertex->discard, vertex->used);
+-      atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
+-      atomic_inc(&dev->counts[_DRM_STAT_DMA]);
++      atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
++      atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
+       sarea_priv->last_enqueue = dev_priv->counter - 1;
+       sarea_priv->last_dispatch = (int)hw_status[5];
+@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
+       i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
+                            mc->last_render);
+-      atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
+-      atomic_inc(&dev->counts[_DRM_STAT_DMA]);
++      atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
++      atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
+       sarea_priv->last_enqueue = dev_priv->counter - 1;
+       sarea_priv->last_dispatch = (int)hw_status[5];
+diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
+index 6e0acad..93c8289 100644
+--- a/drivers/gpu/drm/i810/i810_drv.h
++++ b/drivers/gpu/drm/i810/i810_drv.h
+@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
+       int page_flipping;
+       wait_queue_head_t irq_queue;
+-      atomic_t irq_received;
+-      atomic_t irq_emitted;
++      atomic_unchecked_t irq_received;
++      atomic_unchecked_t irq_emitted;
+       int front_offset;
+ } drm_i810_private_t;
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index e913d32..4d9b351 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
+                          I915_READ(GTIMR));
+       }
+       seq_printf(m, "Interrupts received: %d\n",
+-                 atomic_read(&dev_priv->irq_received));
++                 atomic_read_unchecked(&dev_priv->irq_received));
+       for_each_ring(ring, dev_priv, i) {
+               if (IS_GEN6(dev) || IS_GEN7(dev)) {
+                       seq_printf(m,
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index 17d9b0b..860e6d9 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -1259,7 +1259,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+       bool can_switch;
+       spin_lock(&dev->count_lock);
+-      can_switch = (dev->open_count == 0);
++      can_switch = (local_read(&dev->open_count) == 0);
+       spin_unlock(&dev->count_lock);
+       return can_switch;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 47d8b68..52f5d8d 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -916,7 +916,7 @@ typedef struct drm_i915_private {
+       drm_dma_handle_t *status_page_dmah;
+       struct resource mch_res;
+-      atomic_t irq_received;
++      atomic_unchecked_t irq_received;
+       /* protects the irq masks */
+       spinlock_t irq_lock;
+@@ -1813,7 +1813,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
+               struct drm_i915_private *dev_priv, unsigned port);
+ extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+ extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
++static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
+ {
+       return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
+ }
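Annotation: the one-line change from extern inline to static inline in i915_drv.h matters because of the gnu89 inline semantics the kernel has traditionally built with: extern inline emits no out-of-line copy, so any call the compiler chooses not to inline needs a definition elsewhere or the link fails, while static inline is self-contained in every translation unit, which is what a header helper wants. A tiny standalone illustration; the adapter struct is invented.

#include <stdbool.h>
#include <stdio.h>

struct adapter { bool force_bit; };

/* header-style helper: safe to include from any number of .c files,
 * and always resolvable even when the compiler declines to inline it */
static inline bool adapter_is_forced_bit(const struct adapter *a)
{
	return a->force_bit;
}

int main(void)
{
	struct adapter a = { .force_bit = true };

	printf("forced bit: %d\n", adapter_is_forced_bit(&a));
	return 0;
}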
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index 117ce38..eefd237 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -727,9 +727,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+ static int
+ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+-                 int count)
++                 unsigned int count)
+ {
+-      int i;
++      unsigned int i;
+       int relocs_total = 0;
+       int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
+diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
+index 3c59584..500f2e9 100644
+--- a/drivers/gpu/drm/i915/i915_ioc32.c
++++ b/drivers/gpu/drm/i915/i915_ioc32.c
+@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
+                        (unsigned long)request);
+ }
+-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
++static drm_ioctl_compat_t i915_compat_ioctls[] = {
+       [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
+       [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
+       [DRM_I915_GETPARAM] = compat_i915_getparam,
+@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
+ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+-      drm_ioctl_compat_t *fn = NULL;
+       int ret;
+       if (nr < DRM_COMMAND_BASE)
+               return drm_compat_ioctl(filp, cmd, arg);
+-      if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
+-              fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
+-
+-      if (fn != NULL)
++      if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
++              drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
+               ret = (*fn) (filp, cmd, arg);
+-      else
++      } else
+               ret = drm_ioctl(filp, cmd, arg);
+       return ret;
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index e5e32869..1678f36 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -670,7 +670,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
+       int pipe;
+       u32 pipe_stats[I915_MAX_PIPES];
+-      atomic_inc(&dev_priv->irq_received);
++      atomic_inc_unchecked(&dev_priv->irq_received);
+       while (true) {
+               iir = I915_READ(VLV_IIR);
+@@ -835,7 +835,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
+       irqreturn_t ret = IRQ_NONE;
+       int i;
+-      atomic_inc(&dev_priv->irq_received);
++      atomic_inc_unchecked(&dev_priv->irq_received);
+       /* disable master interrupt before clearing iir  */
+       de_ier = I915_READ(DEIER);
+@@ -925,7 +925,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
+       int ret = IRQ_NONE;
+       u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
+-      atomic_inc(&dev_priv->irq_received);
++      atomic_inc_unchecked(&dev_priv->irq_received);
+       /* disable master interrupt before clearing iir  */
+       de_ier = I915_READ(DEIER);
+@@ -2089,7 +2089,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
+ {
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+-      atomic_set(&dev_priv->irq_received, 0);
++      atomic_set_unchecked(&dev_priv->irq_received, 0);
+       I915_WRITE(HWSTAM, 0xeffe);
+@@ -2124,7 +2124,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       int pipe;
+-      atomic_set(&dev_priv->irq_received, 0);
++      atomic_set_unchecked(&dev_priv->irq_received, 0);
+       /* VLV magic */
+       I915_WRITE(VLV_IMR, 0);
+@@ -2411,7 +2411,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       int pipe;
+-      atomic_set(&dev_priv->irq_received, 0);
++      atomic_set_unchecked(&dev_priv->irq_received, 0);
+       for_each_pipe(pipe)
+               I915_WRITE(PIPESTAT(pipe), 0);
+@@ -2490,7 +2490,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
+               I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+               I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+-      atomic_inc(&dev_priv->irq_received);
++      atomic_inc_unchecked(&dev_priv->irq_received);
+       iir = I915_READ16(IIR);
+       if (iir == 0)
+@@ -2565,7 +2565,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       int pipe;
+-      atomic_set(&dev_priv->irq_received, 0);
++      atomic_set_unchecked(&dev_priv->irq_received, 0);
+       if (I915_HAS_HOTPLUG(dev)) {
+               I915_WRITE(PORT_HOTPLUG_EN, 0);
+@@ -2664,7 +2664,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
+               I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+       int pipe, ret = IRQ_NONE;
+-      atomic_inc(&dev_priv->irq_received);
++      atomic_inc_unchecked(&dev_priv->irq_received);
+       iir = I915_READ(IIR);
+       do {
+@@ -2791,7 +2791,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       int pipe;
+-      atomic_set(&dev_priv->irq_received, 0);
++      atomic_set_unchecked(&dev_priv->irq_received, 0);
+       I915_WRITE(PORT_HOTPLUG_EN, 0);
+       I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+@@ -2898,7 +2898,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
+               I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+               I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+-      atomic_inc(&dev_priv->irq_received);
++      atomic_inc_unchecked(&dev_priv->irq_received);
+       iir = I915_READ(IIR);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index eea5982..eeef407 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -8935,13 +8935,13 @@ struct intel_quirk {
+       int subsystem_vendor;
+       int subsystem_device;
+       void (*hook)(struct drm_device *dev);
+-};
++} __do_const;
+ /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
+ struct intel_dmi_quirk {
+       void (*hook)(struct drm_device *dev);
+       const struct dmi_system_id (*dmi_id_list)[];
+-};
++} __do_const;
+ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+ {
+@@ -8949,18 +8949,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+       return 1;
+ }
+-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
++static const struct dmi_system_id intel_dmi_quirks_table[] = {
+       {
+-              .dmi_id_list = &(const struct dmi_system_id[]) {
+-                      {
+-                              .callback = intel_dmi_reverse_brightness,
+-                              .ident = "NCR Corporation",
+-                              .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+-                                          DMI_MATCH(DMI_PRODUCT_NAME, ""),
+-                              },
+-                      },
+-                      { }  /* terminating entry */
++              .callback = intel_dmi_reverse_brightness,
++              .ident = "NCR Corporation",
++              .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
++                          DMI_MATCH(DMI_PRODUCT_NAME, ""),
+               },
++      },
++      { }  /* terminating entry */
++};
++
++static const struct intel_dmi_quirk intel_dmi_quirks[] = {
++      {
++              .dmi_id_list = &intel_dmi_quirks_table,
+               .hook = quirk_invert_brightness,
+       },
+ };
+diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
+index 54558a0..2d97005 100644
+--- a/drivers/gpu/drm/mga/mga_drv.h
++++ b/drivers/gpu/drm/mga/mga_drv.h
+@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
+       u32 clear_cmd;
+       u32 maccess;
+-      atomic_t vbl_received;          /**< Number of vblanks received. */
++      atomic_unchecked_t vbl_received;          /**< Number of vblanks received. */
+       wait_queue_head_t fence_queue;
+-      atomic_t last_fence_retired;
++      atomic_unchecked_t last_fence_retired;
+       u32 next_fence_to_post;
+       unsigned int fb_cpp;
+diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
+index 709e90d..89a1c0d 100644
+--- a/drivers/gpu/drm/mga/mga_ioc32.c
++++ b/drivers/gpu/drm/mga/mga_ioc32.c
+@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
+       return 0;
+ }
+-drm_ioctl_compat_t *mga_compat_ioctls[] = {
++drm_ioctl_compat_t mga_compat_ioctls[] = {
+       [DRM_MGA_INIT] = compat_mga_init,
+       [DRM_MGA_GETPARAM] = compat_mga_getparam,
+       [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
+@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
+ long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+-      drm_ioctl_compat_t *fn = NULL;
+       int ret;
+       if (nr < DRM_COMMAND_BASE)
+               return drm_compat_ioctl(filp, cmd, arg);
+-      if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
+-              fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
+-
+-      if (fn != NULL)
++      if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
++              drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
+               ret = (*fn) (filp, cmd, arg);
+-      else
++      } else
+               ret = drm_ioctl(filp, cmd, arg);
+       return ret;
+diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
+index 598c281..60d590e 100644
+--- a/drivers/gpu/drm/mga/mga_irq.c
++++ b/drivers/gpu/drm/mga/mga_irq.c
+@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
+       if (crtc != 0)
+               return 0;
+-      return atomic_read(&dev_priv->vbl_received);
++      return atomic_read_unchecked(&dev_priv->vbl_received);
+ }
+@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
+       /* VBLANK interrupt */
+       if (status & MGA_VLINEPEN) {
+               MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
+-              atomic_inc(&dev_priv->vbl_received);
++              atomic_inc_unchecked(&dev_priv->vbl_received);
+               drm_handle_vblank(dev, 0);
+               handled = 1;
+       }
+@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
+               if ((prim_start & ~0x03) != (prim_end & ~0x03))
+                       MGA_WRITE(MGA_PRIMEND, prim_end);
+-              atomic_inc(&dev_priv->last_fence_retired);
++              atomic_inc_unchecked(&dev_priv->last_fence_retired);
+               DRM_WAKEUP(&dev_priv->fence_queue);
+               handled = 1;
+       }
+@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
+        * using fences.
+        */
+       DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
+-                  (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
++                  (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
+                     - *sequence) <= (1 << 23)));
+       *sequence = cur_fence;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
+index 6aa2137..fe8dc55 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
+@@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
+ struct bit_table {
+       const char id;
+       int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
+-};
++} __no_const;
+ #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
+index f2b30f8..d0f9a95 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
++++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
+@@ -92,7 +92,7 @@ struct nouveau_drm {
+               struct drm_global_reference mem_global_ref;
+               struct ttm_bo_global_ref bo_global_ref;
+               struct ttm_bo_device bdev;
+-              atomic_t validate_sequence;
++              atomic_unchecked_t validate_sequence;
+               int (*move)(struct nouveau_channel *,
+                           struct ttm_buffer_object *,
+                           struct ttm_mem_reg *, struct ttm_mem_reg *);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
+index b4b4d0c..b7edc15 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -322,7 +322,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
+       int ret, i;
+       struct nouveau_bo *res_bo = NULL;
+-      sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
++      sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
+ retry:
+       if (++trycnt > 100000) {
+               NV_ERROR(cli, "%s failed and gave up.\n", __func__);
+@@ -359,7 +359,7 @@ retry:
+               if (ret) {
+                       validate_fini(op, NULL);
+                       if (unlikely(ret == -EAGAIN)) {
+-                              sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
++                              sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
+                               ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
+                                                             sequence);
+                               if (!ret)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+index 08214bc..9208577 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
++++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
+                        unsigned long arg)
+ {
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+-      drm_ioctl_compat_t *fn = NULL;
++      drm_ioctl_compat_t fn = NULL;
+       int ret;
+       if (nr < DRM_COMMAND_BASE)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
+index 25d3495..d81aaf6 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
++++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
+@@ -62,7 +62,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
+       bool can_switch;
+       spin_lock(&dev->count_lock);
+-      can_switch = (dev->open_count == 0);
++      can_switch = (local_read(&dev->open_count) == 0);
+       spin_unlock(&dev->count_lock);
+       return can_switch;
+ }
+diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
+index 489cb8c..0b8d0d3 100644
+--- a/drivers/gpu/drm/qxl/qxl_ttm.c
++++ b/drivers/gpu/drm/qxl/qxl_ttm.c
+@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
+       }
+ }
+-static struct vm_operations_struct qxl_ttm_vm_ops;
++static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
+ static const struct vm_operations_struct *ttm_vm_ops;
+ static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+@@ -147,8 +147,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
+               return r;
+       if (unlikely(ttm_vm_ops == NULL)) {
+               ttm_vm_ops = vma->vm_ops;
++              pax_open_kernel();
+               qxl_ttm_vm_ops = *ttm_vm_ops;
+               qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
++              pax_close_kernel();
+       }
+       vma->vm_ops = &qxl_ttm_vm_ops;
+       return 0;
+@@ -556,25 +558,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
+ static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
+ {
+ #if defined(CONFIG_DEBUG_FS)
+-      static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
+-      static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
+-      unsigned i;
++      static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
++              {
++                      .name = "qxl_mem_mm",
++                      .show = &qxl_mm_dump_table,
++              },
++              {
++                      .name = "qxl_surf_mm",
++                      .show = &qxl_mm_dump_table,
++              }
++      };
+-      for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
+-              if (i == 0)
+-                      sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
+-              else
+-                      sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
+-              qxl_mem_types_list[i].name = qxl_mem_types_names[i];
+-              qxl_mem_types_list[i].show = &qxl_mm_dump_table;
+-              qxl_mem_types_list[i].driver_features = 0;
+-              if (i == 0)
+-                      qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
+-              else
+-                      qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
++      pax_open_kernel();
++      *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
++      *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
++      pax_close_kernel();
+-      }
+-      return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
++      return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
+ #else
+       return 0;
+ #endif
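Annotation: the qxl (and, below, radeon) debugfs hunks replace a loop that sprintf'ed names into parallel static char buffers with tables written out as designated initializers, leaving only the runtime-known private pointers to be patched in under pax_open_kernel(). A standalone sketch of that refactor; info_entry and the dump callbacks are invented stand-ins for drm_info_list and its show hooks.

#include <stdio.h>

struct info_entry {
	const char *name;
	int (*show)(void *data);
	void *data;
};

static int dump_vram(void *data) { printf("vram table at %p\n", data); return 0; }
static int dump_gtt (void *data) { printf("gtt table at %p\n",  data); return 0; }

/* before: a loop sprintf'ing names into static buffers at init time;
 * after: the table is fully described at compile time */
static struct info_entry entries[] = {
	{ .name = "demo_vram_mm", .show = dump_vram },
	{ .name = "demo_gtt_mm",  .show = dump_gtt  },
};

int main(void)
{
	static int vram_priv, gtt_priv;
	unsigned int i;

	/* only the pointers that are unknown until runtime still get patched
	 * in, mirroring the pax_open_kernel()-guarded assignments above */
	entries[0].data = &vram_priv;
	entries[1].data = &gtt_priv;

	for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		entries[i].show(entries[i].data);
	return 0;
}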
+diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
+index d4660cf..70dbe65 100644
+--- a/drivers/gpu/drm/r128/r128_cce.c
++++ b/drivers/gpu/drm/r128/r128_cce.c
+@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
+       /* GH: Simple idle check.
+        */
+-      atomic_set(&dev_priv->idle_count, 0);
++      atomic_set_unchecked(&dev_priv->idle_count, 0);
+       /* We don't support anything other than bus-mastering ring mode,
+        * but the ring can be in either AGP or PCI space for the ring
+diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
+index 930c71b..499aded 100644
+--- a/drivers/gpu/drm/r128/r128_drv.h
++++ b/drivers/gpu/drm/r128/r128_drv.h
+@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
+       int is_pci;
+       unsigned long cce_buffers_offset;
+-      atomic_t idle_count;
++      atomic_unchecked_t idle_count;
+       int page_flipping;
+       int current_page;
+       u32 crtc_offset;
+       u32 crtc_offset_cntl;
+-      atomic_t vbl_received;
++      atomic_unchecked_t vbl_received;
+       u32 color_fmt;
+       unsigned int front_offset;
+diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
+index a954c54..9cc595c 100644
+--- a/drivers/gpu/drm/r128/r128_ioc32.c
++++ b/drivers/gpu/drm/r128/r128_ioc32.c
+@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
+       return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
+ }
+-drm_ioctl_compat_t *r128_compat_ioctls[] = {
++drm_ioctl_compat_t r128_compat_ioctls[] = {
+       [DRM_R128_INIT] = compat_r128_init,
+       [DRM_R128_DEPTH] = compat_r128_depth,
+       [DRM_R128_STIPPLE] = compat_r128_stipple,
+@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
+ long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+-      drm_ioctl_compat_t *fn = NULL;
+       int ret;
+       if (nr < DRM_COMMAND_BASE)
+               return drm_compat_ioctl(filp, cmd, arg);
+-      if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
+-              fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
+-
+-      if (fn != NULL)
++      if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
++              drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
+               ret = (*fn) (filp, cmd, arg);
+-      else
++      } else
+               ret = drm_ioctl(filp, cmd, arg);
+       return ret;
+diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
+index 2ea4f09..d391371 100644
+--- a/drivers/gpu/drm/r128/r128_irq.c
++++ b/drivers/gpu/drm/r128/r128_irq.c
+@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
+       if (crtc != 0)
+               return 0;
+-      return atomic_read(&dev_priv->vbl_received);
++      return atomic_read_unchecked(&dev_priv->vbl_received);
+ }
+ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
+@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
+       /* VBLANK interrupt */
+       if (status & R128_CRTC_VBLANK_INT) {
+               R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
+-              atomic_inc(&dev_priv->vbl_received);
++              atomic_inc_unchecked(&dev_priv->vbl_received);
+               drm_handle_vblank(dev, 0);
+               return IRQ_HANDLED;
+       }
+diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
+index 19bb7e6..de7e2a2 100644
+--- a/drivers/gpu/drm/r128/r128_state.c
++++ b/drivers/gpu/drm/r128/r128_state.c
+@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
+ static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
+ {
+-      if (atomic_read(&dev_priv->idle_count) == 0)
++      if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
+               r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
+       else
+-              atomic_set(&dev_priv->idle_count, 0);
++              atomic_set_unchecked(&dev_priv->idle_count, 0);
+ }
+ #endif
+diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
+index 5a82b6b..9e69c73 100644
+--- a/drivers/gpu/drm/radeon/mkregtable.c
++++ b/drivers/gpu/drm/radeon/mkregtable.c
+@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
+       regex_t mask_rex;
+       regmatch_t match[4];
+       char buf[1024];
+-      size_t end;
++      long end;
+       int len;
+       int done = 0;
+       int r;
+       unsigned o;
+       struct offset *offset;
+       char last_reg_s[10];
+-      int last_reg;
++      unsigned long last_reg;
+       if (regcomp
+           (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index b0dc0b6..a9bfe9c 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1014,7 +1014,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
+       bool can_switch;
+       spin_lock(&dev->count_lock);
+-      can_switch = (dev->open_count == 0);
++      can_switch = (local_read(&dev->open_count) == 0);
+       spin_unlock(&dev->count_lock);
+       return can_switch;
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
+index b369d42..8dd04eb 100644
+--- a/drivers/gpu/drm/radeon/radeon_drv.h
++++ b/drivers/gpu/drm/radeon/radeon_drv.h
+@@ -258,7 +258,7 @@ typedef struct drm_radeon_private {
+       /* SW interrupt */
+       wait_queue_head_t swi_queue;
+-      atomic_t swi_emitted;
++      atomic_unchecked_t swi_emitted;
+       int vblank_crtc;
+       uint32_t irq_enable_reg;
+       uint32_t r500_disp_irq_reg;
+diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
+index c180df8..5fd8186 100644
+--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
++++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
+@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+           || __put_user(req32.param, &request->param)
+-          || __put_user((void __user *)(unsigned long)req32.value,
++          || __put_user((unsigned long)req32.value,
+                         &request->value))
+               return -EFAULT;
+@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
+ #define compat_radeon_cp_setparam NULL
+ #endif /* X86_64 || IA64 */
+-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
++static drm_ioctl_compat_t radeon_compat_ioctls[] = {
+       [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
+       [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
+       [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
+@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
+ long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+-      drm_ioctl_compat_t *fn = NULL;
+       int ret;
+       if (nr < DRM_COMMAND_BASE)
+               return drm_compat_ioctl(filp, cmd, arg);
+-      if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
+-              fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
+-
+-      if (fn != NULL)
++      if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
++              drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
+               ret = (*fn) (filp, cmd, arg);
+-      else
++      } else
+               ret = drm_ioctl(filp, cmd, arg);
+       return ret;
+diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
+index 8d68e97..9dcfed8 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq.c
++++ b/drivers/gpu/drm/radeon/radeon_irq.c
+@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
+       unsigned int ret;
+       RING_LOCALS;
+-      atomic_inc(&dev_priv->swi_emitted);
+-      ret = atomic_read(&dev_priv->swi_emitted);
++      atomic_inc_unchecked(&dev_priv->swi_emitted);
++      ret = atomic_read_unchecked(&dev_priv->swi_emitted);
+       BEGIN_RING(4);
+       OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
+@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
+       drm_radeon_private_t *dev_priv =
+           (drm_radeon_private_t *) dev->dev_private;
+-      atomic_set(&dev_priv->swi_emitted, 0);
++      atomic_set_unchecked(&dev_priv->swi_emitted, 0);
+       DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
+       dev->max_vblank_count = 0x001fffff;
+diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
+index 4d20910..6726b6d 100644
+--- a/drivers/gpu/drm/radeon/radeon_state.c
++++ b/drivers/gpu/drm/radeon/radeon_state.c
+@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
+       if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
+               sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
+-      if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
++      if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
+                              sarea_priv->nbox * sizeof(depth_boxes[0])))
+               return -EFAULT;
+@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
+ {
+       drm_radeon_private_t *dev_priv = dev->dev_private;
+       drm_radeon_getparam_t *param = data;
+-      int value;
++      int value = 0;
+       DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index 6c0ce89..57a2529 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -782,7 +782,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
+       man->size = size >> PAGE_SHIFT;
+ }
+-static struct vm_operations_struct radeon_ttm_vm_ops;
++static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
+ static const struct vm_operations_struct *ttm_vm_ops = NULL;
+ static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+@@ -823,8 +823,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
+       }
+       if (unlikely(ttm_vm_ops == NULL)) {
+               ttm_vm_ops = vma->vm_ops;
++              pax_open_kernel();
+               radeon_ttm_vm_ops = *ttm_vm_ops;
+               radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
++              pax_close_kernel();
+       }
+       vma->vm_ops = &radeon_ttm_vm_ops;
+       return 0;
+@@ -853,38 +855,33 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
+ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
+ {
+ #if defined(CONFIG_DEBUG_FS)
+-      static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
+-      static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
++      static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2] = {
++              {
++                      .name = "radeon_vram_mm",
++                      .show = &radeon_mm_dump_table,
++              },
++              {
++                      .name = "radeon_gtt_mm",
++                      .show = &radeon_mm_dump_table,
++              },
++              {
++                      .name = "ttm_page_pool",
++                      .show = &ttm_page_alloc_debugfs,
++              },
++              {
++                      .name = "ttm_dma_page_pool",
++                      .show = &ttm_dma_page_alloc_debugfs,
++              },
++      };
+       unsigned i;
+-      for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
+-              if (i == 0)
+-                      sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
+-              else
+-                      sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
+-              radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+-              radeon_mem_types_list[i].show = &radeon_mm_dump_table;
+-              radeon_mem_types_list[i].driver_features = 0;
+-              if (i == 0)
+-                      radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
+-              else
+-                      radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
+-
+-      }
+-      /* Add ttm page pool to debugfs */
+-      sprintf(radeon_mem_types_names[i], "ttm_page_pool");
+-      radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+-      radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
+-      radeon_mem_types_list[i].driver_features = 0;
+-      radeon_mem_types_list[i++].data = NULL;
++      pax_open_kernel();
++      *(void **)&radeon_mem_types_list[0].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
++      *(void **)&radeon_mem_types_list[1].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
++      pax_close_kernel();
+ #ifdef CONFIG_SWIOTLB
+-      if (swiotlb_nr_tbl()) {
+-              sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
+-              radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+-              radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
+-              radeon_mem_types_list[i].driver_features = 0;
+-              radeon_mem_types_list[i++].data = NULL;
+-      }
++      if (swiotlb_nr_tbl())
++              i++;
+ #endif
+       return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
+diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
+index 55880d5..9e95342 100644
+--- a/drivers/gpu/drm/radeon/rs690.c
++++ b/drivers/gpu/drm/radeon/rs690.c
+@@ -327,9 +327,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
+               if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
+                       rdev->pm.sideport_bandwidth.full)
+                       rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
+-              read_delay_latency.full = dfixed_const(370 * 800 * 1000);
++              read_delay_latency.full = dfixed_const(800 * 1000);
+               read_delay_latency.full = dfixed_div(read_delay_latency,
+                       rdev->pm.igp_sideport_mclk);
++              a.full = dfixed_const(370);
++              read_delay_latency.full = dfixed_mul(read_delay_latency, a);
+       } else {
+               if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
+                       rdev->pm.k8_bandwidth.full)
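Annotation: the rs690.c change is a 32-bit overflow fix rather than hardening. Assuming the 20.12 fixed-point format of drm_fixed.h (a value v is stored as v << 12 in a u32), dfixed_const(370 * 800 * 1000) no longer fits in 32 bits, so the patch keeps only 800 * 1000 in the constant, performs the division, and multiplies the factor 370 back in afterwards. A quick arithmetic check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 370 * 800 * 1000 = 296,000,000; shifted by 12 it needs ~2^40 bits */
	uint64_t combined = (uint64_t)370 * 800 * 1000 << 12;
	/* 800 * 1000 = 800,000; shifted by 12 it stays below UINT32_MAX */
	uint64_t split    = (uint64_t)800 * 1000 << 12;

	printf("combined constant: %llu (fits in u32: %s)\n",
	       (unsigned long long)combined, combined <= UINT32_MAX ? "yes" : "no");
	printf("split constant:    %llu (fits in u32: %s)\n",
	       (unsigned long long)split, split <= UINT32_MAX ? "yes" : "no");
	return 0;
}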
+diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
+index dbc2def..0a9f710 100644
+--- a/drivers/gpu/drm/ttm/ttm_memory.c
++++ b/drivers/gpu/drm/ttm/ttm_memory.c
+@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
+       zone->glob = glob;
+       glob->zone_kernel = zone;
+       ret = kobject_init_and_add(
+-              &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
++              &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
+       if (unlikely(ret != 0)) {
+               kobject_put(&zone->kobj);
+               return ret;
+@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
+       zone->glob = glob;
+       glob->zone_dma32 = zone;
+       ret = kobject_init_and_add(
+-              &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
++              &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
+       if (unlikely(ret != 0)) {
+               kobject_put(&zone->kobj);
+               return ret;
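Annotation: the two kobject_init_and_add() changes in ttm_memory.c are the classic format-string fix: zone->name was passed where a printf-style format is expected, so any '%' in the name would be parsed as a conversion, while passing "%s" plus the name keeps it inert. A minimal illustration with an invented variadic helper standing in for kobject_init_and_add().

#include <stdarg.h>
#include <stdio.h>

static void demo_label(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	putchar('\n');
}

int main(void)
{
	const char *zone_name = "kernel %n zone";   /* hostile or just unlucky name */

	/* demo_label(zone_name);      would misparse the '%n' as a conversion */
	demo_label("%s", zone_name);  /* safe: the name is treated as plain data */
	return 0;
}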
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+index bd2a3b4..122d9ad 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
+ static int ttm_pool_mm_shrink(struct shrinker *shrink,
+                             struct shrink_control *sc)
+ {
+-      static atomic_t start_pool = ATOMIC_INIT(0);
++      static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
+       unsigned i;
+-      unsigned pool_offset = atomic_add_return(1, &start_pool);
++      unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
+       struct ttm_page_pool *pool;
+       int shrink_pages = sc->nr_to_scan;
+diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
+index dc0c065..58a0782 100644
+--- a/drivers/gpu/drm/udl/udl_fb.c
++++ b/drivers/gpu/drm/udl/udl_fb.c
+@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
+               fb_deferred_io_cleanup(info);
+               kfree(info->fbdefio);
+               info->fbdefio = NULL;
+-              info->fbops->fb_mmap = udl_fb_mmap;
+       }
+       pr_warn("released /dev/fb%d user=%d count=%d\n",
+diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
+index 893a650..6190d3b 100644
+--- a/drivers/gpu/drm/via/via_drv.h
++++ b/drivers/gpu/drm/via/via_drv.h
+@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
+ typedef uint32_t maskarray_t[5];
+ typedef struct drm_via_irq {
+-      atomic_t irq_received;
++      atomic_unchecked_t irq_received;
+       uint32_t pending_mask;
+       uint32_t enable_mask;
+       wait_queue_head_t irq_queue;
+@@ -75,7 +75,7 @@ typedef struct drm_via_private {
+       struct timeval last_vblank;
+       int last_vblank_valid;
+       unsigned usec_per_vblank;
+-      atomic_t vbl_received;
++      atomic_unchecked_t vbl_received;
+       drm_via_state_t hc_state;
+       char pci_buf[VIA_PCI_BUF_SIZE];
+       const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
+diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
+index ac98964..5dbf512 100644
+--- a/drivers/gpu/drm/via/via_irq.c
++++ b/drivers/gpu/drm/via/via_irq.c
+@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
+       if (crtc != 0)
+               return 0;
+-      return atomic_read(&dev_priv->vbl_received);
++      return atomic_read_unchecked(&dev_priv->vbl_received);
+ }
+ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
+@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
+       status = VIA_READ(VIA_REG_INTERRUPT);
+       if (status & VIA_IRQ_VBLANK_PENDING) {
+-              atomic_inc(&dev_priv->vbl_received);
+-              if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
++              atomic_inc_unchecked(&dev_priv->vbl_received);
++              if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
+                       do_gettimeofday(&cur_vblank);
+                       if (dev_priv->last_vblank_valid) {
+                               dev_priv->usec_per_vblank =
+@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
+                       dev_priv->last_vblank = cur_vblank;
+                       dev_priv->last_vblank_valid = 1;
+               }
+-              if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
++              if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
+                       DRM_DEBUG("US per vblank is: %u\n",
+                                 dev_priv->usec_per_vblank);
+               }
+@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
+       for (i = 0; i < dev_priv->num_irqs; ++i) {
+               if (status & cur_irq->pending_mask) {
+-                      atomic_inc(&cur_irq->irq_received);
++                      atomic_inc_unchecked(&cur_irq->irq_received);
+                       DRM_WAKEUP(&cur_irq->irq_queue);
+                       handled = 1;
+                       if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
+@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
+               DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
+                           ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
+                            masks[irq][4]));
+-              cur_irq_sequence = atomic_read(&cur_irq->irq_received);
++              cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
+       } else {
+               DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
+                           (((cur_irq_sequence =
+-                             atomic_read(&cur_irq->irq_received)) -
++                             atomic_read_unchecked(&cur_irq->irq_received)) -
+                             *sequence) <= (1 << 23)));
+       }
+       *sequence = cur_irq_sequence;
+@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
+               }
+               for (i = 0; i < dev_priv->num_irqs; ++i) {
+-                      atomic_set(&cur_irq->irq_received, 0);
++                      atomic_set_unchecked(&cur_irq->irq_received, 0);
+                       cur_irq->enable_mask = dev_priv->irq_masks[i][0];
+                       cur_irq->pending_mask = dev_priv->irq_masks[i][1];
+                       DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
+@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
+       switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
+       case VIA_IRQ_RELATIVE:
+               irqwait->request.sequence +=
+-                      atomic_read(&cur_irq->irq_received);
++                      atomic_read_unchecked(&cur_irq->irq_received);
+               irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
+       case VIA_IRQ_ABSOLUTE:
+               break;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index 13aeda7..4a952d1 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -290,7 +290,7 @@ struct vmw_private {
+        * Fencing and IRQs.
+        */
+-      atomic_t marker_seq;
++      atomic_unchecked_t marker_seq;
+       wait_queue_head_t fence_queue;
+       wait_queue_head_t fifo_queue;
+       int fence_queue_waiters; /* Protected by hw_mutex */
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+index 3eb1486..0a47ee9 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+                (unsigned int) min,
+                (unsigned int) fifo->capabilities);
+-      atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
++      atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+       iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
+       vmw_marker_queue_init(&fifo->marker_queue);
+       return vmw_fifo_send_fence(dev_priv, &dummy);
+@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+                               if (reserveable)
+                                       iowrite32(bytes, fifo_mem +
+                                                 SVGA_FIFO_RESERVED);
+-                              return fifo_mem + (next_cmd >> 2);
++                              return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
+                       } else {
+                               need_bounce = true;
+                       }
+@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
+       fm = vmw_fifo_reserve(dev_priv, bytes);
+       if (unlikely(fm == NULL)) {
+-              *seqno = atomic_read(&dev_priv->marker_seq);
++              *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
+               ret = -ENOMEM;
+               (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
+                                       false, 3*HZ);
+@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
+       }
+       do {
+-              *seqno = atomic_add_return(1, &dev_priv->marker_seq);
++              *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
+       } while (*seqno == 0);
+       if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+index c509d40..3b640c3 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+@@ -138,7 +138,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
+       int ret;
+       num_clips = arg->num_clips;
+-      clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
++      clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
+       if (unlikely(num_clips == 0))
+               return 0;
+@@ -222,7 +222,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
+       int ret;
+       num_clips = arg->num_clips;
+-      clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
++      clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
+       if (unlikely(num_clips == 0))
+               return 0;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+index 4640adb..e1384ed 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
+        * emitted. Then the fence is stale and signaled.
+        */
+-      ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
++      ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
+              > VMW_FENCE_WRAP);
+       return ret;
+@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
+       if (fifo_idle)
+               down_read(&fifo_state->rwsem);
+-      signal_seq = atomic_read(&dev_priv->marker_seq);
++      signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
+       ret = 0;
+       for (;;) {
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
+index 8a8725c2..afed796 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
+@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
+       while (!vmw_lag_lt(queue, us)) {
+               spin_lock(&queue->lock);
+               if (list_empty(&queue->head))
+-                      seqno = atomic_read(&dev_priv->marker_seq);
++                      seqno = atomic_read_unchecked(&dev_priv->marker_seq);
+               else {
+                       marker = list_first_entry(&queue->head,
+                                                struct vmw_marker, head);
+diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/host1x/drm/dc.c
+index 8c04943..4370ed9 100644
+--- a/drivers/gpu/host1x/drm/dc.c
++++ b/drivers/gpu/host1x/drm/dc.c
+@@ -999,7 +999,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
+       }
+       for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+-              dc->debugfs_files[i].data = dc;
++              *(void **)&dc->debugfs_files[i].data = dc;
+       err = drm_debugfs_create_files(dc->debugfs_files,
+                                      ARRAY_SIZE(debugfs_files),
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 402f486..f862d7e 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -2275,7 +2275,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
+ int hid_add_device(struct hid_device *hdev)
+ {
+-      static atomic_t id = ATOMIC_INIT(0);
++      static atomic_unchecked_t id = ATOMIC_INIT(0);
+       int ret;
+       if (WARN_ON(hdev->status & HID_STAT_ADDED))
+@@ -2309,7 +2309,7 @@ int hid_add_device(struct hid_device *hdev)
+       /* XXX hack, any other cleaner solution after the driver core
+        * is converted to allow more than 20 bytes as the device name? */
+       dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
+-                   hdev->vendor, hdev->product, atomic_inc_return(&id));
++                   hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
+       hid_debug_register(hdev, dev_name(&hdev->dev));
+       ret = device_add(&hdev->dev);
+diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
+index 90124ff..3761764 100644
+--- a/drivers/hid/hid-wiimote-debug.c
++++ b/drivers/hid/hid-wiimote-debug.c
+@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
+       else if (size == 0)
+               return -EIO;
+-      if (copy_to_user(u, buf, size))
++      if (size > sizeof(buf) || copy_to_user(u, buf, size))
+               return -EFAULT;
+       *off += size;
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index 0b122f8..b1d8160 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -394,8 +394,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+       int ret = 0;
+       int t;
+-      next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
+-      atomic_inc(&vmbus_connection.next_gpadl_handle);
++      next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
++      atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
+       ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
+       if (ret)
+diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
+index ae49237..380d4c9 100644
+--- a/drivers/hv/hv.c
++++ b/drivers/hv/hv.c
+@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
+       u64 output_address = (output) ? virt_to_phys(output) : 0;
+       u32 output_address_hi = output_address >> 32;
+       u32 output_address_lo = output_address & 0xFFFFFFFF;
+-      void *hypercall_page = hv_context.hypercall_page;
++      void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
+       __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
+                             "=a"(hv_status_lo) : "d" (control_hi),
+diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
+index 12f2f9e..679603c 100644
+--- a/drivers/hv/hyperv_vmbus.h
++++ b/drivers/hv/hyperv_vmbus.h
+@@ -591,7 +591,7 @@ enum vmbus_connect_state {
+ struct vmbus_connection {
+       enum vmbus_connect_state conn_state;
+-      atomic_t next_gpadl_handle;
++      atomic_unchecked_t next_gpadl_handle;
+       /*
+        * Represents channel interrupts. Each bit position represents a
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 4004e54..c2de226 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -668,10 +668,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
+ {
+       int ret = 0;
+-      static atomic_t device_num = ATOMIC_INIT(0);
++      static atomic_unchecked_t device_num = ATOMIC_INIT(0);
+       dev_set_name(&child_device_obj->device, "vmbus_0_%d",
+-                   atomic_inc_return(&device_num));
++                   atomic_inc_return_unchecked(&device_num));
+       child_device_obj->device.bus = &hv_bus;
+       child_device_obj->device.parent = &hv_acpi_dev->dev;
+diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
+index 6351aba..dc4aaf4 100644
+--- a/drivers/hwmon/acpi_power_meter.c
++++ b/drivers/hwmon/acpi_power_meter.c
+@@ -117,7 +117,7 @@ struct sensor_template {
+                      struct device_attribute *devattr,
+                      const char *buf, size_t count);
+       int index;
+-};
++} __do_const;
+ /* Averaging interval */
+ static int update_avg_interval(struct acpi_power_meter_resource *resource)
+@@ -629,7 +629,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
+                         struct sensor_template *attrs)
+ {
+       struct device *dev = &resource->acpi_dev->dev;
+-      struct sensor_device_attribute *sensors =
++      sensor_device_attribute_no_const *sensors =
+               &resource->sensors[resource->num_sensors];
+       int res = 0;
+diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
+index 62c2e32..8f2859a 100644
+--- a/drivers/hwmon/applesmc.c
++++ b/drivers/hwmon/applesmc.c
+@@ -1084,7 +1084,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
+ {
+       struct applesmc_node_group *grp;
+       struct applesmc_dev_attr *node;
+-      struct attribute *attr;
++      attribute_no_const *attr;
+       int ret, i;
+       for (grp = groups; grp->format; grp++) {
+diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
+index b25c643..a13460d 100644
+--- a/drivers/hwmon/asus_atk0110.c
++++ b/drivers/hwmon/asus_atk0110.c
+@@ -152,10 +152,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
+ struct atk_sensor_data {
+       struct list_head list;
+       struct atk_data *data;
+-      struct device_attribute label_attr;
+-      struct device_attribute input_attr;
+-      struct device_attribute limit1_attr;
+-      struct device_attribute limit2_attr;
++      device_attribute_no_const label_attr;
++      device_attribute_no_const input_attr;
++      device_attribute_no_const limit1_attr;
++      device_attribute_no_const limit2_attr;
+       char label_attr_name[ATTR_NAME_SIZE];
+       char input_attr_name[ATTR_NAME_SIZE];
+       char limit1_attr_name[ATTR_NAME_SIZE];
+@@ -275,7 +275,7 @@ static ssize_t atk_name_show(struct device *dev,
+ static struct device_attribute atk_name_attr =
+               __ATTR(name, 0444, atk_name_show, NULL);
+-static void atk_init_attribute(struct device_attribute *attr, char *name,
++static void atk_init_attribute(device_attribute_no_const *attr, char *name,
+               sysfs_show_func show)
+ {
+       sysfs_attr_init(&attr->attr);
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 658ce3a..0d0c2f3 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -790,7 +790,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block coretemp_cpu_notifier __refdata = {
++static struct notifier_block coretemp_cpu_notifier = {
+       .notifier_call = coretemp_cpu_callback,
+ };
+diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
+index 1429f6e..ee03d59 100644
+--- a/drivers/hwmon/ibmaem.c
++++ b/drivers/hwmon/ibmaem.c
+@@ -926,7 +926,7 @@ static int aem_register_sensors(struct aem_data *data,
+                               struct aem_rw_sensor_template *rw)
+ {
+       struct device *dev = &data->pdev->dev;
+-      struct sensor_device_attribute *sensors = data->sensors;
++      sensor_device_attribute_no_const *sensors = data->sensors;
+       int err;
+       /* Set up read-only sensors */
+diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
+index 52b77af..aed1ddf 100644
+--- a/drivers/hwmon/iio_hwmon.c
++++ b/drivers/hwmon/iio_hwmon.c
+@@ -73,7 +73,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
+ {
+       struct device *dev = &pdev->dev;
+       struct iio_hwmon_state *st;
+-      struct sensor_device_attribute *a;
++      sensor_device_attribute_no_const *a;
+       int ret, i;
+       int in_i = 1, temp_i = 1, curr_i = 1;
+       enum iio_chan_type type;
+diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
+index 9add6092..ee7ba3f 100644
+--- a/drivers/hwmon/pmbus/pmbus_core.c
++++ b/drivers/hwmon/pmbus/pmbus_core.c
+@@ -781,7 +781,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
+       return 0;
+ }
+-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
++static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
+                               const char *name,
+                               umode_t mode,
+                               ssize_t (*show)(struct device *dev,
+@@ -798,7 +798,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
+       dev_attr->store = store;
+ }
+-static void pmbus_attr_init(struct sensor_device_attribute *a,
++static void pmbus_attr_init(sensor_device_attribute_no_const *a,
+                           const char *name,
+                           umode_t mode,
+                           ssize_t (*show)(struct device *dev,
+@@ -820,7 +820,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
+                            u16 reg, u8 mask)
+ {
+       struct pmbus_boolean *boolean;
+-      struct sensor_device_attribute *a;
++      sensor_device_attribute_no_const *a;
+       boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
+       if (!boolean)
+@@ -845,7 +845,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
+                                            bool update, bool readonly)
+ {
+       struct pmbus_sensor *sensor;
+-      struct device_attribute *a;
++      device_attribute_no_const *a;
+       sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
+       if (!sensor)
+@@ -876,7 +876,7 @@ static int pmbus_add_label(struct pmbus_data *data,
+                          const char *lstring, int index)
+ {
+       struct pmbus_label *label;
+-      struct device_attribute *a;
++      device_attribute_no_const *a;
+       label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
+       if (!label)
+diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
+index 2507f90..1645765 100644
+--- a/drivers/hwmon/sht15.c
++++ b/drivers/hwmon/sht15.c
+@@ -169,7 +169,7 @@ struct sht15_data {
+       int                             supply_uv;
+       bool                            supply_uv_valid;
+       struct work_struct              update_supply_work;
+-      atomic_t                        interrupt_handled;
++      atomic_unchecked_t              interrupt_handled;
+ };
+ /**
+@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
+       ret = gpio_direction_input(data->pdata->gpio_data);
+       if (ret)
+               return ret;
+-      atomic_set(&data->interrupt_handled, 0);
++      atomic_set_unchecked(&data->interrupt_handled, 0);
+       enable_irq(gpio_to_irq(data->pdata->gpio_data));
+       if (gpio_get_value(data->pdata->gpio_data) == 0) {
+               disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
+               /* Only relevant if the interrupt hasn't occurred. */
+-              if (!atomic_read(&data->interrupt_handled))
++              if (!atomic_read_unchecked(&data->interrupt_handled))
+                       schedule_work(&data->read_work);
+       }
+       ret = wait_event_timeout(data->wait_queue,
+@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
+       /* First disable the interrupt */
+       disable_irq_nosync(irq);
+-      atomic_inc(&data->interrupt_handled);
++      atomic_inc_unchecked(&data->interrupt_handled);
+       /* Then schedule a reading work struct */
+       if (data->state != SHT15_READING_NOTHING)
+               schedule_work(&data->read_work);
+@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
+                * If not, then start the interrupt again - care here as could
+                * have gone low in meantime so verify it hasn't!
+                */
+-              atomic_set(&data->interrupt_handled, 0);
++              atomic_set_unchecked(&data->interrupt_handled, 0);
+               enable_irq(gpio_to_irq(data->pdata->gpio_data));
+               /* If still not occurred or another handler was scheduled */
+               if (gpio_get_value(data->pdata->gpio_data)
+-                  || atomic_read(&data->interrupt_handled))
++                  || atomic_read_unchecked(&data->interrupt_handled))
+                       return;
+       }
+diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
+index 76f157b..9c0db1b 100644
+--- a/drivers/hwmon/via-cputemp.c
++++ b/drivers/hwmon/via-cputemp.c
+@@ -296,7 +296,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
++static struct notifier_block via_cputemp_cpu_notifier = {
+       .notifier_call = via_cputemp_cpu_callback,
+ };
+diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
+index 07f01ac..d79ad3d 100644
+--- a/drivers/i2c/busses/i2c-amd756-s4882.c
++++ b/drivers/i2c/busses/i2c-amd756-s4882.c
+@@ -43,7 +43,7 @@
+ extern struct i2c_adapter amd756_smbus;
+ static struct i2c_adapter *s4882_adapter;
+-static struct i2c_algorithm *s4882_algo;
++static i2c_algorithm_no_const *s4882_algo;
+ /* Wrapper access functions for multiplexed SMBus */
+ static DEFINE_MUTEX(amd756_lock);
+diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
+index 2ca268d..c6acbdf 100644
+--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
++++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
+@@ -41,7 +41,7 @@
+ extern struct i2c_adapter *nforce2_smbus;
+ static struct i2c_adapter *s4985_adapter;
+-static struct i2c_algorithm *s4985_algo;
++static i2c_algorithm_no_const *s4985_algo;
+ /* Wrapper access functions for multiplexed SMBus */
+ static DEFINE_MUTEX(nforce2_lock);
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index c3ccdea..5b3dc1a 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -271,7 +271,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
+                       break;
+               }
+-              data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
++              data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
+               rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
+               if (IS_ERR(rdwr_pa[i].buf)) {
+                       res = PTR_ERR(rdwr_pa[i].buf);
+diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
+index 2ff6204..218c16e 100644
+--- a/drivers/ide/ide-cd.c
++++ b/drivers/ide/ide-cd.c
+@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
+               alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+               if ((unsigned long)buf & alignment
+                   || blk_rq_bytes(rq) & q->dma_pad_mask
+-                  || object_is_on_stack(buf))
++                  || object_starts_on_stack(buf))
+                       drive->dma = 0;
+       }
+ }
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index e145931..08bfc59 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -506,7 +506,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
+ }
+ static
+-int __iio_device_attr_init(struct device_attribute *dev_attr,
++int __iio_device_attr_init(device_attribute_no_const *dev_attr,
+                          const char *postfix,
+                          struct iio_chan_spec const *chan,
+                          ssize_t (*readfunc)(struct device *dev,
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 784b97c..c9ceadf 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
+ struct cm_counter_group {
+       struct kobject obj;
+-      atomic_long_t counter[CM_ATTR_COUNT];
++      atomic_long_unchecked_t counter[CM_ATTR_COUNT];
+ };
+ struct cm_counter_attribute {
+@@ -1395,7 +1395,7 @@ static void cm_dup_req_handler(struct cm_work *work,
+       struct ib_mad_send_buf *msg = NULL;
+       int ret;
+-      atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++      atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+                       counter[CM_REQ_COUNTER]);
+       /* Quick state check to discard duplicate REQs. */
+@@ -1779,7 +1779,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
+       if (!cm_id_priv)
+               return;
+-      atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++      atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+                       counter[CM_REP_COUNTER]);
+       ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
+       if (ret)
+@@ -1946,7 +1946,7 @@ static int cm_rtu_handler(struct cm_work *work)
+       if (cm_id_priv->id.state != IB_CM_REP_SENT &&
+           cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
+               spin_unlock_irq(&cm_id_priv->lock);
+-              atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++              atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+                               counter[CM_RTU_COUNTER]);
+               goto out;
+       }
+@@ -2129,7 +2129,7 @@ static int cm_dreq_handler(struct cm_work *work)
+       cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
+                                  dreq_msg->local_comm_id);
+       if (!cm_id_priv) {
+-              atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++              atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+                               counter[CM_DREQ_COUNTER]);
+               cm_issue_drep(work->port, work->mad_recv_wc);
+               return -EINVAL;
+@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_work *work)
+       case IB_CM_MRA_REP_RCVD:
+               break;
+       case IB_CM_TIMEWAIT:
+-              atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++              atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+                               counter[CM_DREQ_COUNTER]);
+               if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
+                       goto unlock;
+@@ -2168,7 +2168,7 @@ static int cm_dreq_handler(struct cm_work *work)
+                       cm_free_msg(msg);
+               goto deref;
+       case IB_CM_DREQ_RCVD:
+-              atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++              atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+                               counter[CM_DREQ_COUNTER]);
+               goto unlock;
+       default:
+@@ -2535,7 +2535,7 @@ static int cm_mra_handler(struct cm_work *work)
+                   ib_modify_mad(cm_id_priv->av.port->mad_agent,
+                                 cm_id_priv->msg, timeout)) {
+                       if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
+-                              atomic_long_inc(&work->port->
++                              atomic_long_inc_unchecked(&work->port->
+                                               counter_group[CM_RECV_DUPLICATES].
+                                               counter[CM_MRA_COUNTER]);
+                       goto out;
+@@ -2544,7 +2544,7 @@ static int cm_mra_handler(struct cm_work *work)
+               break;
+       case IB_CM_MRA_REQ_RCVD:
+       case IB_CM_MRA_REP_RCVD:
+-              atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++              atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+                               counter[CM_MRA_COUNTER]);
+               /* fall through */
+       default:
+@@ -2706,7 +2706,7 @@ static int cm_lap_handler(struct cm_work *work)
+       case IB_CM_LAP_IDLE:
+               break;
+       case IB_CM_MRA_LAP_SENT:
+-              atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++              atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+                               counter[CM_LAP_COUNTER]);
+               if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
+                       goto unlock;
+@@ -2722,7 +2722,7 @@ static int cm_lap_handler(struct cm_work *work)
+                       cm_free_msg(msg);
+               goto deref;
+       case IB_CM_LAP_RCVD:
+-              atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++              atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+                               counter[CM_LAP_COUNTER]);
+               goto unlock;
+       default:
+@@ -3006,7 +3006,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
+       cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
+       if (cur_cm_id_priv) {
+               spin_unlock_irq(&cm.lock);
+-              atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++              atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+                               counter[CM_SIDR_REQ_COUNTER]);
+               goto out; /* Duplicate message. */
+       }
+@@ -3218,10 +3218,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
+       if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
+               msg->retries = 1;
+-      atomic_long_add(1 + msg->retries,
++      atomic_long_add_unchecked(1 + msg->retries,
+                       &port->counter_group[CM_XMIT].counter[attr_index]);
+       if (msg->retries)
+-              atomic_long_add(msg->retries,
++              atomic_long_add_unchecked(msg->retries,
+                               &port->counter_group[CM_XMIT_RETRIES].
+                               counter[attr_index]);
+@@ -3431,7 +3431,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
+       }
+       attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
+-      atomic_long_inc(&port->counter_group[CM_RECV].
++      atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
+                       counter[attr_id - CM_ATTR_ID_OFFSET]);
+       work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
+@@ -3636,7 +3636,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
+       cm_attr = container_of(attr, struct cm_counter_attribute, attr);
+       return sprintf(buf, "%ld\n",
+-                     atomic_long_read(&group->counter[cm_attr->index]));
++                     atomic_long_read_unchecked(&group->counter[cm_attr->index]));
+ }
+ static const struct sysfs_ops cm_counter_ops = {
+diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
+index 9f5ad7c..588cd84 100644
+--- a/drivers/infiniband/core/fmr_pool.c
++++ b/drivers/infiniband/core/fmr_pool.c
+@@ -98,8 +98,8 @@ struct ib_fmr_pool {
+       struct task_struct       *thread;
+-      atomic_t                  req_ser;
+-      atomic_t                  flush_ser;
++      atomic_unchecked_t        req_ser;
++      atomic_unchecked_t        flush_ser;
+       wait_queue_head_t         force_wait;
+ };
+@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
+       struct ib_fmr_pool *pool = pool_ptr;
+       do {
+-              if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
++              if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
+                       ib_fmr_batch_release(pool);
+-                      atomic_inc(&pool->flush_ser);
++                      atomic_inc_unchecked(&pool->flush_ser);
+                       wake_up_interruptible(&pool->force_wait);
+                       if (pool->flush_function)
+@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
+               }
+               set_current_state(TASK_INTERRUPTIBLE);
+-              if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
++              if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
+                   !kthread_should_stop())
+                       schedule();
+               __set_current_state(TASK_RUNNING);
+@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
+       pool->dirty_watermark = params->dirty_watermark;
+       pool->dirty_len       = 0;
+       spin_lock_init(&pool->pool_lock);
+-      atomic_set(&pool->req_ser,   0);
+-      atomic_set(&pool->flush_ser, 0);
++      atomic_set_unchecked(&pool->req_ser,   0);
++      atomic_set_unchecked(&pool->flush_ser, 0);
+       init_waitqueue_head(&pool->force_wait);
+       pool->thread = kthread_run(ib_fmr_cleanup_thread,
+@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
+       }
+       spin_unlock_irq(&pool->pool_lock);
+-      serial = atomic_inc_return(&pool->req_ser);
++      serial = atomic_inc_return_unchecked(&pool->req_ser);
+       wake_up_process(pool->thread);
+       if (wait_event_interruptible(pool->force_wait,
+-                                   atomic_read(&pool->flush_ser) - serial >= 0))
++                                   atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
+               return -EINTR;
+       return 0;
+@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
+               } else {
+                       list_add_tail(&fmr->list, &pool->dirty_list);
+                       if (++pool->dirty_len >= pool->dirty_watermark) {
+-                              atomic_inc(&pool->req_ser);
++                              atomic_inc_unchecked(&pool->req_ser);
+                               wake_up_process(pool->thread);
+                       }
+               }
+diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
+index 4cb8eb2..146bf60 100644
+--- a/drivers/infiniband/hw/cxgb4/mem.c
++++ b/drivers/infiniband/hw/cxgb4/mem.c
+@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
+       int err;
+       struct fw_ri_tpte tpt;
+       u32 stag_idx;
+-      static atomic_t key;
++      static atomic_unchecked_t key;
+       if (c4iw_fatal_error(rdev))
+               return -EIO;
+@@ -266,7 +266,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
+               if (rdev->stats.stag.cur > rdev->stats.stag.max)
+                       rdev->stats.stag.max = rdev->stats.stag.cur;
+               mutex_unlock(&rdev->stats.lock);
+-              *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
++              *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
+       }
+       PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
+            __func__, stag_state, type, pdid, stag_idx);
+diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
+index 79b3dbc..96e5fcc 100644
+--- a/drivers/infiniband/hw/ipath/ipath_rc.c
++++ b/drivers/infiniband/hw/ipath/ipath_rc.c
+@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
+               struct ib_atomic_eth *ateth;
+               struct ipath_ack_entry *e;
+               u64 vaddr;
+-              atomic64_t *maddr;
++              atomic64_unchecked_t *maddr;
+               u64 sdata;
+               u32 rkey;
+               u8 next;
+@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
+                                           IB_ACCESS_REMOTE_ATOMIC)))
+                       goto nack_acc_unlck;
+               /* Perform atomic OP and save result. */
+-              maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
++              maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
+               sdata = be64_to_cpu(ateth->swap_data);
+               e = &qp->s_ack_queue[qp->r_head_ack_queue];
+               e->atomic_data = (opcode == OP(FETCH_ADD)) ?
+-                      (u64) atomic64_add_return(sdata, maddr) - sdata :
++                      (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
+                       (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+                                     be64_to_cpu(ateth->compare_data),
+                                     sdata);
+diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
+index 1f95bba..9530f87 100644
+--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
++++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
+@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
+       unsigned long flags;
+       struct ib_wc wc;
+       u64 sdata;
+-      atomic64_t *maddr;
++      atomic64_unchecked_t *maddr;
+       enum ib_wc_status send_status;
+       /*
+@@ -382,11 +382,11 @@ again:
+                                           IB_ACCESS_REMOTE_ATOMIC)))
+                       goto acc_err;
+               /* Perform atomic OP and save result. */
+-              maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
++              maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
+               sdata = wqe->wr.wr.atomic.compare_add;
+               *(u64 *) sqp->s_sge.sge.vaddr =
+                       (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
+-                      (u64) atomic64_add_return(sdata, maddr) - sdata :
++                      (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
+                       (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+                                     sdata, wqe->wr.wr.atomic.swap);
+               goto send_comp;
+diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
+index 9d3e5c1..d9afe4a 100644
+--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
++++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
+@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
+       mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
+ }
+-int mthca_QUERY_FW(struct mthca_dev *dev)
++int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
+ {
+       struct mthca_mailbox *mailbox;
+       u32 *outbox;
+diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
+index ed9a989..e0c5871 100644
+--- a/drivers/infiniband/hw/mthca/mthca_mr.c
++++ b/drivers/infiniband/hw/mthca/mthca_mr.c
+@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
+               return key;
+ }
+-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
++int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
+                  u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
+ {
+       struct mthca_mailbox *mailbox;
+diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
+index 4291410..d2ab1fb 100644
+--- a/drivers/infiniband/hw/nes/nes.c
++++ b/drivers/infiniband/hw/nes/nes.c
+@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
+ LIST_HEAD(nes_adapter_list);
+ static LIST_HEAD(nes_dev_list);
+-atomic_t qps_destroyed;
++atomic_unchecked_t qps_destroyed;
+ static unsigned int ee_flsh_adapter;
+ static unsigned int sysfs_nonidx_addr;
+@@ -269,7 +269,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
+       struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
+       struct nes_adapter *nesadapter = nesdev->nesadapter;
+-      atomic_inc(&qps_destroyed);
++      atomic_inc_unchecked(&qps_destroyed);
+       /* Free the control structures */
+diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
+index 33cc589..3bd6538 100644
+--- a/drivers/infiniband/hw/nes/nes.h
++++ b/drivers/infiniband/hw/nes/nes.h
+@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
+ extern unsigned int wqm_quanta;
+ extern struct list_head nes_adapter_list;
+-extern atomic_t cm_connects;
+-extern atomic_t cm_accepts;
+-extern atomic_t cm_disconnects;
+-extern atomic_t cm_closes;
+-extern atomic_t cm_connecteds;
+-extern atomic_t cm_connect_reqs;
+-extern atomic_t cm_rejects;
+-extern atomic_t mod_qp_timouts;
+-extern atomic_t qps_created;
+-extern atomic_t qps_destroyed;
+-extern atomic_t sw_qps_destroyed;
++extern atomic_unchecked_t cm_connects;
++extern atomic_unchecked_t cm_accepts;
++extern atomic_unchecked_t cm_disconnects;
++extern atomic_unchecked_t cm_closes;
++extern atomic_unchecked_t cm_connecteds;
++extern atomic_unchecked_t cm_connect_reqs;
++extern atomic_unchecked_t cm_rejects;
++extern atomic_unchecked_t mod_qp_timouts;
++extern atomic_unchecked_t qps_created;
++extern atomic_unchecked_t qps_destroyed;
++extern atomic_unchecked_t sw_qps_destroyed;
+ extern u32 mh_detected;
+ extern u32 mh_pauses_sent;
+ extern u32 cm_packets_sent;
+@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
+ extern u32 cm_packets_received;
+ extern u32 cm_packets_dropped;
+ extern u32 cm_packets_retrans;
+-extern atomic_t cm_listens_created;
+-extern atomic_t cm_listens_destroyed;
++extern atomic_unchecked_t cm_listens_created;
++extern atomic_unchecked_t cm_listens_destroyed;
+ extern u32 cm_backlog_drops;
+-extern atomic_t cm_loopbacks;
+-extern atomic_t cm_nodes_created;
+-extern atomic_t cm_nodes_destroyed;
+-extern atomic_t cm_accel_dropped_pkts;
+-extern atomic_t cm_resets_recvd;
+-extern atomic_t pau_qps_created;
+-extern atomic_t pau_qps_destroyed;
++extern atomic_unchecked_t cm_loopbacks;
++extern atomic_unchecked_t cm_nodes_created;
++extern atomic_unchecked_t cm_nodes_destroyed;
++extern atomic_unchecked_t cm_accel_dropped_pkts;
++extern atomic_unchecked_t cm_resets_recvd;
++extern atomic_unchecked_t pau_qps_created;
++extern atomic_unchecked_t pau_qps_destroyed;
+ extern u32 int_mod_timer_init;
+ extern u32 int_mod_cq_depth_256;
+diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
+index 24b9f1a..00fd004 100644
+--- a/drivers/infiniband/hw/nes/nes_cm.c
++++ b/drivers/infiniband/hw/nes/nes_cm.c
+@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
+ u32 cm_packets_retrans;
+ u32 cm_packets_created;
+ u32 cm_packets_received;
+-atomic_t cm_listens_created;
+-atomic_t cm_listens_destroyed;
++atomic_unchecked_t cm_listens_created;
++atomic_unchecked_t cm_listens_destroyed;
+ u32 cm_backlog_drops;
+-atomic_t cm_loopbacks;
+-atomic_t cm_nodes_created;
+-atomic_t cm_nodes_destroyed;
+-atomic_t cm_accel_dropped_pkts;
+-atomic_t cm_resets_recvd;
++atomic_unchecked_t cm_loopbacks;
++atomic_unchecked_t cm_nodes_created;
++atomic_unchecked_t cm_nodes_destroyed;
++atomic_unchecked_t cm_accel_dropped_pkts;
++atomic_unchecked_t cm_resets_recvd;
+ static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
+ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
+@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
+ static struct nes_cm_core *g_cm_core;
+-atomic_t cm_connects;
+-atomic_t cm_accepts;
+-atomic_t cm_disconnects;
+-atomic_t cm_closes;
+-atomic_t cm_connecteds;
+-atomic_t cm_connect_reqs;
+-atomic_t cm_rejects;
++atomic_unchecked_t cm_connects;
++atomic_unchecked_t cm_accepts;
++atomic_unchecked_t cm_disconnects;
++atomic_unchecked_t cm_closes;
++atomic_unchecked_t cm_connecteds;
++atomic_unchecked_t cm_connect_reqs;
++atomic_unchecked_t cm_rejects;
+ int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
+ {
+@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
+               kfree(listener);
+               listener = NULL;
+               ret = 0;
+-              atomic_inc(&cm_listens_destroyed);
++              atomic_inc_unchecked(&cm_listens_destroyed);
+       } else {
+               spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+       }
+@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
+                 cm_node->rem_mac);
+       add_hte_node(cm_core, cm_node);
+-      atomic_inc(&cm_nodes_created);
++      atomic_inc_unchecked(&cm_nodes_created);
+       return cm_node;
+ }
+@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
+       }
+       atomic_dec(&cm_core->node_cnt);
+-      atomic_inc(&cm_nodes_destroyed);
++      atomic_inc_unchecked(&cm_nodes_destroyed);
+       nesqp = cm_node->nesqp;
+       if (nesqp) {
+               nesqp->cm_node = NULL;
+@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
+ static void drop_packet(struct sk_buff *skb)
+ {
+-      atomic_inc(&cm_accel_dropped_pkts);
++      atomic_inc_unchecked(&cm_accel_dropped_pkts);
+       dev_kfree_skb_any(skb);
+ }
+@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ {
+       int     reset = 0;      /* whether to send reset in case of err.. */
+-      atomic_inc(&cm_resets_recvd);
++      atomic_inc_unchecked(&cm_resets_recvd);
+       nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
+                       " refcnt=%d\n", cm_node, cm_node->state,
+                       atomic_read(&cm_node->ref_count));
+@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
+                               rem_ref_cm_node(cm_node->cm_core, cm_node);
+                               return NULL;
+                       }
+-                      atomic_inc(&cm_loopbacks);
++                      atomic_inc_unchecked(&cm_loopbacks);
+                       loopbackremotenode->loopbackpartner = cm_node;
+                       loopbackremotenode->tcp_cntxt.rcv_wscale =
+                               NES_CM_DEFAULT_RCV_WND_SCALE;
+@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
+                               nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
+                       else {
+                               rem_ref_cm_node(cm_core, cm_node);
+-                              atomic_inc(&cm_accel_dropped_pkts);
++                              atomic_inc_unchecked(&cm_accel_dropped_pkts);
+                               dev_kfree_skb_any(skb);
+                       }
+                       break;
+@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
+       if ((cm_id) && (cm_id->event_handler)) {
+               if (issue_disconn) {
+-                      atomic_inc(&cm_disconnects);
++                      atomic_inc_unchecked(&cm_disconnects);
+                       cm_event.event = IW_CM_EVENT_DISCONNECT;
+                       cm_event.status = disconn_status;
+                       cm_event.local_addr = cm_id->local_addr;
+@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
+               }
+               if (issue_close) {
+-                      atomic_inc(&cm_closes);
++                      atomic_inc_unchecked(&cm_closes);
+                       nes_disconnect(nesqp, 1);
+                       cm_id->provider_data = nesqp;
+@@ -3033,7 +3033,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+       nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
+               nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
+-      atomic_inc(&cm_accepts);
++      atomic_inc_unchecked(&cm_accepts);
+       nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
+                       netdev_refcnt_read(nesvnic->netdev));
+@@ -3228,7 +3228,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+       struct nes_cm_core *cm_core;
+       u8 *start_buff;
+-      atomic_inc(&cm_rejects);
++      atomic_inc_unchecked(&cm_rejects);
+       cm_node = (struct nes_cm_node *)cm_id->provider_data;
+       loopback = cm_node->loopbackpartner;
+       cm_core = cm_node->cm_core;
+@@ -3288,7 +3288,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+                 ntohl(cm_id->local_addr.sin_addr.s_addr),
+                 ntohs(cm_id->local_addr.sin_port));
+-      atomic_inc(&cm_connects);
++      atomic_inc_unchecked(&cm_connects);
+       nesqp->active_conn = 1;
+       /* cache the cm_id in the qp */
+@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
+                       g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
+                       return err;
+               }
+-              atomic_inc(&cm_listens_created);
++              atomic_inc_unchecked(&cm_listens_created);
+       }
+       cm_id->add_ref(cm_id);
+@@ -3499,7 +3499,7 @@ static void cm_event_connected(struct nes_cm_event *event)
+       if (nesqp->destroyed)
+               return;
+-      atomic_inc(&cm_connecteds);
++      atomic_inc_unchecked(&cm_connecteds);
+       nes_debug(NES_DBG_CM, "QP%u attempting to connect to  0x%08X:0x%04X on"
+                 " local port 0x%04X. jiffies = %lu.\n",
+                 nesqp->hwqp.qp_id,
+@@ -3679,7 +3679,7 @@ static void cm_event_reset(struct nes_cm_event *event)
+       cm_id->add_ref(cm_id);
+       ret = cm_id->event_handler(cm_id, &cm_event);
+-      atomic_inc(&cm_closes);
++      atomic_inc_unchecked(&cm_closes);
+       cm_event.event = IW_CM_EVENT_CLOSE;
+       cm_event.status = 0;
+       cm_event.provider_data = cm_id->provider_data;
+@@ -3715,7 +3715,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
+               return;
+       cm_id = cm_node->cm_id;
+-      atomic_inc(&cm_connect_reqs);
++      atomic_inc_unchecked(&cm_connect_reqs);
+       nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
+                 cm_node, cm_id, jiffies);
+@@ -3755,7 +3755,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
+               return;
+       cm_id = cm_node->cm_id;
+-      atomic_inc(&cm_connect_reqs);
++      atomic_inc_unchecked(&cm_connect_reqs);
+       nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
+                 cm_node, cm_id, jiffies);
+diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
+index 4166452..fc952c3 100644
+--- a/drivers/infiniband/hw/nes/nes_mgt.c
++++ b/drivers/infiniband/hw/nes/nes_mgt.c
+@@ -40,8 +40,8 @@
+ #include "nes.h"
+ #include "nes_mgt.h"
+-atomic_t pau_qps_created;
+-atomic_t pau_qps_destroyed;
++atomic_unchecked_t pau_qps_created;
++atomic_unchecked_t pau_qps_destroyed;
+ static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
+ {
+@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
+ {
+       struct sk_buff *skb;
+       unsigned long flags;
+-      atomic_inc(&pau_qps_destroyed);
++      atomic_inc_unchecked(&pau_qps_destroyed);
+       /* Free packets that have not yet been forwarded */
+       /* Lock is acquired by skb_dequeue when removing the skb */
+@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
+                                       cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
+                               skb_queue_head_init(&nesqp->pau_list);
+                               spin_lock_init(&nesqp->pau_lock);
+-                              atomic_inc(&pau_qps_created);
++                              atomic_inc_unchecked(&pau_qps_created);
+                               nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
+                       }
+diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
+index 49eb511..a774366 100644
+--- a/drivers/infiniband/hw/nes/nes_nic.c
++++ b/drivers/infiniband/hw/nes/nes_nic.c
+@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
+       target_stat_values[++index] = mh_detected;
+       target_stat_values[++index] = mh_pauses_sent;
+       target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
+-      target_stat_values[++index] = atomic_read(&cm_connects);
+-      target_stat_values[++index] = atomic_read(&cm_accepts);
+-      target_stat_values[++index] = atomic_read(&cm_disconnects);
+-      target_stat_values[++index] = atomic_read(&cm_connecteds);
+-      target_stat_values[++index] = atomic_read(&cm_connect_reqs);
+-      target_stat_values[++index] = atomic_read(&cm_rejects);
+-      target_stat_values[++index] = atomic_read(&mod_qp_timouts);
+-      target_stat_values[++index] = atomic_read(&qps_created);
+-      target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
+-      target_stat_values[++index] = atomic_read(&qps_destroyed);
+-      target_stat_values[++index] = atomic_read(&cm_closes);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
++      target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
++      target_stat_values[++index] = atomic_read_unchecked(&qps_created);
++      target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
++      target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
+       target_stat_values[++index] = cm_packets_sent;
+       target_stat_values[++index] = cm_packets_bounced;
+       target_stat_values[++index] = cm_packets_created;
+       target_stat_values[++index] = cm_packets_received;
+       target_stat_values[++index] = cm_packets_dropped;
+       target_stat_values[++index] = cm_packets_retrans;
+-      target_stat_values[++index] = atomic_read(&cm_listens_created);
+-      target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
+       target_stat_values[++index] = cm_backlog_drops;
+-      target_stat_values[++index] = atomic_read(&cm_loopbacks);
+-      target_stat_values[++index] = atomic_read(&cm_nodes_created);
+-      target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
+-      target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
+-      target_stat_values[++index] = atomic_read(&cm_resets_recvd);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
+       target_stat_values[++index] = nesadapter->free_4kpbl;
+       target_stat_values[++index] = nesadapter->free_256pbl;
+       target_stat_values[++index] = int_mod_timer_init;
+       target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+       target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+       target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
+-      target_stat_values[++index] = atomic_read(&pau_qps_created);
+-      target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
++      target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
++      target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
+ }
+ /**
+diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
+index 8f67fe2..8960859 100644
+--- a/drivers/infiniband/hw/nes/nes_verbs.c
++++ b/drivers/infiniband/hw/nes/nes_verbs.c
+@@ -46,9 +46,9 @@
+ #include <rdma/ib_umem.h>
+-atomic_t mod_qp_timouts;
+-atomic_t qps_created;
+-atomic_t sw_qps_destroyed;
++atomic_unchecked_t mod_qp_timouts;
++atomic_unchecked_t qps_created;
++atomic_unchecked_t sw_qps_destroyed;
+ static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
+@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
+       if (init_attr->create_flags)
+               return ERR_PTR(-EINVAL);
+-      atomic_inc(&qps_created);
++      atomic_inc_unchecked(&qps_created);
+       switch (init_attr->qp_type) {
+               case IB_QPT_RC:
+                       if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
+@@ -1465,7 +1465,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
+       struct iw_cm_event cm_event;
+       int ret = 0;
+-      atomic_inc(&sw_qps_destroyed);
++      atomic_inc_unchecked(&sw_qps_destroyed);
+       nesqp->destroyed = 1;
+       /* Blow away the connection if it exists. */
+diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
+index 4d11575..3e890e5 100644
+--- a/drivers/infiniband/hw/qib/qib.h
++++ b/drivers/infiniband/hw/qib/qib.h
+@@ -51,6 +51,7 @@
+ #include <linux/completion.h>
+ #include <linux/kref.h>
+ #include <linux/sched.h>
++#include <linux/slab.h>
+ #include "qib_common.h"
+ #include "qib_verbs.h"
+diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
+index da739d9..da1c7f4 100644
+--- a/drivers/input/gameport/gameport.c
++++ b/drivers/input/gameport/gameport.c
+@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
+  */
+ static void gameport_init_port(struct gameport *gameport)
+ {
+-      static atomic_t gameport_no = ATOMIC_INIT(0);
++      static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
+       __module_get(THIS_MODULE);
+       mutex_init(&gameport->drv_mutex);
+       device_initialize(&gameport->dev);
+       dev_set_name(&gameport->dev, "gameport%lu",
+-                      (unsigned long)atomic_inc_return(&gameport_no) - 1);
++                      (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
+       gameport->dev.bus = &gameport_bus;
+       gameport->dev.release = gameport_release_port;
+       if (gameport->parent)
+diff --git a/drivers/input/input.c b/drivers/input/input.c
+index c044699..174d71a 100644
+--- a/drivers/input/input.c
++++ b/drivers/input/input.c
+@@ -2019,7 +2019,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
+  */
+ int input_register_device(struct input_dev *dev)
+ {
+-      static atomic_t input_no = ATOMIC_INIT(0);
++      static atomic_unchecked_t input_no = ATOMIC_INIT(0);
+       struct input_devres *devres = NULL;
+       struct input_handler *handler;
+       unsigned int packet_size;
+@@ -2074,7 +2074,7 @@ int input_register_device(struct input_dev *dev)
+               dev->setkeycode = input_default_setkeycode;
+       dev_set_name(&dev->dev, "input%ld",
+-                   (unsigned long) atomic_inc_return(&input_no) - 1);
++                   (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
+       error = device_add(&dev->dev);
+       if (error)
+diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
+index 04c69af..5f92d00 100644
+--- a/drivers/input/joystick/sidewinder.c
++++ b/drivers/input/joystick/sidewinder.c
+@@ -30,6 +30,7 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
++#include <linux/sched.h>
+ #include <linux/init.h>
+ #include <linux/input.h>
+ #include <linux/gameport.h>
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index fa061d4..4a6957c 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -735,7 +735,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
+ static int xpad_led_probe(struct usb_xpad *xpad)
+ {
+-      static atomic_t led_seq = ATOMIC_INIT(0);
++      static atomic_unchecked_t led_seq       = ATOMIC_INIT(0);
+       long led_no;
+       struct xpad_led *led;
+       struct led_classdev *led_cdev;
+@@ -748,7 +748,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
+       if (!led)
+               return -ENOMEM;
+-      led_no = (long)atomic_inc_return(&led_seq) - 1;
++      led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
+       snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
+       led->xpad = xpad;
+diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
+index 2f0b39d..7370f13 100644
+--- a/drivers/input/mouse/psmouse.h
++++ b/drivers/input/mouse/psmouse.h
+@@ -116,7 +116,7 @@ struct psmouse_attribute {
+       ssize_t (*set)(struct psmouse *psmouse, void *data,
+                       const char *buf, size_t count);
+       bool protect;
+-};
++} __do_const;
+ #define to_psmouse_attr(a)    container_of((a), struct psmouse_attribute, dattr)
+ ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
+diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
+index 4c842c3..590b0bf 100644
+--- a/drivers/input/mousedev.c
++++ b/drivers/input/mousedev.c
+@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
+       spin_unlock_irq(&client->packet_lock);
+-      if (copy_to_user(buffer, data, count))
++      if (count > sizeof(data) || copy_to_user(buffer, data, count))
+               return -EFAULT;
+       return count;
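
The mousedev change above adds a bounds check so a caller-supplied count can never make copy_to_user() read past the end of the fixed-size data buffer. A rough user-space analogue, with memcpy() standing in for copy_to_user() and the function name invented for illustration:

#include <errno.h>
#include <string.h>

/* Copy at most sizeof(data) bytes out of a fixed buffer; reject anything
 * larger instead of reading past the end, mirroring the
 * "count > sizeof(data) || copy_to_user(...)" pattern above. */
static int read_packet(char *dst, size_t count)
{
        char data[8] = "example";

        if (count > sizeof(data))
                return -EFAULT;
        memcpy(dst, data, count);
        return (int)count;
}

int main(void)
{
        char out[8];

        return read_packet(out, sizeof(out)) < 0;
}
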
+diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
+index 25fc597..558bf3b3 100644
+--- a/drivers/input/serio/serio.c
++++ b/drivers/input/serio/serio.c
+@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
+  */
+ static void serio_init_port(struct serio *serio)
+ {
+-      static atomic_t serio_no = ATOMIC_INIT(0);
++      static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
+       __module_get(THIS_MODULE);
+@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
+       mutex_init(&serio->drv_mutex);
+       device_initialize(&serio->dev);
+       dev_set_name(&serio->dev, "serio%ld",
+-                      (long)atomic_inc_return(&serio_no) - 1);
++                      (long)atomic_inc_return_unchecked(&serio_no) - 1);
+       serio->dev.bus = &serio_bus;
+       serio->dev.release = serio_release_port;
+       serio->dev.groups = serio_device_attr_groups;
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index d8f98b1..f62a640 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -583,7 +583,7 @@ static struct notifier_block iommu_bus_nb = {
+ static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
+ {
+       bus_register_notifier(bus, &iommu_bus_nb);
+-      bus_for_each_dev(bus, NULL, ops, add_iommu_group);
++      bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
+ }
+ /**
+diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
+index dcfea4e..f4226b2 100644
+--- a/drivers/iommu/irq_remapping.c
++++ b/drivers/iommu/irq_remapping.c
+@@ -354,7 +354,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
+ void panic_if_irq_remap(const char *msg)
+ {
+       if (irq_remapping_enabled)
+-              panic(msg);
++              panic("%s", msg);
+ }
+ static void ir_ack_apic_edge(struct irq_data *data)
+@@ -375,10 +375,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
+ void irq_remap_modify_chip_defaults(struct irq_chip *chip)
+ {
+-      chip->irq_print_chip = ir_print_prefix;
+-      chip->irq_ack = ir_ack_apic_edge;
+-      chip->irq_eoi = ir_ack_apic_level;
+-      chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
++      pax_open_kernel();
++      *(void **)&chip->irq_print_chip = ir_print_prefix;
++      *(void **)&chip->irq_ack = ir_ack_apic_edge;
++      *(void **)&chip->irq_eoi = ir_ack_apic_level;
++      *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
++      pax_close_kernel();
+ }
+ bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
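
Two idioms appear in the irq_remapping hunks: panic(msg) becomes panic("%s", msg) so that a '%' in the message can never be parsed as a format specifier, and the irq_chip callbacks are assigned between pax_open_kernel()/pax_close_kernel(), a PaX-specific pair that briefly permits writes to structures the patch otherwise keeps read-only. Only the format-string idiom is portable; a user-space sketch of it:

#include <stdio.h>

/* Never pass externally influenced text as the format argument of a
 * printf-style function; pass it as data behind a fixed "%s" instead. */
static void report(const char *msg)
{
        printf("%s\n", msg);    /* not printf(msg): "%n"/"%s" in msg stays inert */
}

int main(void)
{
        report("100% CPU on panic path");   /* contains '%', still printed verbatim */
        return 0;
}
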
+diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
+index 19ceaa6..3625818 100644
+--- a/drivers/irqchip/irq-gic.c
++++ b/drivers/irqchip/irq-gic.c
+@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
+  * Supported arch specific GIC irq extension.
+  * Default make them NULL.
+  */
+-struct irq_chip gic_arch_extn = {
++irq_chip_no_const gic_arch_extn = {
+       .irq_eoi        = NULL,
+       .irq_mask       = NULL,
+       .irq_unmask     = NULL,
+@@ -333,7 +333,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+       chained_irq_exit(chip, desc);
+ }
+-static struct irq_chip gic_chip = {
++static irq_chip_no_const gic_chip __read_only = {
+       .name                   = "GIC",
+       .irq_mask               = gic_mask_irq,
+       .irq_unmask             = gic_unmask_irq,
+diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
+index ac6f72b..81150f2 100644
+--- a/drivers/isdn/capi/capi.c
++++ b/drivers/isdn/capi/capi.c
+@@ -81,8 +81,8 @@ struct capiminor {
+       struct capi20_appl      *ap;
+       u32                     ncci;
+-      atomic_t                datahandle;
+-      atomic_t                msgid;
++      atomic_unchecked_t      datahandle;
++      atomic_unchecked_t      msgid;
+       struct tty_port port;
+       int                ttyinstop;
+@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
+               capimsg_setu16(s, 2, mp->ap->applid);
+               capimsg_setu8 (s, 4, CAPI_DATA_B3);
+               capimsg_setu8 (s, 5, CAPI_RESP);
+-              capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
++              capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
+               capimsg_setu32(s, 8, mp->ncci);
+               capimsg_setu16(s, 12, datahandle);
+       }
+@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
+               mp->outbytes -= len;
+               spin_unlock_bh(&mp->outlock);
+-              datahandle = atomic_inc_return(&mp->datahandle);
++              datahandle = atomic_inc_return_unchecked(&mp->datahandle);
+               skb_push(skb, CAPI_DATA_B3_REQ_LEN);
+               memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
+               capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
+               capimsg_setu16(skb->data, 2, mp->ap->applid);
+               capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
+               capimsg_setu8 (skb->data, 5, CAPI_REQ);
+-              capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
++              capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
+               capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
+               capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
+               capimsg_setu16(skb->data, 16, len);     /* Data length */
+diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
+index 600c79b..3752bab 100644
+--- a/drivers/isdn/gigaset/interface.c
++++ b/drivers/isdn/gigaset/interface.c
+@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
+       }
+       tty->driver_data = cs;
+-      ++cs->port.count;
++      atomic_inc(&cs->port.count);
+-      if (cs->port.count == 1) {
++      if (atomic_read(&cs->port.count) == 1) {
+               tty_port_tty_set(&cs->port, tty);
+               cs->port.low_latency = 1;
+       }
+@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
+       if (!cs->connected)
+               gig_dbg(DEBUG_IF, "not connected");     /* nothing to do */
+-      else if (!cs->port.count)
++      else if (!atomic_read(&cs->port.count))
+               dev_warn(cs->dev, "%s: device not opened\n", __func__);
+-      else if (!--cs->port.count)
++      else if (!atomic_dec_return(&cs->port.count))
+               tty_port_tty_set(&cs->port, NULL);
+       mutex_unlock(&cs->mutex);
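
The gigaset hunk converts port.count from a plain integer updated with ++/-- into an atomic counter, so concurrent open and close paths update it without losing increments. A rough user-space sketch with C11 atomics (the port structure and helpers are invented for illustration and do not mirror the tty_port API exactly):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct port {
        atomic_int count;       /* open count, updated from several threads */
};

static void port_open(struct port *p)
{
        /* atomic_fetch_add returns the old value: old 0 means first opener */
        if (atomic_fetch_add(&p->count, 1) + 1 == 1)
                printf("first open\n");
}

static bool port_close(struct port *p)
{
        /* true when the last user is gone */
        return atomic_fetch_sub(&p->count, 1) - 1 == 0;
}

int main(void)
{
        struct port p = { .count = 0 };

        port_open(&p);
        printf("last close: %d\n", port_close(&p));
        return 0;
}
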
+diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
+index 4d9b195..455075c 100644
+--- a/drivers/isdn/hardware/avm/b1.c
++++ b/drivers/isdn/hardware/avm/b1.c
+@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
+       }
+       if (left) {
+               if (t4file->user) {
+-                      if (copy_from_user(buf, dp, left))
++                      if (left > sizeof buf || copy_from_user(buf, dp, left))
+                               return -EFAULT;
+               } else {
+                       memcpy(buf, dp, left);
+@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
+       }
+       if (left) {
+               if (config->user) {
+-                      if (copy_from_user(buf, dp, left))
++                      if (left > sizeof buf || copy_from_user(buf, dp, left))
+                               return -EFAULT;
+               } else {
+                       memcpy(buf, dp, left);
+diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
+index 3c5f249..5fac4d0 100644
+--- a/drivers/isdn/i4l/isdn_tty.c
++++ b/drivers/isdn/i4l/isdn_tty.c
+@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
+ #ifdef ISDN_DEBUG_MODEM_OPEN
+       printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
+-             port->count);
++             atomic_read(&port->count));
+ #endif
+-      port->count++;
++      atomic_inc(&port->count);
+       port->tty = tty;
+       /*
+        * Start up serial port
+@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
+ #endif
+               return;
+       }
+-      if ((tty->count == 1) && (port->count != 1)) {
++      if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
+               /*
+                * Uh, oh.  tty->count is 1, which means that the tty
+                * structure will be freed.  Info->count should always
+@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
+                * serial port won't be shutdown.
+                */
+               printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
+-                     "info->count is %d\n", port->count);
+-              port->count = 1;
++                     "info->count is %d\n", atomic_read(&port->count));
++              atomic_set(&port->count, 1);
+       }
+-      if (--port->count < 0) {
++      if (atomic_dec_return(&port->count) < 0) {
+               printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
+-                     info->line, port->count);
+-              port->count = 0;
++                     info->line, atomic_read(&port->count));
++              atomic_set(&port->count, 0);
+       }
+-      if (port->count) {
++      if (atomic_read(&port->count)) {
+ #ifdef ISDN_DEBUG_MODEM_OPEN
+               printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
+ #endif
+@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
+       if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
+               return;
+       isdn_tty_shutdown(info);
+-      port->count = 0;
++      atomic_set(&port->count, 0);
+       port->flags &= ~ASYNC_NORMAL_ACTIVE;
+       port->tty = NULL;
+       wake_up_interruptible(&port->open_wait);
+@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
+       for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
+               modem_info *info = &dev->mdm.info[i];
+-              if (info->port.count == 0)
++              if (atomic_read(&info->port.count) == 0)
+                       continue;
+               if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) &&  /* SI1 is matching */
+                   (info->emu.mdmreg[REG_SI2] == si2)) {         /* SI2 is matching */
+diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
+index e74df7c..03a03ba 100644
+--- a/drivers/isdn/icn/icn.c
++++ b/drivers/isdn/icn/icn.c
+@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
+               if (count > len)
+                       count = len;
+               if (user) {
+-                      if (copy_from_user(msg, buf, count))
++                      if (count > sizeof msg || copy_from_user(msg, buf, count))
+                               return -EFAULT;
+               } else
+                       memcpy(msg, buf, count);
+diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
+index 6a8405d..0bd1c7e 100644
+--- a/drivers/leds/leds-clevo-mail.c
++++ b/drivers/leds/leds-clevo-mail.c
+@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
+  * detected as working, but in reality it is not) as low as
+  * possible.
+  */
+-static struct dmi_system_id __initdata clevo_mail_led_dmi_table[] = {
++static const struct dmi_system_id __initconst clevo_mail_led_dmi_table[] = {
+       {
+               .callback = clevo_mail_led_dmi_callback,
+               .ident = "Clevo D410J",
+diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
+index 64e204e..c6bf189 100644
+--- a/drivers/leds/leds-ss4200.c
++++ b/drivers/leds/leds-ss4200.c
+@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
+  * detected as working, but in reality it is not) as low as
+  * possible.
+  */
+-static struct dmi_system_id __initdata nas_led_whitelist[] = {
++static const struct dmi_system_id __initconst nas_led_whitelist[] = {
+       {
+               .callback = ss4200_led_dmi_callback,
+               .ident = "Intel SS4200-E",
+diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
+index 0bf1e4e..b4bf44e 100644
+--- a/drivers/lguest/core.c
++++ b/drivers/lguest/core.c
+@@ -97,9 +97,17 @@ static __init int map_switcher(void)
+        * The end address needs +1 because __get_vm_area allocates an
+        * extra guard page, so we need space for that.
+        */
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++      switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
++                                   VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
++                                   + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
++#else
+       switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
+                                    VM_ALLOC, switcher_addr, switcher_addr
+                                    + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
++#endif
++
+       if (!switcher_vma) {
+               err = -ENOMEM;
+               printk("lguest: could not map switcher pages high\n");
+@@ -124,7 +132,7 @@ static __init int map_switcher(void)
+        * Now the Switcher is mapped at the right address, we can't fail!
+        * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
+        */
+-      memcpy(switcher_vma->addr, start_switcher_text,
++      memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
+              end_switcher_text - start_switcher_text);
+       printk(KERN_INFO "lguest: mapped switcher at %p\n",
+diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
+index 5b9ac32..2ef4f26 100644
+--- a/drivers/lguest/page_tables.c
++++ b/drivers/lguest/page_tables.c
+@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
+ /*:*/
+ #ifdef CONFIG_X86_PAE
+-static void release_pmd(pmd_t *spmd)
++static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
+ {
+       /* If the entry's not present, there's nothing to release. */
+       if (pmd_flags(*spmd) & _PAGE_PRESENT) {
+diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
+index f0a3347..f6608b2 100644
+--- a/drivers/lguest/x86/core.c
++++ b/drivers/lguest/x86/core.c
+@@ -59,7 +59,7 @@ static struct {
+ /* Offset from where switcher.S was compiled to where we've copied it */
+ static unsigned long switcher_offset(void)
+ {
+-      return switcher_addr - (unsigned long)start_switcher_text;
++      return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
+ }
+ /* This cpu's struct lguest_pages (after the Switcher text page) */
+@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
+        * These copies are pretty cheap, so we do them unconditionally: */
+       /* Save the current Host top-level page directory.
+        */
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++      pages->state.host_cr3 = read_cr3();
++#else
+       pages->state.host_cr3 = __pa(current->mm->pgd);
++#endif
++
+       /*
+        * Set up the Guest's page tables to see this CPU's pages (and no
+        * other CPU's pages).
+@@ -475,7 +481,7 @@ void __init lguest_arch_host_init(void)
+        * compiled-in switcher code and the high-mapped copy we just made.
+        */
+       for (i = 0; i < IDT_ENTRIES; i++)
+-              default_idt_entries[i] += switcher_offset();
++              default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
+       /*
+        * Set up the Switcher's per-cpu areas.
+@@ -558,7 +564,7 @@ void __init lguest_arch_host_init(void)
+        * it will be undisturbed when we switch.  To change %cs and jump we
+        * need this structure to feed to Intel's "lcall" instruction.
+        */
+-      lguest_entry.offset = (long)switch_to_guest + switcher_offset();
++      lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
+       lguest_entry.segment = LGUEST_CS;
+       /*
+diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
+index 40634b0..4f5855e 100644
+--- a/drivers/lguest/x86/switcher_32.S
++++ b/drivers/lguest/x86/switcher_32.S
+@@ -87,6 +87,7 @@
+ #include <asm/page.h>
+ #include <asm/segment.h>
+ #include <asm/lguest.h>
++#include <asm/processor-flags.h>
+ // We mark the start of the code to copy
+ // It's placed in .text tho it's never run here
+@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
+       // Changes type when we load it: damn Intel!
+       // For after we switch over our page tables
+       // That entry will be read-only: we'd crash.
++
++#ifdef CONFIG_PAX_KERNEXEC
++      mov     %cr0, %edx
++      xor     $X86_CR0_WP, %edx
++      mov     %edx, %cr0
++#endif
++
+       movl    $(GDT_ENTRY_TSS*8), %edx
+       ltr     %dx
+@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
+       // Let's clear it again for our return.
+       // The GDT descriptor of the Host
+       // Points to the table after two "size" bytes
+-      movl    (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
++      movl    (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
+       // Clear "used" from type field (byte 5, bit 2)
+-      andb    $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
++      andb    $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
++
++#ifdef CONFIG_PAX_KERNEXEC
++      mov     %cr0, %eax
++      xor     $X86_CR0_WP, %eax
++      mov     %eax, %cr0
++#endif
+       // Once our page table's switched, the Guest is live!
+       // The Host fades as we run this final step.
+@@ -295,13 +309,12 @@ deliver_to_host:
+       // I consulted gcc, and it gave
+       // These instructions, which I gladly credit:
+       leal    (%edx,%ebx,8), %eax
+-      movzwl  (%eax),%edx
+-      movl    4(%eax), %eax
+-      xorw    %ax, %ax
+-      orl     %eax, %edx
++      movl    4(%eax), %edx
++      movw    (%eax), %dx
+       // Now the address of the handler's in %edx
+       // We call it now: its "iret" drops us home.
+-      jmp     *%edx
++      ljmp    $__KERNEL_CS, $1f
++1:    jmp     *%edx
+ // Every interrupt can come to us here
+ // But we must truly tell each apart.
+diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
+index 0003992..854bbce 100644
+--- a/drivers/md/bcache/closure.h
++++ b/drivers/md/bcache/closure.h
+@@ -622,7 +622,7 @@ static inline void closure_wake_up(struct closure_waitlist *list)
+ static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
+                                 struct workqueue_struct *wq)
+ {
+-      BUG_ON(object_is_on_stack(cl));
++      BUG_ON(object_starts_on_stack(cl));
+       closure_set_ip(cl);
+       cl->fn = fn;
+       cl->wq = wq;
+diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
+index 5a2c754..0fa55db 100644
+--- a/drivers/md/bitmap.c
++++ b/drivers/md/bitmap.c
+@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
+                  chunk_kb ? "KB" : "B");
+       if (bitmap->storage.file) {
+               seq_printf(seq, ", file: ");
+-              seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
++              seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
+       }
+       seq_printf(seq, "\n");
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 81a79b7..87a0f73 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1697,7 +1697,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
+           cmd == DM_LIST_VERSIONS_CMD)
+               return 0;
+-      if ((cmd == DM_DEV_CREATE_CMD)) {
++      if (cmd == DM_DEV_CREATE_CMD) {
+               if (!*param->name) {
+                       DMWARN("name not supplied when creating device");
+                       return -EINVAL;
+diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
+index 699b5be..eac0a15 100644
+--- a/drivers/md/dm-raid1.c
++++ b/drivers/md/dm-raid1.c
+@@ -40,7 +40,7 @@ enum dm_raid1_error {
+ struct mirror {
+       struct mirror_set *ms;
+-      atomic_t error_count;
++      atomic_unchecked_t error_count;
+       unsigned long error_type;
+       struct dm_dev *dev;
+       sector_t offset;
+@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
+       struct mirror *m;
+       for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
+-              if (!atomic_read(&m->error_count))
++              if (!atomic_read_unchecked(&m->error_count))
+                       return m;
+       return NULL;
+@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
+        * simple way to tell if a device has encountered
+        * errors.
+        */
+-      atomic_inc(&m->error_count);
++      atomic_inc_unchecked(&m->error_count);
+       if (test_and_set_bit(error_type, &m->error_type))
+               return;
+@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
+       struct mirror *m = get_default_mirror(ms);
+       do {
+-              if (likely(!atomic_read(&m->error_count)))
++              if (likely(!atomic_read_unchecked(&m->error_count)))
+                       return m;
+               if (m-- == ms->mirror)
+@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
+ {
+       struct mirror *default_mirror = get_default_mirror(m->ms);
+-      return !atomic_read(&default_mirror->error_count);
++      return !atomic_read_unchecked(&default_mirror->error_count);
+ }
+ static int mirror_available(struct mirror_set *ms, struct bio *bio)
+@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
+                */
+               if (likely(region_in_sync(ms, region, 1)))
+                       m = choose_mirror(ms, bio->bi_sector);
+-              else if (m && atomic_read(&m->error_count))
++              else if (m && atomic_read_unchecked(&m->error_count))
+                       m = NULL;
+               if (likely(m))
+@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
+       }
+       ms->mirror[mirror].ms = ms;
+-      atomic_set(&(ms->mirror[mirror].error_count), 0);
++      atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
+       ms->mirror[mirror].error_type = 0;
+       ms->mirror[mirror].offset = offset;
+@@ -1340,7 +1340,7 @@ static void mirror_resume(struct dm_target *ti)
+  */
+ static char device_status_char(struct mirror *m)
+ {
+-      if (!atomic_read(&(m->error_count)))
++      if (!atomic_read_unchecked(&(m->error_count)))
+               return 'A';
+       return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
+diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
+index d907ca6..cfb8384 100644
+--- a/drivers/md/dm-stripe.c
++++ b/drivers/md/dm-stripe.c
+@@ -20,7 +20,7 @@ struct stripe {
+       struct dm_dev *dev;
+       sector_t physical_start;
+-      atomic_t error_count;
++      atomic_unchecked_t error_count;
+ };
+ struct stripe_c {
+@@ -185,7 +185,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+                       kfree(sc);
+                       return r;
+               }
+-              atomic_set(&(sc->stripe[i].error_count), 0);
++              atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
+       }
+       ti->private = sc;
+@@ -326,7 +326,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
+               DMEMIT("%d ", sc->stripes);
+               for (i = 0; i < sc->stripes; i++)  {
+                       DMEMIT("%s ", sc->stripe[i].dev->name);
+-                      buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
++                      buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
+                               'D' : 'A';
+               }
+               buffer[i] = '\0';
+@@ -371,8 +371,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
+        */
+       for (i = 0; i < sc->stripes; i++)
+               if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
+-                      atomic_inc(&(sc->stripe[i].error_count));
+-                      if (atomic_read(&(sc->stripe[i].error_count)) <
++                      atomic_inc_unchecked(&(sc->stripe[i].error_count));
++                      if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
+                           DM_IO_ERROR_THRESHOLD)
+                               schedule_work(&sc->trigger_event);
+               }
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 1ff252a..ee384c1 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -389,7 +389,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
+       if (!dev_size)
+               return 0;
+-      if ((start >= dev_size) || (start + len > dev_size)) {
++      if ((start >= dev_size) || (len > dev_size - start)) {
+               DMWARN("%s: %s too small for target: "
+                      "start=%llu, len=%llu, dev_size=%llu",
+                      dm_device_name(ti->table->md), bdevname(bdev, b),
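
The dm-table change rewrites the range check so the addition cannot wrap: start + len > dev_size can overflow for a huge len, whereas len > dev_size - start is safe once start >= dev_size has already been rejected. A standalone sketch of the two forms:

#include <stdbool.h>
#include <stdint.h>

/* Overflow-prone: for a very large len, start + len wraps around and the
 * check passes even though the range is far outside the device. */
static bool range_ok_naive(uint64_t start, uint64_t len, uint64_t dev_size)
{
        return start < dev_size && start + len <= dev_size;
}

/* Safe: subtract instead of add; dev_size - start cannot underflow because
 * start < dev_size has already been established. */
static bool range_ok(uint64_t start, uint64_t len, uint64_t dev_size)
{
        return start < dev_size && len <= dev_size - start;
}

int main(void)
{
        /* len so large that start + len wraps: the two checks disagree */
        return range_ok_naive(1, UINT64_MAX, 100) != range_ok(1, UINT64_MAX, 100);
}
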
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 60bce43..9b997d0 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
+ {
+       pmd->info.tm = pmd->tm;
+       pmd->info.levels = 2;
+-      pmd->info.value_type.context = pmd->data_sm;
++      pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
+       pmd->info.value_type.size = sizeof(__le64);
+       pmd->info.value_type.inc = data_block_inc;
+       pmd->info.value_type.dec = data_block_dec;
+@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
+       pmd->bl_info.tm = pmd->tm;
+       pmd->bl_info.levels = 1;
+-      pmd->bl_info.value_type.context = pmd->data_sm;
++      pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
+       pmd->bl_info.value_type.size = sizeof(__le64);
+       pmd->bl_info.value_type.inc = data_block_inc;
+       pmd->bl_info.value_type.dec = data_block_dec;
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 33f2010..23fb84c 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -169,9 +169,9 @@ struct mapped_device {
+       /*
+        * Event handling.
+        */
+-      atomic_t event_nr;
++      atomic_unchecked_t event_nr;
+       wait_queue_head_t eventq;
+-      atomic_t uevent_seq;
++      atomic_unchecked_t uevent_seq;
+       struct list_head uevent_list;
+       spinlock_t uevent_lock; /* Protect access to uevent_list */
+@@ -1884,8 +1884,8 @@ static struct mapped_device *alloc_dev(int minor)
+       rwlock_init(&md->map_lock);
+       atomic_set(&md->holders, 1);
+       atomic_set(&md->open_count, 0);
+-      atomic_set(&md->event_nr, 0);
+-      atomic_set(&md->uevent_seq, 0);
++      atomic_set_unchecked(&md->event_nr, 0);
++      atomic_set_unchecked(&md->uevent_seq, 0);
+       INIT_LIST_HEAD(&md->uevent_list);
+       spin_lock_init(&md->uevent_lock);
+@@ -2033,7 +2033,7 @@ static void event_callback(void *context)
+       dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
+-      atomic_inc(&md->event_nr);
++      atomic_inc_unchecked(&md->event_nr);
+       wake_up(&md->eventq);
+ }
+@@ -2690,18 +2690,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+ uint32_t dm_next_uevent_seq(struct mapped_device *md)
+ {
+-      return atomic_add_return(1, &md->uevent_seq);
++      return atomic_add_return_unchecked(1, &md->uevent_seq);
+ }
+ uint32_t dm_get_event_nr(struct mapped_device *md)
+ {
+-      return atomic_read(&md->event_nr);
++      return atomic_read_unchecked(&md->event_nr);
+ }
+ int dm_wait_event(struct mapped_device *md, int event_nr)
+ {
+       return wait_event_interruptible(md->eventq,
+-                      (event_nr != atomic_read(&md->event_nr)));
++                      (event_nr != atomic_read_unchecked(&md->event_nr)));
+ }
+ void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 51f0345..c77810e 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -234,10 +234,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
+  *  start build, activate spare
+  */
+ static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
+-static atomic_t md_event_count;
++static atomic_unchecked_t md_event_count;
+ void md_new_event(struct mddev *mddev)
+ {
+-      atomic_inc(&md_event_count);
++      atomic_inc_unchecked(&md_event_count);
+       wake_up(&md_event_waiters);
+ }
+ EXPORT_SYMBOL_GPL(md_new_event);
+@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
+  */
+ static void md_new_event_inintr(struct mddev *mddev)
+ {
+-      atomic_inc(&md_event_count);
++      atomic_inc_unchecked(&md_event_count);
+       wake_up(&md_event_waiters);
+ }
+@@ -1501,7 +1501,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
+       if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
+           (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
+               rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
+-      atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
++      atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
+       rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
+       bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
+@@ -1745,7 +1745,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
+       else
+               sb->resync_offset = cpu_to_le64(0);
+-      sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
++      sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
+       sb->raid_disks = cpu_to_le32(mddev->raid_disks);
+       sb->size = cpu_to_le64(mddev->dev_sectors);
+@@ -2750,7 +2750,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
+ static ssize_t
+ errors_show(struct md_rdev *rdev, char *page)
+ {
+-      return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
++      return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
+ }
+ static ssize_t
+@@ -2759,7 +2759,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
+       char *e;
+       unsigned long n = simple_strtoul(buf, &e, 10);
+       if (*buf && (*e == 0 || *e == '\n')) {
+-              atomic_set(&rdev->corrected_errors, n);
++              atomic_set_unchecked(&rdev->corrected_errors, n);
+               return len;
+       }
+       return -EINVAL;
+@@ -3207,8 +3207,8 @@ int md_rdev_init(struct md_rdev *rdev)
+       rdev->sb_loaded = 0;
+       rdev->bb_page = NULL;
+       atomic_set(&rdev->nr_pending, 0);
+-      atomic_set(&rdev->read_errors, 0);
+-      atomic_set(&rdev->corrected_errors, 0);
++      atomic_set_unchecked(&rdev->read_errors, 0);
++      atomic_set_unchecked(&rdev->corrected_errors, 0);
+       INIT_LIST_HEAD(&rdev->same_set);
+       init_waitqueue_head(&rdev->blocked_wait);
+@@ -7009,7 +7009,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
+               spin_unlock(&pers_lock);
+               seq_printf(seq, "\n");
+-              seq->poll_event = atomic_read(&md_event_count);
++              seq->poll_event = atomic_read_unchecked(&md_event_count);
+               return 0;
+       }
+       if (v == (void*)2) {
+@@ -7112,7 +7112,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
+               return error;
+       seq = file->private_data;
+-      seq->poll_event = atomic_read(&md_event_count);
++      seq->poll_event = atomic_read_unchecked(&md_event_count);
+       return error;
+ }
+@@ -7126,7 +7126,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
+       /* always allow read */
+       mask = POLLIN | POLLRDNORM;
+-      if (seq->poll_event != atomic_read(&md_event_count))
++      if (seq->poll_event != atomic_read_unchecked(&md_event_count))
+               mask |= POLLERR | POLLPRI;
+       return mask;
+ }
+@@ -7170,7 +7170,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
+               struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
+               curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
+                             (int)part_stat_read(&disk->part0, sectors[1]) -
+-                            atomic_read(&disk->sync_io);
++                            atomic_read_unchecked(&disk->sync_io);
+               /* sync IO will cause sync_io to increase before the disk_stats
+                * as sync_io is counted when a request starts, and
+                * disk_stats is counted when it completes.
+diff --git a/drivers/md/md.h b/drivers/md/md.h
+index 653f992b6..6af6c40 100644
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -94,13 +94,13 @@ struct md_rdev {
+                                        * only maintained for arrays that
+                                        * support hot removal
+                                        */
+-      atomic_t        read_errors;    /* number of consecutive read errors that
++      atomic_unchecked_t      read_errors;    /* number of consecutive read errors that
+                                        * we have tried to ignore.
+                                        */
+       struct timespec last_read_error;        /* monotonic time since our
+                                                * last read error
+                                                */
+-      atomic_t        corrected_errors; /* number of corrected read errors,
++      atomic_unchecked_t      corrected_errors; /* number of corrected read errors,
+                                          * for reporting to userspace and storing
+                                          * in superblock.
+                                          */
+@@ -434,7 +434,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
+ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
+ {
+-        atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
++      atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
+ }
+ struct md_personality
+diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
+index 3e6d115..ffecdeb 100644
+--- a/drivers/md/persistent-data/dm-space-map.h
++++ b/drivers/md/persistent-data/dm-space-map.h
+@@ -71,6 +71,7 @@ struct dm_space_map {
+                                          dm_sm_threshold_fn fn,
+                                          void *context);
+ };
++typedef struct dm_space_map __no_const dm_space_map_no_const;
+ /*----------------------------------------------------------------*/
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 6f48244..7d29145 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1822,7 +1822,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
+                       if (r1_sync_page_io(rdev, sect, s,
+                                           bio->bi_io_vec[idx].bv_page,
+                                           READ) != 0)
+-                              atomic_add(s, &rdev->corrected_errors);
++                              atomic_add_unchecked(s, &rdev->corrected_errors);
+               }
+               sectors -= s;
+               sect += s;
+@@ -2049,7 +2049,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
+                           test_bit(In_sync, &rdev->flags)) {
+                               if (r1_sync_page_io(rdev, sect, s,
+                                                   conf->tmppage, READ)) {
+-                                      atomic_add(s, &rdev->corrected_errors);
++                                      atomic_add_unchecked(s, &rdev->corrected_errors);
+                                       printk(KERN_INFO
+                                              "md/raid1:%s: read error corrected "
+                                              "(%d sectors at %llu on %s)\n",
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 081bb33..3c4b287 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1940,7 +1940,7 @@ static void end_sync_read(struct bio *bio, int error)
+               /* The write handler will notice the lack of
+                * R10BIO_Uptodate and record any errors etc
+                */
+-              atomic_add(r10_bio->sectors,
++              atomic_add_unchecked(r10_bio->sectors,
+                          &conf->mirrors[d].rdev->corrected_errors);
+       /* for reconstruct, we always reschedule after a read.
+@@ -2298,7 +2298,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
+ {
+       struct timespec cur_time_mon;
+       unsigned long hours_since_last;
+-      unsigned int read_errors = atomic_read(&rdev->read_errors);
++      unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
+       ktime_get_ts(&cur_time_mon);
+@@ -2320,9 +2320,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
+        * overflowing the shift of read_errors by hours_since_last.
+        */
+       if (hours_since_last >= 8 * sizeof(read_errors))
+-              atomic_set(&rdev->read_errors, 0);
++              atomic_set_unchecked(&rdev->read_errors, 0);
+       else
+-              atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
++              atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
+ }
+ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
+@@ -2376,8 +2376,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+               return;
+       check_decay_read_errors(mddev, rdev);
+-      atomic_inc(&rdev->read_errors);
+-      if (atomic_read(&rdev->read_errors) > max_read_errors) {
++      atomic_inc_unchecked(&rdev->read_errors);
++      if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
+               char b[BDEVNAME_SIZE];
+               bdevname(rdev->bdev, b);
+@@ -2385,7 +2385,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+                      "md/raid10:%s: %s: Raid device exceeded "
+                      "read_error threshold [cur %d:max %d]\n",
+                      mdname(mddev), b,
+-                     atomic_read(&rdev->read_errors), max_read_errors);
++                     atomic_read_unchecked(&rdev->read_errors), max_read_errors);
+               printk(KERN_NOTICE
+                      "md/raid10:%s: %s: Failing raid device\n",
+                      mdname(mddev), b);
+@@ -2540,7 +2540,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+                                              sect +
+                                              choose_data_offset(r10_bio, rdev)),
+                                      bdevname(rdev->bdev, b));
+-                              atomic_add(s, &rdev->corrected_errors);
++                              atomic_add_unchecked(s, &rdev->corrected_errors);
+                       }
+                       rdev_dec_pending(rdev, mddev);
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index a35b846..e295c6d 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -1764,21 +1764,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
+                               mdname(conf->mddev), STRIPE_SECTORS,
+                               (unsigned long long)s,
+                               bdevname(rdev->bdev, b));
+-                      atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
++                      atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
+                       clear_bit(R5_ReadError, &sh->dev[i].flags);
+                       clear_bit(R5_ReWrite, &sh->dev[i].flags);
+               } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
+                       clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
+-              if (atomic_read(&rdev->read_errors))
+-                      atomic_set(&rdev->read_errors, 0);
++              if (atomic_read_unchecked(&rdev->read_errors))
++                      atomic_set_unchecked(&rdev->read_errors, 0);
+       } else {
+               const char *bdn = bdevname(rdev->bdev, b);
+               int retry = 0;
+               int set_bad = 0;
+               clear_bit(R5_UPTODATE, &sh->dev[i].flags);
+-              atomic_inc(&rdev->read_errors);
++              atomic_inc_unchecked(&rdev->read_errors);
+               if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
+                       printk_ratelimited(
+                               KERN_WARNING
+@@ -1806,7 +1806,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
+                               mdname(conf->mddev),
+                               (unsigned long long)s,
+                               bdn);
+-              } else if (atomic_read(&rdev->read_errors)
++              } else if (atomic_read_unchecked(&rdev->read_errors)
+                        > conf->max_nr_stripes)
+                       printk(KERN_WARNING
+                              "md/raid:%s: Too many read errors, failing device %s.\n",
+diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
+index 401ef64..836e563 100644
+--- a/drivers/media/dvb-core/dvbdev.c
++++ b/drivers/media/dvb-core/dvbdev.c
+@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+                       const struct dvb_device *template, void *priv, int type)
+ {
+       struct dvb_device *dvbdev;
+-      struct file_operations *dvbdevfops;
++      file_operations_no_const *dvbdevfops;
+       struct device *clsdev;
+       int minor;
+       int id;
+diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
+index 9b6c3bb..baeb5c7 100644
+--- a/drivers/media/dvb-frontends/dib3000.h
++++ b/drivers/media/dvb-frontends/dib3000.h
+@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
+       int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
+       int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
+       int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
+-};
++} __no_const;
+ #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
+ extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
+diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
+index c7a9be1..683f6f8 100644
+--- a/drivers/media/pci/cx88/cx88-video.c
++++ b/drivers/media/pci/cx88/cx88-video.c
+@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
+ /* ------------------------------------------------------------------ */
+-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
+-static unsigned int vbi_nr[]   = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
+-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
++static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
++static int vbi_nr[]   = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
++static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
+ module_param_array(video_nr, int, NULL, 0444);
+ module_param_array(vbi_nr,   int, NULL, 0444);
+diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
+index d338b19..aae4f9e 100644
+--- a/drivers/media/platform/omap/omap_vout.c
++++ b/drivers/media/platform/omap/omap_vout.c
+@@ -63,7 +63,6 @@ enum omap_vout_channels {
+       OMAP_VIDEO2,
+ };
+-static struct videobuf_queue_ops video_vbq_ops;
+ /* Variables configurable through module params*/
+ static u32 video1_numbuffers = 3;
+ static u32 video2_numbuffers = 3;
+@@ -1015,6 +1014,12 @@ static int omap_vout_open(struct file *file)
+ {
+       struct videobuf_queue *q;
+       struct omap_vout_device *vout = NULL;
++      static struct videobuf_queue_ops video_vbq_ops = {
++              .buf_setup = omap_vout_buffer_setup,
++              .buf_prepare = omap_vout_buffer_prepare,
++              .buf_release = omap_vout_buffer_release,
++              .buf_queue = omap_vout_buffer_queue,
++      };
+       vout = video_drvdata(file);
+       v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+@@ -1032,10 +1037,6 @@ static int omap_vout_open(struct file *file)
+       vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+       q = &vout->vbq;
+-      video_vbq_ops.buf_setup = omap_vout_buffer_setup;
+-      video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
+-      video_vbq_ops.buf_release = omap_vout_buffer_release;
+-      video_vbq_ops.buf_queue = omap_vout_buffer_queue;
+       spin_lock_init(&vout->vbq_lock);
+       videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
+diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
+index 04e6490..2df65bf 100644
+--- a/drivers/media/platform/s5p-tv/mixer.h
++++ b/drivers/media/platform/s5p-tv/mixer.h
+@@ -156,7 +156,7 @@ struct mxr_layer {
+       /** layer index (unique identifier) */
+       int idx;
+       /** callbacks for layer methods */
+-      struct mxr_layer_ops ops;
++      struct mxr_layer_ops *ops;
+       /** format array */
+       const struct mxr_format **fmt_array;
+       /** size of format array */
+diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
+index b93a21f..2535195 100644
+--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
++++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
+@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
+ {
+       struct mxr_layer *layer;
+       int ret;
+-      struct mxr_layer_ops ops = {
++      static struct mxr_layer_ops ops = {
+               .release = mxr_graph_layer_release,
+               .buffer_set = mxr_graph_buffer_set,
+               .stream_set = mxr_graph_stream_set,
+diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
+index b713403..53cb5ad 100644
+--- a/drivers/media/platform/s5p-tv/mixer_reg.c
++++ b/drivers/media/platform/s5p-tv/mixer_reg.c
+@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
+               layer->update_buf = next;
+       }
+-      layer->ops.buffer_set(layer, layer->update_buf);
++      layer->ops->buffer_set(layer, layer->update_buf);
+       if (done && done != layer->shadow_buf)
+               vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
+diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
+index ef0efdf..8c78eb6 100644
+--- a/drivers/media/platform/s5p-tv/mixer_video.c
++++ b/drivers/media/platform/s5p-tv/mixer_video.c
+@@ -209,7 +209,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
+       layer->geo.src.height = layer->geo.src.full_height;
+       mxr_geometry_dump(mdev, &layer->geo);
+-      layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
++      layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
+       mxr_geometry_dump(mdev, &layer->geo);
+ }
+@@ -227,7 +227,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
+       layer->geo.dst.full_width = mbus_fmt.width;
+       layer->geo.dst.full_height = mbus_fmt.height;
+       layer->geo.dst.field = mbus_fmt.field;
+-      layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
++      layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
+       mxr_geometry_dump(mdev, &layer->geo);
+ }
+@@ -333,7 +333,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
+       /* set source size to highest accepted value */
+       geo->src.full_width = max(geo->dst.full_width, pix->width);
+       geo->src.full_height = max(geo->dst.full_height, pix->height);
+-      layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
++      layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
+       mxr_geometry_dump(mdev, &layer->geo);
+       /* set cropping to total visible screen */
+       geo->src.width = pix->width;
+@@ -341,12 +341,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
+       geo->src.x_offset = 0;
+       geo->src.y_offset = 0;
+       /* assure consistency of geometry */
+-      layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
++      layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
+       mxr_geometry_dump(mdev, &layer->geo);
+       /* set full size to lowest possible value */
+       geo->src.full_width = 0;
+       geo->src.full_height = 0;
+-      layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
++      layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
+       mxr_geometry_dump(mdev, &layer->geo);
+       /* returning results */
+@@ -473,7 +473,7 @@ static int mxr_s_selection(struct file *file, void *fh,
+               target->width = s->r.width;
+               target->height = s->r.height;
+-              layer->ops.fix_geometry(layer, stage, s->flags);
++              layer->ops->fix_geometry(layer, stage, s->flags);
+               /* retrieve update selection rectangle */
+               res.left = target->x_offset;
+@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
+       mxr_output_get(mdev);
+       mxr_layer_update_output(layer);
+-      layer->ops.format_set(layer);
++      layer->ops->format_set(layer);
+       /* enabling layer in hardware */
+       spin_lock_irqsave(&layer->enq_slock, flags);
+       layer->state = MXR_LAYER_STREAMING;
+       spin_unlock_irqrestore(&layer->enq_slock, flags);
+-      layer->ops.stream_set(layer, MXR_ENABLE);
++      layer->ops->stream_set(layer, MXR_ENABLE);
+       mxr_streamer_get(mdev);
+       return 0;
+@@ -1030,7 +1030,7 @@ static int stop_streaming(struct vb2_queue *vq)
+       spin_unlock_irqrestore(&layer->enq_slock, flags);
+       /* disabling layer in hardware */
+-      layer->ops.stream_set(layer, MXR_DISABLE);
++      layer->ops->stream_set(layer, MXR_DISABLE);
+       /* remove one streamer */
+       mxr_streamer_put(mdev);
+       /* allow changes in output configuration */
+@@ -1069,8 +1069,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
+ void mxr_layer_release(struct mxr_layer *layer)
+ {
+-      if (layer->ops.release)
+-              layer->ops.release(layer);
++      if (layer->ops->release)
++              layer->ops->release(layer);
+ }
+ void mxr_base_layer_release(struct mxr_layer *layer)
+@@ -1096,7 +1096,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
+       layer->mdev = mdev;
+       layer->idx = idx;
+-      layer->ops = *ops;
++      layer->ops = ops;
+       spin_lock_init(&layer->enq_slock);
+       INIT_LIST_HEAD(&layer->enq_list);
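
The mixer changes replace an embedded copy of struct mxr_layer_ops (called as layer->ops.fn()) with a pointer to one static table (layer->ops->fn()), so the function pointers can sit in a single, potentially read-only object instead of a writable per-layer copy. A condensed sketch of the before/after shape, with simplified stand-in names:

#include <stdio.h>

struct layer_ops {
        void (*stream_set)(int enable);
};

static void graph_stream_set(int enable)
{
        printf("stream %s\n", enable ? "on" : "off");
}

/* Before: each layer embeds its own writable copy of the ops table,
 *   struct layer { struct layer_ops ops; };  used as layer->ops.stream_set(1);
 * After: every layer points at one shared table that can be const: */
static const struct layer_ops graph_ops = {
        .stream_set = graph_stream_set,
};

struct layer {
        const struct layer_ops *ops;    /* used as layer->ops->stream_set(1) */
};

int main(void)
{
        struct layer l = { .ops = &graph_ops };

        l.ops->stream_set(1);
        return 0;
}
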
+diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
+index 3d13a63..da31bf1 100644
+--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
++++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
+@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
+ {
+       struct mxr_layer *layer;
+       int ret;
+-      struct mxr_layer_ops ops = {
++      static struct mxr_layer_ops ops = {
+               .release = mxr_vp_layer_release,
+               .buffer_set = mxr_vp_buffer_set,
+               .stream_set = mxr_vp_stream_set,
+diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
+index 545c04c..a14bded 100644
+--- a/drivers/media/radio/radio-cadet.c
++++ b/drivers/media/radio/radio-cadet.c
+@@ -324,6 +324,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
+       unsigned char readbuf[RDS_BUFFER];
+       int i = 0;
++      if (count > RDS_BUFFER)
++              return -EFAULT;
+       mutex_lock(&dev->lock);
+       if (dev->rdsstat == 0)
+               cadet_start_rds(dev);
+@@ -339,7 +341,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
+       while (i < count && dev->rdsin != dev->rdsout)
+               readbuf[i++] = dev->rdsbuf[dev->rdsout++];
+-      if (i && copy_to_user(data, readbuf, i))
++      if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
+               i = -EFAULT;
+ unlock:
+       mutex_unlock(&dev->lock);
+diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
+index 3940bb0..fb3952a 100644
+--- a/drivers/media/usb/dvb-usb/cxusb.c
++++ b/drivers/media/usb/dvb-usb/cxusb.c
+@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
+ struct dib0700_adapter_state {
+       int (*set_param_save) (struct dvb_frontend *);
+-};
++} __no_const;
+ static int dib7070_set_param_override(struct dvb_frontend *fe)
+ {
+diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
+index 6e237b6..dc25556 100644
+--- a/drivers/media/usb/dvb-usb/dw2102.c
++++ b/drivers/media/usb/dvb-usb/dw2102.c
+@@ -118,7 +118,7 @@ struct su3000_state {
+ struct s6x0_state {
+       int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
+-};
++} __no_const;
+ /* debug */
+ static int dvb_usb_dw2102_debug;
+diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+index f129551..ecf6514 100644
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+@@ -326,7 +326,7 @@ struct v4l2_buffer32 {
+       __u32                   reserved;
+ };
+-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
++static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
+                               enum v4l2_memory memory)
+ {
+       void __user *up_pln;
+@@ -355,7 +355,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
+       return 0;
+ }
+-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
++static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
+                               enum v4l2_memory memory)
+ {
+       if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
+@@ -772,7 +772,7 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
+               put_user(kp->start_block, &up->start_block) ||
+               put_user(kp->blocks, &up->blocks) ||
+               put_user(tmp, &up->edid) ||
+-              copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
++              copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
+                       return -EFAULT;
+       return 0;
+ }
+diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
+index 7658586..1079260 100644
+--- a/drivers/media/v4l2-core/v4l2-ioctl.c
++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
+@@ -1995,7 +1995,8 @@ struct v4l2_ioctl_info {
+                               struct file *file, void *fh, void *p);
+       } u;
+       void (*debug)(const void *arg, bool write_only);
+-};
++} __do_const;
++typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
+ /* This control needs a priority check */
+ #define INFO_FL_PRIO  (1 << 0)
+@@ -2177,7 +2178,7 @@ static long __video_do_ioctl(struct file *file,
+       struct video_device *vfd = video_devdata(file);
+       const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
+       bool write_only = false;
+-      struct v4l2_ioctl_info default_info;
++      v4l2_ioctl_info_no_const default_info;
+       const struct v4l2_ioctl_info *info;
+       void *fh = file->private_data;
+       struct v4l2_fh *vfh = NULL;
+@@ -2251,7 +2252,7 @@ done:
+ }
+ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
+-                          void * __user *user_ptr, void ***kernel_ptr)
++                          void __user **user_ptr, void ***kernel_ptr)
+ {
+       int ret = 0;
+@@ -2267,7 +2268,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
+                               ret = -EINVAL;
+                               break;
+                       }
+-                      *user_ptr = (void __user *)buf->m.planes;
++                      *user_ptr = (void __force_user *)buf->m.planes;
+                       *kernel_ptr = (void *)&buf->m.planes;
+                       *array_size = sizeof(struct v4l2_plane) * buf->length;
+                       ret = 1;
+@@ -2302,7 +2303,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
+                               ret = -EINVAL;
+                               break;
+                       }
+-                      *user_ptr = (void __user *)ctrls->controls;
++                      *user_ptr = (void __force_user *)ctrls->controls;
+                       *kernel_ptr = (void *)&ctrls->controls;
+                       *array_size = sizeof(struct v4l2_ext_control)
+                                   * ctrls->count;
+diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
+index 767ff4d..c69d259 100644
+--- a/drivers/message/fusion/mptbase.c
++++ b/drivers/message/fusion/mptbase.c
+@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
+       seq_printf(m, "  MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
+       seq_printf(m, "  MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++      seq_printf(m, "  RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
++#else
+       seq_printf(m, "  RequestFrames @ 0x%p (Dma @ 0x%p)\n",
+                                       (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
++#endif
++
+       /*
+        *  Rounding UP to nearest 4-kB boundary here...
+        */
+@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
+                                       ioc->facts.GlobalCredits);
+       seq_printf(m, "  Frames   @ 0x%p (Dma @ 0x%p)\n",
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++                                      NULL, NULL);
++#else
+                                       (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
++#endif
+       sz = (ioc->reply_sz * ioc->reply_depth) + 128;
+       seq_printf(m, "    {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
+                                       ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
+diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
+index dd239bd..689c4f7 100644
+--- a/drivers/message/fusion/mptsas.c
++++ b/drivers/message/fusion/mptsas.c
+@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
+               return 0;
+ }
++static inline void
++mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
++{
++      if (phy_info->port_details) {
++              phy_info->port_details->rphy = rphy;
++              dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
++                  ioc->name, rphy));
++      }
++
++      if (rphy) {
++              dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
++                  &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
++              dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
++                  ioc->name, rphy, rphy->dev.release));
++      }
++}
++
+ /* no mutex */
+ static void
+ mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
+@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
+               return NULL;
+ }
+-static inline void
+-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
+-{
+-      if (phy_info->port_details) {
+-              phy_info->port_details->rphy = rphy;
+-              dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
+-                  ioc->name, rphy));
+-      }
+-
+-      if (rphy) {
+-              dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
+-                  &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
+-              dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
+-                  ioc->name, rphy, rphy->dev.release));
+-      }
+-}
+-
+ static inline struct sas_port *
+ mptsas_get_port(struct mptsas_phyinfo *phy_info)
+ {
+diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
+index 727819c..ad74694 100644
+--- a/drivers/message/fusion/mptscsih.c
++++ b/drivers/message/fusion/mptscsih.c
+@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
+       h = shost_priv(SChost);
+-      if (h) {
+-              if (h->info_kbuf == NULL)
+-                      if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
+-                              return h->info_kbuf;
+-              h->info_kbuf[0] = '\0';
++      if (!h)
++              return NULL;
+-              mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
+-              h->info_kbuf[size-1] = '\0';
+-      }
++      if (h->info_kbuf == NULL)
++              if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
++                      return h->info_kbuf;
++      h->info_kbuf[0] = '\0';
++
++      mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
++      h->info_kbuf[size-1] = '\0';
+       return h->info_kbuf;
+ }
+diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
+index b7d87cd..9890039 100644
+--- a/drivers/message/i2o/i2o_proc.c
++++ b/drivers/message/i2o/i2o_proc.c
+@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
+       "Array Controller Device"
+ };
+-static char *chtostr(char *tmp, u8 *chars, int n)
+-{
+-      tmp[0] = 0;
+-      return strncat(tmp, (char *)chars, n);
+-}
+-
+ static int i2o_report_query_status(struct seq_file *seq, int block_status,
+                                  char *group)
+ {
+@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
+       } *result;
+       i2o_exec_execute_ddm_table ddm_table;
+-      char tmp[28 + 1];
+       result = kmalloc(sizeof(*result), GFP_KERNEL);
+       if (!result)
+@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
+               seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
+               seq_printf(seq, "%-#8x", ddm_table.module_id);
+-              seq_printf(seq, "%-29s",
+-                         chtostr(tmp, ddm_table.module_name_version, 28));
++              seq_printf(seq, "%-.28s", ddm_table.module_name_version);
+               seq_printf(seq, "%9d  ", ddm_table.data_size);
+               seq_printf(seq, "%8d", ddm_table.code_size);
+@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
+       i2o_driver_result_table *result;
+       i2o_driver_store_table *dst;
+-      char tmp[28 + 1];
+       result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
+       if (result == NULL)
+@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
+               seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
+               seq_printf(seq, "%-#8x", dst->module_id);
+-              seq_printf(seq, "%-29s",
+-                         chtostr(tmp, dst->module_name_version, 28));
+-              seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
++              seq_printf(seq, "%-.28s", dst->module_name_version);
++              seq_printf(seq, "%-.8s", dst->date);
+               seq_printf(seq, "%8d ", dst->module_size);
+               seq_printf(seq, "%8d ", dst->mpb_size);
+               seq_printf(seq, "0x%04x", dst->module_flags);
+@@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
+       // == (allow) 512d bytes (max)
+       static u16 *work16 = (u16 *) work32;
+       int token;
+-      char tmp[16 + 1];
+       token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
+@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
+       seq_printf(seq, "Device Class  : %s\n", i2o_get_class_name(work16[0]));
+       seq_printf(seq, "Owner TID     : %0#5x\n", work16[2]);
+       seq_printf(seq, "Parent TID    : %0#5x\n", work16[3]);
+-      seq_printf(seq, "Vendor info   : %s\n",
+-                 chtostr(tmp, (u8 *) (work32 + 2), 16));
+-      seq_printf(seq, "Product info  : %s\n",
+-                 chtostr(tmp, (u8 *) (work32 + 6), 16));
+-      seq_printf(seq, "Description   : %s\n",
+-                 chtostr(tmp, (u8 *) (work32 + 10), 16));
+-      seq_printf(seq, "Product rev.  : %s\n",
+-                 chtostr(tmp, (u8 *) (work32 + 14), 8));
++      seq_printf(seq, "Vendor info   : %.16s\n", (u8 *) (work32 + 2));
++      seq_printf(seq, "Product info  : %.16s\n", (u8 *) (work32 + 6));
++      seq_printf(seq, "Description   : %.16s\n", (u8 *) (work32 + 10));
++      seq_printf(seq, "Product rev.  : %.8s\n", (u8 *) (work32 + 14));
+       seq_printf(seq, "Serial number : ");
+       print_serial_number(seq, (u8 *) (work32 + 16),
+@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
+               u8 pad[256];    // allow up to 256 byte (max) serial number
+       } result;
+-      char tmp[24 + 1];
+-
+       token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
+       if (token < 0) {
+@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
+       }
+       seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
+-      seq_printf(seq, "Module name         : %s\n",
+-                 chtostr(tmp, result.module_name, 24));
+-      seq_printf(seq, "Module revision     : %s\n",
+-                 chtostr(tmp, result.module_rev, 8));
++      seq_printf(seq, "Module name         : %.24s\n", result.module_name);
++      seq_printf(seq, "Module revision     : %.8s\n", result.module_rev);
+       seq_printf(seq, "Serial number       : ");
+       print_serial_number(seq, result.serial_number, sizeof(result) - 36);
+@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
+               u8 instance_number[4];
+       } result;
+-      char tmp[64 + 1];
+-
+       token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
+       if (token < 0) {
+@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
+               return 0;
+       }
+-      seq_printf(seq, "Device name     : %s\n",
+-                 chtostr(tmp, result.device_name, 64));
+-      seq_printf(seq, "Service name    : %s\n",
+-                 chtostr(tmp, result.service_name, 64));
+-      seq_printf(seq, "Physical name   : %s\n",
+-                 chtostr(tmp, result.physical_location, 64));
+-      seq_printf(seq, "Instance number : %s\n",
+-                 chtostr(tmp, result.instance_number, 4));
++      seq_printf(seq, "Device name     : %.64s\n", result.device_name);
++      seq_printf(seq, "Service name    : %.64s\n", result.service_name);
++      seq_printf(seq, "Physical name   : %.64s\n", result.physical_location);
++      seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
+       return 0;
+ }
+diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
+index a8c08f3..155fe3d 100644
+--- a/drivers/message/i2o/iop.c
++++ b/drivers/message/i2o/iop.c
+@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
+       spin_lock_irqsave(&c->context_list_lock, flags);
+-      if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
+-              atomic_inc(&c->context_list_counter);
++      if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
++              atomic_inc_unchecked(&c->context_list_counter);
+-      entry->context = atomic_read(&c->context_list_counter);
++      entry->context = atomic_read_unchecked(&c->context_list_counter);
+       list_add(&entry->list, &c->context_list);
+@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
+ #if BITS_PER_LONG == 64
+       spin_lock_init(&c->context_list_lock);
+-      atomic_set(&c->context_list_counter, 0);
++      atomic_set_unchecked(&c->context_list_counter, 0);
+       INIT_LIST_HEAD(&c->context_list);
+ #endif
+diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
+index 45ece11..8efa218 100644
+--- a/drivers/mfd/janz-cmodio.c
++++ b/drivers/mfd/janz-cmodio.c
+@@ -13,6 +13,7 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
++#include <linux/slab.h>
+ #include <linux/init.h>
+ #include <linux/pci.h>
+ #include <linux/interrupt.h>
+diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
+index a5f9888..1c0ed56 100644
+--- a/drivers/mfd/twl4030-irq.c
++++ b/drivers/mfd/twl4030-irq.c
+@@ -35,6 +35,7 @@
+ #include <linux/of.h>
+ #include <linux/irqdomain.h>
+ #include <linux/i2c/twl.h>
++#include <asm/pgtable.h>
+ #include "twl-core.h"
+@@ -728,10 +729,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
+        * Install an irq handler for each of the SIH modules;
+        * clone dummy irq_chip since PIH can't *do* anything
+        */
+-      twl4030_irq_chip = dummy_irq_chip;
+-      twl4030_irq_chip.name = "twl4030";
++      pax_open_kernel();
++      memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
++      *(const char **)&twl4030_irq_chip.name = "twl4030";
+-      twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
++      *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
++      pax_close_kernel();
+       for (i = irq_base; i < irq_end; i++) {
+               irq_set_chip_and_handler(i, &twl4030_irq_chip,
+diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
+index 277a8db..0e0b754 100644
+--- a/drivers/mfd/twl6030-irq.c
++++ b/drivers/mfd/twl6030-irq.c
+@@ -387,10 +387,12 @@ int twl6030_init_irq(struct device *dev, int irq_num)
+        * install an irq handler for each of the modules;
+        * clone dummy irq_chip since PIH can't *do* anything
+        */
+-      twl6030_irq_chip = dummy_irq_chip;
+-      twl6030_irq_chip.name = "twl6030";
+-      twl6030_irq_chip.irq_set_type = NULL;
+-      twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
++      pax_open_kernel();
++      memcpy((void *)&twl6030_irq_chip, &dummy_irq_chip, sizeof twl6030_irq_chip);
++      *(const char **)&twl6030_irq_chip.name = "twl6030";
++      *(void **)&twl6030_irq_chip.irq_set_type = NULL;
++      *(void **)&twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
++      pax_close_kernel();
+       for (i = irq_base; i < irq_end; i++) {
+               irq_set_chip_and_handler(i, &twl6030_irq_chip,
+diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
+index f32550a..e3e52a2 100644
+--- a/drivers/misc/c2port/core.c
++++ b/drivers/misc/c2port/core.c
+@@ -920,7 +920,9 @@ struct c2port_device *c2port_device_register(char *name,
+       mutex_init(&c2dev->mutex);
+       /* Create binary file */
+-      c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
++      pax_open_kernel();
++      *(size_t *)&c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
++      pax_close_kernel();
+       ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
+       if (unlikely(ret))
+               goto error_device_create_bin_file;
+diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
+index 36f5d52..32311c3 100644
+--- a/drivers/misc/kgdbts.c
++++ b/drivers/misc/kgdbts.c
+@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
+       char before[BREAK_INSTR_SIZE];
+       char after[BREAK_INSTR_SIZE];
+-      probe_kernel_read(before, (char *)kgdbts_break_test,
++      probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
+         BREAK_INSTR_SIZE);
+       init_simple_test();
+       ts.tst = plant_and_detach_test;
+@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
+       /* Activate test with initial breakpoint */
+       if (!is_early)
+               kgdb_breakpoint();
+-      probe_kernel_read(after, (char *)kgdbts_break_test,
++      probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
+         BREAK_INSTR_SIZE);
+       if (memcmp(before, after, BREAK_INSTR_SIZE)) {
+               printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
+diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
+index 4cd4a3d..b48cbc7 100644
+--- a/drivers/misc/lis3lv02d/lis3lv02d.c
++++ b/drivers/misc/lis3lv02d/lis3lv02d.c
+@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
+        * the lid is closed. This leads to interrupts as soon as a little move
+        * is done.
+        */
+-      atomic_inc(&lis3->count);
++      atomic_inc_unchecked(&lis3->count);
+       wake_up_interruptible(&lis3->misc_wait);
+       kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
+@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
+       if (lis3->pm_dev)
+               pm_runtime_get_sync(lis3->pm_dev);
+-      atomic_set(&lis3->count, 0);
++      atomic_set_unchecked(&lis3->count, 0);
+       return 0;
+ }
+@@ -616,7 +616,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
+       add_wait_queue(&lis3->misc_wait, &wait);
+       while (true) {
+               set_current_state(TASK_INTERRUPTIBLE);
+-              data = atomic_xchg(&lis3->count, 0);
++              data = atomic_xchg_unchecked(&lis3->count, 0);
+               if (data)
+                       break;
+@@ -657,7 +657,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
+                                             struct lis3lv02d, miscdev);
+       poll_wait(file, &lis3->misc_wait, wait);
+-      if (atomic_read(&lis3->count))
++      if (atomic_read_unchecked(&lis3->count))
+               return POLLIN | POLLRDNORM;
+       return 0;
+ }
+diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
+index c439c82..1f20f57 100644
+--- a/drivers/misc/lis3lv02d/lis3lv02d.h
++++ b/drivers/misc/lis3lv02d/lis3lv02d.h
+@@ -297,7 +297,7 @@ struct lis3lv02d {
+       struct input_polled_dev *idev;     /* input device */
+       struct platform_device  *pdev;     /* platform device */
+       struct regulator_bulk_data regulators[2];
+-      atomic_t                count;     /* interrupt count after last read */
++      atomic_unchecked_t      count;     /* interrupt count after last read */
+       union axis_conversion   ac;        /* hw -> logical axis */
+       int                     mapped_btns[3];
+diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
+index 2f30bad..c4c13d0 100644
+--- a/drivers/misc/sgi-gru/gruhandles.c
++++ b/drivers/misc/sgi-gru/gruhandles.c
+@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
+       unsigned long nsec;
+       nsec = CLKS2NSEC(clks);
+-      atomic_long_inc(&mcs_op_statistics[op].count);
+-      atomic_long_add(nsec, &mcs_op_statistics[op].total);
++      atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
++      atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
+       if (mcs_op_statistics[op].max < nsec)
+               mcs_op_statistics[op].max = nsec;
+ }
+diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
+index 797d796..ae8f01e 100644
+--- a/drivers/misc/sgi-gru/gruprocfs.c
++++ b/drivers/misc/sgi-gru/gruprocfs.c
+@@ -32,9 +32,9 @@
+ #define printstat(s, f)               printstat_val(s, &gru_stats.f, #f)
+-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
++static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
+ {
+-      unsigned long val = atomic_long_read(v);
++      unsigned long val = atomic_long_read_unchecked(v);
+       seq_printf(s, "%16lu %s\n", val, id);
+ }
+@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
+       seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
+       for (op = 0; op < mcsop_last; op++) {
+-              count = atomic_long_read(&mcs_op_statistics[op].count);
+-              total = atomic_long_read(&mcs_op_statistics[op].total);
++              count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
++              total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
+               max = mcs_op_statistics[op].max;
+               seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
+                          count ? total / count : 0, max);
+diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
+index 5c3ce24..4915ccb 100644
+--- a/drivers/misc/sgi-gru/grutables.h
++++ b/drivers/misc/sgi-gru/grutables.h
+@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
+  * GRU statistics.
+  */
+ struct gru_stats_s {
+-      atomic_long_t vdata_alloc;
+-      atomic_long_t vdata_free;
+-      atomic_long_t gts_alloc;
+-      atomic_long_t gts_free;
+-      atomic_long_t gms_alloc;
+-      atomic_long_t gms_free;
+-      atomic_long_t gts_double_allocate;
+-      atomic_long_t assign_context;
+-      atomic_long_t assign_context_failed;
+-      atomic_long_t free_context;
+-      atomic_long_t load_user_context;
+-      atomic_long_t load_kernel_context;
+-      atomic_long_t lock_kernel_context;
+-      atomic_long_t unlock_kernel_context;
+-      atomic_long_t steal_user_context;
+-      atomic_long_t steal_kernel_context;
+-      atomic_long_t steal_context_failed;
+-      atomic_long_t nopfn;
+-      atomic_long_t asid_new;
+-      atomic_long_t asid_next;
+-      atomic_long_t asid_wrap;
+-      atomic_long_t asid_reuse;
+-      atomic_long_t intr;
+-      atomic_long_t intr_cbr;
+-      atomic_long_t intr_tfh;
+-      atomic_long_t intr_spurious;
+-      atomic_long_t intr_mm_lock_failed;
+-      atomic_long_t call_os;
+-      atomic_long_t call_os_wait_queue;
+-      atomic_long_t user_flush_tlb;
+-      atomic_long_t user_unload_context;
+-      atomic_long_t user_exception;
+-      atomic_long_t set_context_option;
+-      atomic_long_t check_context_retarget_intr;
+-      atomic_long_t check_context_unload;
+-      atomic_long_t tlb_dropin;
+-      atomic_long_t tlb_preload_page;
+-      atomic_long_t tlb_dropin_fail_no_asid;
+-      atomic_long_t tlb_dropin_fail_upm;
+-      atomic_long_t tlb_dropin_fail_invalid;
+-      atomic_long_t tlb_dropin_fail_range_active;
+-      atomic_long_t tlb_dropin_fail_idle;
+-      atomic_long_t tlb_dropin_fail_fmm;
+-      atomic_long_t tlb_dropin_fail_no_exception;
+-      atomic_long_t tfh_stale_on_fault;
+-      atomic_long_t mmu_invalidate_range;
+-      atomic_long_t mmu_invalidate_page;
+-      atomic_long_t flush_tlb;
+-      atomic_long_t flush_tlb_gru;
+-      atomic_long_t flush_tlb_gru_tgh;
+-      atomic_long_t flush_tlb_gru_zero_asid;
++      atomic_long_unchecked_t vdata_alloc;
++      atomic_long_unchecked_t vdata_free;
++      atomic_long_unchecked_t gts_alloc;
++      atomic_long_unchecked_t gts_free;
++      atomic_long_unchecked_t gms_alloc;
++      atomic_long_unchecked_t gms_free;
++      atomic_long_unchecked_t gts_double_allocate;
++      atomic_long_unchecked_t assign_context;
++      atomic_long_unchecked_t assign_context_failed;
++      atomic_long_unchecked_t free_context;
++      atomic_long_unchecked_t load_user_context;
++      atomic_long_unchecked_t load_kernel_context;
++      atomic_long_unchecked_t lock_kernel_context;
++      atomic_long_unchecked_t unlock_kernel_context;
++      atomic_long_unchecked_t steal_user_context;
++      atomic_long_unchecked_t steal_kernel_context;
++      atomic_long_unchecked_t steal_context_failed;
++      atomic_long_unchecked_t nopfn;
++      atomic_long_unchecked_t asid_new;
++      atomic_long_unchecked_t asid_next;
++      atomic_long_unchecked_t asid_wrap;
++      atomic_long_unchecked_t asid_reuse;
++      atomic_long_unchecked_t intr;
++      atomic_long_unchecked_t intr_cbr;
++      atomic_long_unchecked_t intr_tfh;
++      atomic_long_unchecked_t intr_spurious;
++      atomic_long_unchecked_t intr_mm_lock_failed;
++      atomic_long_unchecked_t call_os;
++      atomic_long_unchecked_t call_os_wait_queue;
++      atomic_long_unchecked_t user_flush_tlb;
++      atomic_long_unchecked_t user_unload_context;
++      atomic_long_unchecked_t user_exception;
++      atomic_long_unchecked_t set_context_option;
++      atomic_long_unchecked_t check_context_retarget_intr;
++      atomic_long_unchecked_t check_context_unload;
++      atomic_long_unchecked_t tlb_dropin;
++      atomic_long_unchecked_t tlb_preload_page;
++      atomic_long_unchecked_t tlb_dropin_fail_no_asid;
++      atomic_long_unchecked_t tlb_dropin_fail_upm;
++      atomic_long_unchecked_t tlb_dropin_fail_invalid;
++      atomic_long_unchecked_t tlb_dropin_fail_range_active;
++      atomic_long_unchecked_t tlb_dropin_fail_idle;
++      atomic_long_unchecked_t tlb_dropin_fail_fmm;
++      atomic_long_unchecked_t tlb_dropin_fail_no_exception;
++      atomic_long_unchecked_t tfh_stale_on_fault;
++      atomic_long_unchecked_t mmu_invalidate_range;
++      atomic_long_unchecked_t mmu_invalidate_page;
++      atomic_long_unchecked_t flush_tlb;
++      atomic_long_unchecked_t flush_tlb_gru;
++      atomic_long_unchecked_t flush_tlb_gru_tgh;
++      atomic_long_unchecked_t flush_tlb_gru_zero_asid;
+-      atomic_long_t copy_gpa;
+-      atomic_long_t read_gpa;
++      atomic_long_unchecked_t copy_gpa;
++      atomic_long_unchecked_t read_gpa;
+-      atomic_long_t mesq_receive;
+-      atomic_long_t mesq_receive_none;
+-      atomic_long_t mesq_send;
+-      atomic_long_t mesq_send_failed;
+-      atomic_long_t mesq_noop;
+-      atomic_long_t mesq_send_unexpected_error;
+-      atomic_long_t mesq_send_lb_overflow;
+-      atomic_long_t mesq_send_qlimit_reached;
+-      atomic_long_t mesq_send_amo_nacked;
+-      atomic_long_t mesq_send_put_nacked;
+-      atomic_long_t mesq_page_overflow;
+-      atomic_long_t mesq_qf_locked;
+-      atomic_long_t mesq_qf_noop_not_full;
+-      atomic_long_t mesq_qf_switch_head_failed;
+-      atomic_long_t mesq_qf_unexpected_error;
+-      atomic_long_t mesq_noop_unexpected_error;
+-      atomic_long_t mesq_noop_lb_overflow;
+-      atomic_long_t mesq_noop_qlimit_reached;
+-      atomic_long_t mesq_noop_amo_nacked;
+-      atomic_long_t mesq_noop_put_nacked;
+-      atomic_long_t mesq_noop_page_overflow;
++      atomic_long_unchecked_t mesq_receive;
++      atomic_long_unchecked_t mesq_receive_none;
++      atomic_long_unchecked_t mesq_send;
++      atomic_long_unchecked_t mesq_send_failed;
++      atomic_long_unchecked_t mesq_noop;
++      atomic_long_unchecked_t mesq_send_unexpected_error;
++      atomic_long_unchecked_t mesq_send_lb_overflow;
++      atomic_long_unchecked_t mesq_send_qlimit_reached;
++      atomic_long_unchecked_t mesq_send_amo_nacked;
++      atomic_long_unchecked_t mesq_send_put_nacked;
++      atomic_long_unchecked_t mesq_page_overflow;
++      atomic_long_unchecked_t mesq_qf_locked;
++      atomic_long_unchecked_t mesq_qf_noop_not_full;
++      atomic_long_unchecked_t mesq_qf_switch_head_failed;
++      atomic_long_unchecked_t mesq_qf_unexpected_error;
++      atomic_long_unchecked_t mesq_noop_unexpected_error;
++      atomic_long_unchecked_t mesq_noop_lb_overflow;
++      atomic_long_unchecked_t mesq_noop_qlimit_reached;
++      atomic_long_unchecked_t mesq_noop_amo_nacked;
++      atomic_long_unchecked_t mesq_noop_put_nacked;
++      atomic_long_unchecked_t mesq_noop_page_overflow;
+ };
+@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
+       tghop_invalidate, mcsop_last};
+ struct mcs_op_statistic {
+-      atomic_long_t   count;
+-      atomic_long_t   total;
++      atomic_long_unchecked_t count;
++      atomic_long_unchecked_t total;
+       unsigned long   max;
+ };
+@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
+ #define STAT(id)      do {                                            \
+                               if (gru_options & OPT_STATS)            \
+-                                      atomic_long_inc(&gru_stats.id); \
++                                      atomic_long_inc_unchecked(&gru_stats.id);       \
+                       } while (0)
+ #ifdef CONFIG_SGI_GRU_DEBUG
+diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
+index c862cd4..0d176fe 100644
+--- a/drivers/misc/sgi-xp/xp.h
++++ b/drivers/misc/sgi-xp/xp.h
+@@ -288,7 +288,7 @@ struct xpc_interface {
+                                       xpc_notify_func, void *);
+       void (*received) (short, int, void *);
+       enum xp_retval (*partid_to_nasids) (short, void *);
+-};
++} __no_const;
+ extern struct xpc_interface xpc_interface;
+diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
+index b94d5f7..7f494c5 100644
+--- a/drivers/misc/sgi-xp/xpc.h
++++ b/drivers/misc/sgi-xp/xpc.h
+@@ -835,6 +835,7 @@ struct xpc_arch_operations {
+       void (*received_payload) (struct xpc_channel *, void *);
+       void (*notify_senders_of_disconnect) (struct xpc_channel *);
+ };
++typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
+ /* struct xpc_partition act_state values (for XPC HB) */
+@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
+ /* found in xpc_main.c */
+ extern struct device *xpc_part;
+ extern struct device *xpc_chan;
+-extern struct xpc_arch_operations xpc_arch_ops;
++extern xpc_arch_operations_no_const xpc_arch_ops;
+ extern int xpc_disengage_timelimit;
+ extern int xpc_disengage_timedout;
+ extern int xpc_activate_IRQ_rcvd;
+diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
+index d971817..33bdca5 100644
+--- a/drivers/misc/sgi-xp/xpc_main.c
++++ b/drivers/misc/sgi-xp/xpc_main.c
+@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
+       .notifier_call = xpc_system_die,
+ };
+-struct xpc_arch_operations xpc_arch_ops;
++xpc_arch_operations_no_const xpc_arch_ops;
+ /*
+  * Timer function to enforce the timelimit on the partition disengage.
+@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
+               if (((die_args->trapnr == X86_TRAP_MF) ||
+                    (die_args->trapnr == X86_TRAP_XF)) &&
+-                  !user_mode_vm(die_args->regs))
++                  !user_mode(die_args->regs))
+                       xpc_die_deactivate();
+               break;
+diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
+index 49f04bc..65660c2 100644
+--- a/drivers/mmc/core/mmc_ops.c
++++ b/drivers/mmc/core/mmc_ops.c
+@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
+       void *data_buf;
+       int is_on_stack;
+-      is_on_stack = object_is_on_stack(buf);
++      is_on_stack = object_starts_on_stack(buf);
+       if (is_on_stack) {
+               /*
+                * dma onto stack is unsafe/nonportable, but callers to this
+diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
+index 0b74189..818358f 100644
+--- a/drivers/mmc/host/dw_mmc.h
++++ b/drivers/mmc/host/dw_mmc.h
+@@ -202,5 +202,5 @@ struct dw_mci_drv_data {
+       void            (*prepare_command)(struct dw_mci *host, u32 *cmdr);
+       void            (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
+       int             (*parse_dt)(struct dw_mci *host);
+-};
++} __do_const;
+ #endif /* _DW_MMC_H_ */
+diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
+index c6f6246..60760a8 100644
+--- a/drivers/mmc/host/sdhci-s3c.c
++++ b/drivers/mmc/host/sdhci-s3c.c
+@@ -664,9 +664,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
+        * we can use overriding functions instead of default.
+        */
+       if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
+-              sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
+-              sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
+-              sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
++              pax_open_kernel();
++              *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
++              *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
++              *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
++              pax_close_kernel();
+       }
+       /* It supports additional host capabilities if needed */
+diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
+index 0c8bb6b..6f35deb 100644
+--- a/drivers/mtd/nand/denali.c
++++ b/drivers/mtd/nand/denali.c
+@@ -24,6 +24,7 @@
+ #include <linux/slab.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/module.h>
++#include <linux/slab.h>
+ #include "denali.h"
+diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
+index 51b9d6a..52af9a7 100644
+--- a/drivers/mtd/nftlmount.c
++++ b/drivers/mtd/nftlmount.c
+@@ -24,6 +24,7 @@
+ #include <asm/errno.h>
+ #include <linux/delay.h>
+ #include <linux/slab.h>
++#include <linux/sched.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/nand.h>
+ #include <linux/mtd/nftl.h>
+diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
+index f9d5615..99dd95f 100644
+--- a/drivers/mtd/sm_ftl.c
++++ b/drivers/mtd/sm_ftl.c
+@@ -56,7 +56,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
+ #define SM_CIS_VENDOR_OFFSET 0x59
+ struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
+ {
+-      struct attribute_group *attr_group;
++      attribute_group_no_const *attr_group;
+       struct attribute **attributes;
+       struct sm_sysfs_attribute *vendor_attribute;
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index f975696..4597e21 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -4870,7 +4870,7 @@ static unsigned int bond_get_num_tx_queues(void)
+       return tx_queues;
+ }
+-static struct rtnl_link_ops bond_link_ops __read_mostly = {
++static struct rtnl_link_ops bond_link_ops = {
+       .kind                   = "bond",
+       .priv_size              = sizeof(struct bonding),
+       .setup                  = bond_setup,
+@@ -4995,8 +4995,8 @@ static void __exit bonding_exit(void)
+       bond_destroy_debugfs();
+-      rtnl_link_unregister(&bond_link_ops);
+       unregister_pernet_subsys(&bond_net_ops);
++      rtnl_link_unregister(&bond_link_ops);
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+       /*
+diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
+index e1d2643..7f4133b 100644
+--- a/drivers/net/ethernet/8390/ax88796.c
++++ b/drivers/net/ethernet/8390/ax88796.c
+@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
+       if (ax->plat->reg_offsets)
+               ei_local->reg_offset = ax->plat->reg_offsets;
+       else {
++              resource_size_t _mem_size = mem_size;
++              do_div(_mem_size, 0x18);
+               ei_local->reg_offset = ax->reg_offsets;
+               for (ret = 0; ret < 0x18; ret++)
+-                      ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
++                      ax->reg_offsets[ret] = _mem_size * ret;
+       }
+       if (!request_mem_region(mem->start, mem_size, pdev->name)) {
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+index 151675d..0139a9d 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+@@ -1112,7 +1112,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
+ static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
+ {
+       /* RX_MODE controlling object */
+-      bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
++      bnx2x_init_rx_mode_obj(bp);
+       /* multicast configuration controlling object */
+       bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+index ce1a916..10b52b0 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+@@ -960,6 +960,9 @@ static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val)
+       struct bnx2x *bp = netdev_priv(dev);
+       /* Use the ethtool_dump "flag" field as the dump preset index */
++      if (val->flag < 1 || val->flag > DUMP_MAX_PRESETS)
++              return -EINVAL;
++
+       bp->dump_preset_idx = val->flag;
+       return 0;
+ }
+@@ -986,8 +989,6 @@ static int bnx2x_get_dump_data(struct net_device *dev,
+       struct bnx2x *bp = netdev_priv(dev);
+       struct dump_header dump_hdr = {0};
+-      memset(p, 0, dump->len);
+-
+       /* Disable parity attentions as long as following dump may
+        * cause false alarms by reading never written registers. We
+        * will re-enable parity attentions right after the dump.
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+index b4c9dea..2a9927f 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -11497,6 +11497,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
+               bp->min_msix_vec_cnt = 2;
+       BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
++      bp->dump_preset_idx = 1;
++
+       return rc;
+ }
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+index 32a9609..0b1c53a 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+@@ -2387,15 +2387,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
+       return rc;
+ }
+-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+-                          struct bnx2x_rx_mode_obj *o)
++void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
+ {
+       if (CHIP_IS_E1x(bp)) {
+-              o->wait_comp      = bnx2x_empty_rx_mode_wait;
+-              o->config_rx_mode = bnx2x_set_rx_mode_e1x;
++              bp->rx_mode_obj.wait_comp      = bnx2x_empty_rx_mode_wait;
++              bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
+       } else {
+-              o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
+-              o->config_rx_mode = bnx2x_set_rx_mode_e2;
++              bp->rx_mode_obj.wait_comp      = bnx2x_wait_rx_mode_comp_e2;
++              bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
+       }
+ }
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+index 43c00bc..dd1d03d 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+@@ -1321,8 +1321,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
+ /********************* RX MODE ****************/
+-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+-                          struct bnx2x_rx_mode_obj *o);
++void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
+ /**
+  * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
+diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
+index ff6e30e..87e8452 100644
+--- a/drivers/net/ethernet/broadcom/tg3.h
++++ b/drivers/net/ethernet/broadcom/tg3.h
+@@ -147,6 +147,7 @@
+ #define  CHIPREV_ID_5750_A0            0x4000
+ #define  CHIPREV_ID_5750_A1            0x4001
+ #define  CHIPREV_ID_5750_A3            0x4003
++#define  CHIPREV_ID_5750_C1            0x4201
+ #define  CHIPREV_ID_5750_C2            0x4202
+ #define  CHIPREV_ID_5752_A0_HW                 0x5000
+ #define  CHIPREV_ID_5752_A0            0x6000
+diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+index 71497e8..b650951 100644
+--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+@@ -3037,7 +3037,9 @@ static void t3_io_resume(struct pci_dev *pdev)
+       CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
+                t3_read_reg(adapter, A_PCIE_PEX_ERR));
++      rtnl_lock();
+       t3_resume_ports(adapter);
++      rtnl_unlock();
+ }
+ static const struct pci_error_handlers t3_err_handler = {
+diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+index 8cffcdf..aadf043 100644
+--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
++++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
+  */
+ struct l2t_skb_cb {
+       arp_failure_handler_func arp_failure_handler;
+-};
++} __no_const;
+ #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
+diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
+index 4c83003..2a2a5b9 100644
+--- a/drivers/net/ethernet/dec/tulip/de4x5.c
++++ b/drivers/net/ethernet/dec/tulip/de4x5.c
+@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+       for (i=0; i<ETH_ALEN; i++) {
+           tmp.addr[i] = dev->dev_addr[i];
+       }
+-      if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
++      if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
+       break;
+     case DE4X5_SET_HWADDR:           /* Set the hardware address */
+@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+       spin_lock_irqsave(&lp->lock, flags);
+       memcpy(&statbuf, &lp->pktStats, ioc->len);
+       spin_unlock_irqrestore(&lp->lock, flags);
+-      if (copy_to_user(ioc->data, &statbuf, ioc->len))
++      if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
+               return -EFAULT;
+       break;
+     }
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 6e43426..1bd8365 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -469,7 +469,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
+       if (wrapped)
+               newacc += 65536;
+-      ACCESS_ONCE(*acc) = newacc;
++      ACCESS_ONCE_RW(*acc) = newacc;
+ }
+ void populate_erx_stats(struct be_adapter *adapter,
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
+index 21b85fb..b49e5fc 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.c
++++ b/drivers/net/ethernet/faraday/ftgmac100.c
+@@ -31,6 +31,8 @@
+ #include <linux/netdevice.h>
+ #include <linux/phy.h>
+ #include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/irqreturn.h>
+ #include <net/ip.h>
+ #include "ftgmac100.h"
+diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
+index a6eda8d..935d273 100644
+--- a/drivers/net/ethernet/faraday/ftmac100.c
++++ b/drivers/net/ethernet/faraday/ftmac100.c
+@@ -31,6 +31,8 @@
+ #include <linux/module.h>
+ #include <linux/netdevice.h>
+ #include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/irqreturn.h>
+ #include "ftmac100.h"
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+index 331987d..3be1135 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+@@ -776,7 +776,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
+       }
+       /* update the base incval used to calculate frequency adjustment */
+-      ACCESS_ONCE(adapter->base_incval) = incval;
++      ACCESS_ONCE_RW(adapter->base_incval) = incval;
+       smp_mb();
+       /* need lock to prevent incorrect read while modifying cyclecounter */
+diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
+index fbe5363..266b4e3 100644
+--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
++++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
+@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
+       struct __vxge_hw_fifo *fifo;
+       struct vxge_hw_fifo_config *config;
+       u32 txdl_size, txdl_per_memblock;
+-      struct vxge_hw_mempool_cbs fifo_mp_callback;
++      static struct vxge_hw_mempool_cbs fifo_mp_callback = {
++              .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
++      };
++
+       struct __vxge_hw_virtualpath *vpath;
+       if ((vp == NULL) || (attr == NULL)) {
+@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
+               goto exit;
+       }
+-      fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
+-
+       fifo->mempool =
+               __vxge_hw_mempool_create(vpath->hldev,
+                       fifo->config->memblock_size,
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+index 5e7fb1d..f8d1810 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+@@ -1948,7 +1948,9 @@ int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
+               op_mode = QLC_83XX_DEFAULT_OPMODE;
+       if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
+-              adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
++              pax_open_kernel();
++              *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
++              pax_close_kernel();
+               ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+       } else {
+               return -EIO;
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+index b0c3de9..fc5857e 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+@@ -200,15 +200,21 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
+       if (priv_level == QLCNIC_NON_PRIV_FUNC) {
+               ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
+               ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+-              nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
++              pax_open_kernel();
++              *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
++              pax_close_kernel();
+       } else if (priv_level == QLCNIC_PRIV_FUNC) {
+               ahw->op_mode = QLCNIC_PRIV_FUNC;
+               ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
+-              nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
++              pax_open_kernel();
++              *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
++              pax_close_kernel();
+       } else if (priv_level == QLCNIC_MGMT_FUNC) {
+               ahw->op_mode = QLCNIC_MGMT_FUNC;
+               ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+-              nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
++              pax_open_kernel();
++              *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
++              pax_close_kernel();
+       } else {
+               return -EIO;
+       }
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+index 6acf82b..14b097e 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+@@ -206,10 +206,10 @@ int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter)
+       if (err) {
+               dev_info(&adapter->pdev->dev,
+                        "Failed to set driver version in firmware\n");
+-              return -EIO;
++              err = -EIO;
+       }
+-
+-      return 0;
++      qlcnic_free_mbx_args(&cmd);
++      return err;
+ }
+ int
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+index d3f8797..82a03d3 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+@@ -262,7 +262,7 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
+       mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
+       mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+-      memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
++      memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
+       vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
+       vlan_req->vlan_id = cpu_to_le16(vlan_id);
+diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
+index 887aebe..9095ff9 100644
+--- a/drivers/net/ethernet/realtek/8139cp.c
++++ b/drivers/net/ethernet/realtek/8139cp.c
+@@ -524,6 +524,7 @@ rx_status_loop:
+                                        PCI_DMA_FROMDEVICE);
+               if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
+                       dev->stats.rx_dropped++;
++                      kfree_skb(new_skb);
+                       goto rx_next;
+               }
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 393f961..d343034 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -753,22 +753,22 @@ struct rtl8169_private {
+       struct mdio_ops {
+               void (*write)(struct rtl8169_private *, int, int);
+               int (*read)(struct rtl8169_private *, int);
+-      } mdio_ops;
++      } __no_const mdio_ops;
+       struct pll_power_ops {
+               void (*down)(struct rtl8169_private *);
+               void (*up)(struct rtl8169_private *);
+-      } pll_power_ops;
++      } __no_const pll_power_ops;
+       struct jumbo_ops {
+               void (*enable)(struct rtl8169_private *);
+               void (*disable)(struct rtl8169_private *);
+-      } jumbo_ops;
++      } __no_const jumbo_ops;
+       struct csi_ops {
+               void (*write)(struct rtl8169_private *, int, int);
+               u32 (*read)(struct rtl8169_private *, int);
+-      } csi_ops;
++      } __no_const csi_ops;
+       int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
+       int (*get_settings)(struct net_device *, struct ethtool_cmd *);
+diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
+index 9a95abf..36df7f9 100644
+--- a/drivers/net/ethernet/sfc/ptp.c
++++ b/drivers/net/ethernet/sfc/ptp.c
+@@ -535,7 +535,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
+                      (u32)((u64)ptp->start.dma_addr >> 32));
+       /* Clear flag that signals MC ready */
+-      ACCESS_ONCE(*start) = 0;
++      ACCESS_ONCE_RW(*start) = 0;
+       efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
+                          MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+index 50617c5..b13724c 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
+       writel(value, ioaddr + MMC_CNTRL);
+-      pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
+-               MMC_CNTRL, value);
++//    pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
++//             MMC_CNTRL, value);
+ }
+ /* To mask all all interrupts.*/
+diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
+index e6fe0d8..2b7d752 100644
+--- a/drivers/net/hyperv/hyperv_net.h
++++ b/drivers/net/hyperv/hyperv_net.h
+@@ -101,7 +101,7 @@ struct rndis_device {
+       enum rndis_device_state state;
+       bool link_state;
+-      atomic_t new_req_id;
++      atomic_unchecked_t new_req_id;
+       spinlock_t request_lock;
+       struct list_head req_list;
+diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
+index 0775f0a..d4fb316 100644
+--- a/drivers/net/hyperv/rndis_filter.c
++++ b/drivers/net/hyperv/rndis_filter.c
+@@ -104,7 +104,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
+        * template
+        */
+       set = &rndis_msg->msg.set_req;
+-      set->req_id = atomic_inc_return(&dev->new_req_id);
++      set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
+       /* Add to the request list */
+       spin_lock_irqsave(&dev->request_lock, flags);
+@@ -752,7 +752,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
+       /* Setup the rndis set */
+       halt = &request->request_msg.msg.halt_req;
+-      halt->req_id = atomic_inc_return(&dev->new_req_id);
++      halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
+       /* Ignore return since this msg is optional. */
+       rndis_filter_send_request(dev, request);
+diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
+index bf0d55e..82bcfbd1 100644
+--- a/drivers/net/ieee802154/fakehard.c
++++ b/drivers/net/ieee802154/fakehard.c
+@@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
+       phy->transmit_power = 0xbf;
+       dev->netdev_ops = &fake_ops;
+-      dev->ml_priv = &fake_mlme;
++      dev->ml_priv = (void *)&fake_mlme;
+       priv = netdev_priv(dev);
+       priv->phy = phy;
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 6e91931..2b0ebe7 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -905,13 +905,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
+ int macvlan_link_register(struct rtnl_link_ops *ops)
+ {
+       /* common fields */
+-      ops->priv_size          = sizeof(struct macvlan_dev);
+-      ops->validate           = macvlan_validate;
+-      ops->maxtype            = IFLA_MACVLAN_MAX;
+-      ops->policy             = macvlan_policy;
+-      ops->changelink         = macvlan_changelink;
+-      ops->get_size           = macvlan_get_size;
+-      ops->fill_info          = macvlan_fill_info;
++      pax_open_kernel();
++      *(size_t *)&ops->priv_size      = sizeof(struct macvlan_dev);
++      *(void **)&ops->validate        = macvlan_validate;
++      *(int *)&ops->maxtype           = IFLA_MACVLAN_MAX;
++      *(const void **)&ops->policy    = macvlan_policy;
++      *(void **)&ops->changelink      = macvlan_changelink;
++      *(void **)&ops->get_size        = macvlan_get_size;
++      *(void **)&ops->fill_info       = macvlan_fill_info;
++      pax_close_kernel();
+       return rtnl_link_register(ops);
+ };
+@@ -967,7 +969,7 @@ static int macvlan_device_event(struct notifier_block *unused,
+       return NOTIFY_DONE;
+ }
+-static struct notifier_block macvlan_notifier_block __read_mostly = {
++static struct notifier_block macvlan_notifier_block = {
+       .notifier_call  = macvlan_device_event,
+ };
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 523d6b2..5e16aa1 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -1110,7 +1110,7 @@ static int macvtap_device_event(struct notifier_block *unused,
+       return NOTIFY_DONE;
+ }
+-static struct notifier_block macvtap_notifier_block __read_mostly = {
++static struct notifier_block macvtap_notifier_block = {
+       .notifier_call  = macvtap_device_event,
+ };
+diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
+index daec9b0..6428fcb 100644
+--- a/drivers/net/phy/mdio-bitbang.c
++++ b/drivers/net/phy/mdio-bitbang.c
+@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
+       struct mdiobb_ctrl *ctrl = bus->priv;
+       module_put(ctrl->ops->owner);
++      mdiobus_unregister(bus);
+       mdiobus_free(bus);
+ }
+ EXPORT_SYMBOL(free_mdio_bitbang);
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 72ff14b..11d442d 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+       void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
+       struct ppp_stats stats;
+       struct ppp_comp_stats cstats;
+-      char *vers;
+       switch (cmd) {
+       case SIOCGPPPSTATS:
+@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+               break;
+       case SIOCGPPPVER:
+-              vers = PPP_VERSION;
+-              if (copy_to_user(addr, vers, strlen(vers) + 1))
++              if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
+                       break;
+               err = 0;
+               break;
+diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
+index 1252d9c..80e660b 100644
+--- a/drivers/net/slip/slhc.c
++++ b/drivers/net/slip/slhc.c
+@@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
+       register struct tcphdr *thp;
+       register struct iphdr *ip;
+       register struct cstate *cs;
+-      int len, hdrlen;
++      long len, hdrlen;
+       unsigned char *cp = icp;
+       /* We've got a compressed packet; read the change byte */
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index b305105..8ead6df 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -2682,7 +2682,7 @@ static int team_device_event(struct notifier_block *unused,
+       return NOTIFY_DONE;
+ }
+-static struct notifier_block team_notifier_block __read_mostly = {
++static struct notifier_block team_notifier_block = {
+       .notifier_call = team_device_event,
+ };
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 2491eb2..1a453eb 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1076,8 +1076,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+       u32 rxhash;
+       if (!(tun->flags & TUN_NO_PI)) {
+-              if ((len -= sizeof(pi)) > total_len)
++              if (len < sizeof(pi))
+                       return -EINVAL;
++              len -= sizeof(pi);
+               if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
+                       return -EFAULT;
+@@ -1085,8 +1086,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+       }
+       if (tun->flags & TUN_VNET_HDR) {
+-              if ((len -= tun->vnet_hdr_sz) > total_len)
++              if (len < tun->vnet_hdr_sz)
+                       return -EINVAL;
++              len -= tun->vnet_hdr_sz;
+               if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
+                       return -EFAULT;
+@@ -1869,7 +1871,7 @@ unlock:
+ }
+ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+-                          unsigned long arg, int ifreq_len)
++                          unsigned long arg, size_t ifreq_len)
+ {
+       struct tun_file *tfile = file->private_data;
+       struct tun_struct *tun;
+@@ -1881,6 +1883,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+       int vnet_hdr_sz;
+       int ret;
++      if (ifreq_len > sizeof ifr)
++              return -EFAULT;
++
+       if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
+               if (copy_from_user(&ifr, argp, ifreq_len))
+                       return -EFAULT;
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index cba1d46..f703766 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -71,7 +71,7 @@
+ #include <asm/byteorder.h>
+ #include <linux/serial_core.h>
+ #include <linux/serial.h>
+-
++#include <asm/local.h>
+ #define MOD_AUTHOR                    "Option Wireless"
+ #define MOD_DESCRIPTION                       "USB High Speed Option driver"
+@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
+       struct urb *urb;
+       urb = serial->rx_urb[0];
+-      if (serial->port.count > 0) {
++      if (atomic_read(&serial->port.count) > 0) {
+               count = put_rxbuf_data(urb, serial);
+               if (count == -1)
+                       return;
+@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
+       DUMP1(urb->transfer_buffer, urb->actual_length);
+       /* Anyone listening? */
+-      if (serial->port.count == 0)
++      if (atomic_read(&serial->port.count) == 0)
+               return;
+       if (status == 0) {
+@@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
+       tty_port_tty_set(&serial->port, tty);
+       /* check for port already opened, if not set the termios */
+-      serial->port.count++;
+-      if (serial->port.count == 1) {
++      if (atomic_inc_return(&serial->port.count) == 1) {
+               serial->rx_state = RX_IDLE;
+               /* Force default termio settings */
+               _hso_serial_set_termios(tty, NULL);
+@@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
+               result = hso_start_serial_device(serial->parent, GFP_KERNEL);
+               if (result) {
+                       hso_stop_serial_device(serial->parent);
+-                      serial->port.count--;
++                      atomic_dec(&serial->port.count);
+                       kref_put(&serial->parent->ref, hso_serial_ref_free);
+               }
+       } else {
+@@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
+       /* reset the rts and dtr */
+       /* do the actual close */
+-      serial->port.count--;
++      atomic_dec(&serial->port.count);
+-      if (serial->port.count <= 0) {
+-              serial->port.count = 0;
++      if (atomic_read(&serial->port.count) <= 0) {
++              atomic_set(&serial->port.count, 0);
+               tty_port_tty_set(&serial->port, NULL);
+               if (!usb_gone)
+                       hso_stop_serial_device(serial->parent);
+@@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
+       /* the actual setup */
+       spin_lock_irqsave(&serial->serial_lock, flags);
+-      if (serial->port.count)
++      if (atomic_read(&serial->port.count))
+               _hso_serial_set_termios(tty, old);
+       else
+               tty->termios = *old;
+@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
+                               D1("Pending read interrupt on port %d\n", i);
+                               spin_lock(&serial->serial_lock);
+                               if (serial->rx_state == RX_IDLE &&
+-                                      serial->port.count > 0) {
++                                      atomic_read(&serial->port.count) > 0) {
+                                       /* Setup and send a ctrl req read on
+                                        * port i */
+                                       if (!serial->rx_urb_filled[0]) {
+@@ -3057,7 +3056,7 @@ static int hso_resume(struct usb_interface *iface)
+       /* Start all serial ports */
+       for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
+               if (serial_table[i] && (serial_table[i]->interface == iface)) {
+-                      if (dev2ser(serial_table[i])->port.count) {
++                      if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
+                               result =
+                                   hso_start_serial_device(serial_table[i], GFP_NOIO);
+                               hso_kick_transmit(dev2ser(serial_table[i]));
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 57325f3..36b181f 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1579,7 +1579,7 @@ nla_put_failure:
+       return -EMSGSIZE;
+ }
+-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
++static struct rtnl_link_ops vxlan_link_ops = {
+       .kind           = "vxlan",
+       .maxtype        = IFLA_VXLAN_MAX,
+       .policy         = vxlan_policy,
+diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
+index 34c8a33..3261fdc 100644
+--- a/drivers/net/wireless/at76c50x-usb.c
++++ b/drivers/net/wireless/at76c50x-usb.c
+@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
+ }
+ /* Convert timeout from the DFU status to jiffies */
+-static inline unsigned long at76_get_timeout(struct dfu_status *s)
++static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
+ {
+       return msecs_to_jiffies((s->poll_timeout[2] << 16)
+                               | (s->poll_timeout[1] << 8)
+diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+index 8d78253..bebbb68 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
++++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+       ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
+       ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
+-      ACCESS_ONCE(ads->ds_link) = i->link;
+-      ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
++      ACCESS_ONCE_RW(ads->ds_link) = i->link;
++      ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
+       ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
+       ctl6 = SM(i->keytype, AR_EncrType);
+@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+       if ((i->is_first || i->is_last) &&
+           i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
+-              ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
++              ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
+                       | set11nTries(i->rates, 1)
+                       | set11nTries(i->rates, 2)
+                       | set11nTries(i->rates, 3)
+                       | (i->dur_update ? AR_DurUpdateEna : 0)
+                       | SM(0, AR_BurstDur);
+-              ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
++              ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
+                       | set11nRate(i->rates, 1)
+                       | set11nRate(i->rates, 2)
+                       | set11nRate(i->rates, 3);
+       } else {
+-              ACCESS_ONCE(ads->ds_ctl2) = 0;
+-              ACCESS_ONCE(ads->ds_ctl3) = 0;
++              ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
++              ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
+       }
+       if (!i->is_first) {
+-              ACCESS_ONCE(ads->ds_ctl0) = 0;
+-              ACCESS_ONCE(ads->ds_ctl1) = ctl1;
+-              ACCESS_ONCE(ads->ds_ctl6) = ctl6;
++              ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
++              ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
++              ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
+               return;
+       }
+@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+               break;
+       }
+-      ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
++      ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
+               | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+               | SM(i->txpower, AR_XmitPower)
+               | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+               | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
+                  (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
+-      ACCESS_ONCE(ads->ds_ctl1) = ctl1;
+-      ACCESS_ONCE(ads->ds_ctl6) = ctl6;
++      ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
++      ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
+       if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
+               return;
+-      ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
++      ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
+               | set11nPktDurRTSCTS(i->rates, 1);
+-      ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
++      ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
+               | set11nPktDurRTSCTS(i->rates, 3);
+-      ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
++      ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
+               | set11nRateFlags(i->rates, 1)
+               | set11nRateFlags(i->rates, 2)
+               | set11nRateFlags(i->rates, 3)
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+index 301bf72..3f5654f 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+             (i->qcu << AR_TxQcuNum_S) | desc_len;
+       checksum += val;
+-      ACCESS_ONCE(ads->info) = val;
++      ACCESS_ONCE_RW(ads->info) = val;
+       checksum += i->link;
+-      ACCESS_ONCE(ads->link) = i->link;
++      ACCESS_ONCE_RW(ads->link) = i->link;
+       checksum += i->buf_addr[0];
+-      ACCESS_ONCE(ads->data0) = i->buf_addr[0];
++      ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
+       checksum += i->buf_addr[1];
+-      ACCESS_ONCE(ads->data1) = i->buf_addr[1];
++      ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
+       checksum += i->buf_addr[2];
+-      ACCESS_ONCE(ads->data2) = i->buf_addr[2];
++      ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
+       checksum += i->buf_addr[3];
+-      ACCESS_ONCE(ads->data3) = i->buf_addr[3];
++      ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
+       checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
+-      ACCESS_ONCE(ads->ctl3) = val;
++      ACCESS_ONCE_RW(ads->ctl3) = val;
+       checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
+-      ACCESS_ONCE(ads->ctl5) = val;
++      ACCESS_ONCE_RW(ads->ctl5) = val;
+       checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
+-      ACCESS_ONCE(ads->ctl7) = val;
++      ACCESS_ONCE_RW(ads->ctl7) = val;
+       checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
+-      ACCESS_ONCE(ads->ctl9) = val;
++      ACCESS_ONCE_RW(ads->ctl9) = val;
+       checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
+-      ACCESS_ONCE(ads->ctl10) = checksum;
++      ACCESS_ONCE_RW(ads->ctl10) = checksum;
+       if (i->is_first || i->is_last) {
+-              ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
++              ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
+                       | set11nTries(i->rates, 1)
+                       | set11nTries(i->rates, 2)
+                       | set11nTries(i->rates, 3)
+                       | (i->dur_update ? AR_DurUpdateEna : 0)
+                       | SM(0, AR_BurstDur);
+-              ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
++              ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
+                       | set11nRate(i->rates, 1)
+                       | set11nRate(i->rates, 2)
+                       | set11nRate(i->rates, 3);
+       } else {
+-              ACCESS_ONCE(ads->ctl13) = 0;
+-              ACCESS_ONCE(ads->ctl14) = 0;
++              ACCESS_ONCE_RW(ads->ctl13) = 0;
++              ACCESS_ONCE_RW(ads->ctl14) = 0;
+       }
+       ads->ctl20 = 0;
+@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+       ctl17 = SM(i->keytype, AR_EncrType);
+       if (!i->is_first) {
+-              ACCESS_ONCE(ads->ctl11) = 0;
+-              ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
+-              ACCESS_ONCE(ads->ctl15) = 0;
+-              ACCESS_ONCE(ads->ctl16) = 0;
+-              ACCESS_ONCE(ads->ctl17) = ctl17;
+-              ACCESS_ONCE(ads->ctl18) = 0;
+-              ACCESS_ONCE(ads->ctl19) = 0;
++              ACCESS_ONCE_RW(ads->ctl11) = 0;
++              ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
++              ACCESS_ONCE_RW(ads->ctl15) = 0;
++              ACCESS_ONCE_RW(ads->ctl16) = 0;
++              ACCESS_ONCE_RW(ads->ctl17) = ctl17;
++              ACCESS_ONCE_RW(ads->ctl18) = 0;
++              ACCESS_ONCE_RW(ads->ctl19) = 0;
+               return;
+       }
+-      ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
++      ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
+               | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+               | SM(i->txpower, AR_XmitPower)
+               | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+       val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
+       ctl12 |= SM(val, AR_PAPRDChainMask);
+-      ACCESS_ONCE(ads->ctl12) = ctl12;
+-      ACCESS_ONCE(ads->ctl17) = ctl17;
++      ACCESS_ONCE_RW(ads->ctl12) = ctl12;
++      ACCESS_ONCE_RW(ads->ctl17) = ctl17;
+-      ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
++      ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
+               | set11nPktDurRTSCTS(i->rates, 1);
+-      ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
++      ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
+               | set11nPktDurRTSCTS(i->rates, 3);
+-      ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
++      ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
+               | set11nRateFlags(i->rates, 1)
+               | set11nRateFlags(i->rates, 2)
+               | set11nRateFlags(i->rates, 3)
+               | SM(i->rtscts_rate, AR_RTSCTSRate);
+-      ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
++      ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
+ }
+ static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
+diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
+index ae30343..a117806 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.h
++++ b/drivers/net/wireless/ath/ath9k/hw.h
+@@ -652,7 +652,7 @@ struct ath_hw_private_ops {
+       /* ANI */
+       void (*ani_cache_ini_regs)(struct ath_hw *ah);
+-};
++} __no_const;
+ /**
+  * struct ath_spec_scan - parameters for Atheros spectral scan
+@@ -721,7 +721,7 @@ struct ath_hw_ops {
+                                    struct ath_spec_scan *param);
+       void (*spectral_scan_trigger)(struct ath_hw *ah);
+       void (*spectral_scan_wait)(struct ath_hw *ah);
+-};
++} __no_const;
+ struct ath_nf_limits {
+       s16 max;
+diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
+index b37a582..680835d 100644
+--- a/drivers/net/wireless/iwlegacy/3945-mac.c
++++ b/drivers/net/wireless/iwlegacy/3945-mac.c
+@@ -3639,7 +3639,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+        */
+       if (il3945_mod_params.disable_hw_scan) {
+               D_INFO("Disabling hw_scan\n");
+-              il3945_mac_ops.hw_scan = NULL;
++              pax_open_kernel();
++              *(void **)&il3945_mac_ops.hw_scan = NULL;
++              pax_close_kernel();
+       }
+       D_INFO("*** LOAD DRIVER ***\n");
+diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+index d532948..e0d8bb1 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
++++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
+ {
+       struct iwl_priv *priv = file->private_data;
+       char buf[64];
+-      int buf_size;
++      size_t buf_size;
+       u32 offset, len;
+       memset(buf, 0, sizeof(buf));
+@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       u32 reset_flag;
+       memset(buf, 0, sizeof(buf));
+@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
+ {
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       int ht40;
+       memset(buf, 0, sizeof(buf));
+@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
+ {
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       int value;
+       memset(buf, 0, sizeof(buf));
+@@ -698,10 +698,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
+ DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
+ DEBUGFS_READ_FILE_OPS(current_sleep_command);
+-static const char *fmt_value = "  %-30s %10u\n";
+-static const char *fmt_hex   = "  %-30s       0x%02X\n";
+-static const char *fmt_table = "  %-30s %10u  %10u  %10u  %10u\n";
+-static const char *fmt_header =
++static const char fmt_value[] = "  %-30s %10u\n";
++static const char fmt_hex[]   = "  %-30s       0x%02X\n";
++static const char fmt_table[] = "  %-30s %10u  %10u  %10u  %10u\n";
++static const char fmt_header[] =
+       "%-32s    current  cumulative       delta         max\n";
+ static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
+@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
+ {
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       int clear;
+       memset(buf, 0, sizeof(buf));
+@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
+ {
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       int trace;
+       memset(buf, 0, sizeof(buf));
+@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
+ {
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       int missed;
+       memset(buf, 0, sizeof(buf));
+@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       int plcp;
+       memset(buf, 0, sizeof(buf));
+@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       int flush;
+       memset(buf, 0, sizeof(buf));
+@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       int rts;
+       if (!priv->cfg->ht_params)
+@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
+ {
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+@@ -2254,7 +2254,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
+       struct iwl_priv *priv = file->private_data;
+       u32 event_log_flag;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       /* check that the interface is up */
+       if (!iwl_is_ready(priv))
+@@ -2308,7 +2308,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+       u32 calib_disabled;
+-      int buf_size;
++      size_t buf_size;
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) - 1);
+diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
+index 50ba0a4..29424e7 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
+@@ -1329,7 +1329,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
+       struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       u32 reset_flag;
+       memset(buf, 0, sizeof(buf));
+@@ -1350,7 +1350,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
+ {
+       struct iwl_trans *trans = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       int csr;
+       memset(buf, 0, sizeof(buf));
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index cb34c78..9fec0dc 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -2195,25 +2195,19 @@ static int __init init_mac80211_hwsim(void)
+       if (channels > 1) {
+               hwsim_if_comb.num_different_channels = channels;
+-              mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
+-              mac80211_hwsim_ops.cancel_hw_scan =
+-                      mac80211_hwsim_cancel_hw_scan;
+-              mac80211_hwsim_ops.sw_scan_start = NULL;
+-              mac80211_hwsim_ops.sw_scan_complete = NULL;
+-              mac80211_hwsim_ops.remain_on_channel =
+-                      mac80211_hwsim_roc;
+-              mac80211_hwsim_ops.cancel_remain_on_channel =
+-                      mac80211_hwsim_croc;
+-              mac80211_hwsim_ops.add_chanctx =
+-                      mac80211_hwsim_add_chanctx;
+-              mac80211_hwsim_ops.remove_chanctx =
+-                      mac80211_hwsim_remove_chanctx;
+-              mac80211_hwsim_ops.change_chanctx =
+-                      mac80211_hwsim_change_chanctx;
+-              mac80211_hwsim_ops.assign_vif_chanctx =
+-                      mac80211_hwsim_assign_vif_chanctx;
+-              mac80211_hwsim_ops.unassign_vif_chanctx =
+-                      mac80211_hwsim_unassign_vif_chanctx;
++              pax_open_kernel();
++              *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
++              *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
++              *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
++              *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
++              *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
++              *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
++              *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
++              *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
++              *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
++              *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
++              *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
++              pax_close_kernel();
+       }
+       spin_lock_init(&hwsim_radio_lock);
+diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
+index 8169a85..7fa3b47 100644
+--- a/drivers/net/wireless/rndis_wlan.c
++++ b/drivers/net/wireless/rndis_wlan.c
+@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
+       netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
+-      if (rts_threshold < 0 || rts_threshold > 2347)
++      if (rts_threshold > 2347)
+               rts_threshold = 2347;
+       tmp = cpu_to_le32(rts_threshold);
+diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
+index 7510723..5ba37f5 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00.h
++++ b/drivers/net/wireless/rt2x00/rt2x00.h
+@@ -386,7 +386,7 @@ struct rt2x00_intf {
+        * for hardware which doesn't support hardware
+        * sequence counting.
+        */
+-      atomic_t seqno;
++      atomic_unchecked_t seqno;
+ };
+ static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
+diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
+index d955741..8730748 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
++++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
+@@ -252,9 +252,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
+        * sequence counter given by mac80211.
+        */
+       if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
+-              seqno = atomic_add_return(0x10, &intf->seqno);
++              seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
+       else
+-              seqno = atomic_read(&intf->seqno);
++              seqno = atomic_read_unchecked(&intf->seqno);
+       hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+       hdr->seq_ctrl |= cpu_to_le16(seqno);
+diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
+index e2b3d9c..67a5184 100644
+--- a/drivers/net/wireless/ti/wl1251/sdio.c
++++ b/drivers/net/wireless/ti/wl1251/sdio.c
+@@ -271,13 +271,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
+               irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+-              wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
+-              wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
++              pax_open_kernel();
++              *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
++              *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
++              pax_close_kernel();
+               wl1251_info("using dedicated interrupt line");
+       } else {
+-              wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
+-              wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
++              pax_open_kernel();
++              *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
++              *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
++              pax_close_kernel();
+               wl1251_info("using SDIO interrupt");
+       }
+diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
+index 1c627da..69f7d17 100644
+--- a/drivers/net/wireless/ti/wl12xx/main.c
++++ b/drivers/net/wireless/ti/wl12xx/main.c
+@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
+                      sizeof(wl->conf.mem));
+               /* read data preparation is only needed by wl127x */
+-              wl->ops->prepare_read = wl127x_prepare_read;
++              pax_open_kernel();
++              *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
++              pax_close_kernel();
+               wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
+                             WL127X_IFTYPE_SR_VER,  WL127X_MAJOR_SR_VER,
+@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
+                      sizeof(wl->conf.mem));
+               /* read data preparation is only needed by wl127x */
+-              wl->ops->prepare_read = wl127x_prepare_read;
++              pax_open_kernel();
++              *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
++              pax_close_kernel();
+               wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
+                             WL127X_IFTYPE_SR_VER,  WL127X_MAJOR_SR_VER,
+diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
+index 9fa692d..b31fee0 100644
+--- a/drivers/net/wireless/ti/wl18xx/main.c
++++ b/drivers/net/wireless/ti/wl18xx/main.c
+@@ -1687,8 +1687,10 @@ static int wl18xx_setup(struct wl1271 *wl)
+       }
+       if (!checksum_param) {
+-              wl18xx_ops.set_rx_csum = NULL;
+-              wl18xx_ops.init_vif = NULL;
++              pax_open_kernel();
++              *(void **)&wl18xx_ops.set_rx_csum = NULL;
++              *(void **)&wl18xx_ops.init_vif = NULL;
++              pax_close_kernel();
+       }
+       /* Enable 11a Band only if we have 5G antennas */
+diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
+index 7ef0b4a..ff65c28 100644
+--- a/drivers/net/wireless/zd1211rw/zd_usb.c
++++ b/drivers/net/wireless/zd1211rw/zd_usb.c
+@@ -386,7 +386,7 @@ static inline void handle_regs_int(struct urb *urb)
+ {
+       struct zd_usb *usb = urb->context;
+       struct zd_usb_interrupt *intr = &usb->intr;
+-      int len;
++      unsigned int len;
+       u16 int_num;
+       ZD_ASSERT(in_interrupt());
+diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
+index d93b2b6..ae50401 100644
+--- a/drivers/oprofile/buffer_sync.c
++++ b/drivers/oprofile/buffer_sync.c
+@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
+               if (cookie == NO_COOKIE)
+                       offset = pc;
+               if (cookie == INVALID_COOKIE) {
+-                      atomic_inc(&oprofile_stats.sample_lost_no_mapping);
++                      atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
+                       offset = pc;
+               }
+               if (cookie != last_cookie) {
+@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
+       /* add userspace sample */
+       if (!mm) {
+-              atomic_inc(&oprofile_stats.sample_lost_no_mm);
++              atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
+               return 0;
+       }
+       cookie = lookup_dcookie(mm, s->eip, &offset);
+       if (cookie == INVALID_COOKIE) {
+-              atomic_inc(&oprofile_stats.sample_lost_no_mapping);
++              atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
+               return 0;
+       }
+@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
+               /* ignore backtraces if failed to add a sample */
+               if (state == sb_bt_start) {
+                       state = sb_bt_ignore;
+-                      atomic_inc(&oprofile_stats.bt_lost_no_mapping);
++                      atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
+               }
+       }
+       release_mm(mm);
+diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
+index c0cc4e7..44d4e54 100644
+--- a/drivers/oprofile/event_buffer.c
++++ b/drivers/oprofile/event_buffer.c
+@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
+       }
+       if (buffer_pos == buffer_size) {
+-              atomic_inc(&oprofile_stats.event_lost_overflow);
++              atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
+               return;
+       }
+diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
+index ed2c3ec..deda85a 100644
+--- a/drivers/oprofile/oprof.c
++++ b/drivers/oprofile/oprof.c
+@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
+       if (oprofile_ops.switch_events())
+               return;
+-      atomic_inc(&oprofile_stats.multiplex_counter);
++      atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
+       start_switch_worker();
+ }
+diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
+index 84a208d..d61b0a1 100644
+--- a/drivers/oprofile/oprofile_files.c
++++ b/drivers/oprofile/oprofile_files.c
+@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
+ #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+-static ssize_t timeout_read(struct file *file, char __user *buf,
++static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
+               size_t count, loff_t *offset)
+ {
+       return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
+diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
+index 917d28e..d62d981 100644
+--- a/drivers/oprofile/oprofile_stats.c
++++ b/drivers/oprofile/oprofile_stats.c
+@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
+               cpu_buf->sample_invalid_eip = 0;
+       }
+-      atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
+-      atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
+-      atomic_set(&oprofile_stats.event_lost_overflow, 0);
+-      atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
+-      atomic_set(&oprofile_stats.multiplex_counter, 0);
++      atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
++      atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
++      atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
++      atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
++      atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
+ }
+diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
+index 38b6fc0..b5cbfce 100644
+--- a/drivers/oprofile/oprofile_stats.h
++++ b/drivers/oprofile/oprofile_stats.h
+@@ -13,11 +13,11 @@
+ #include <linux/atomic.h>
+ struct oprofile_stat_struct {
+-      atomic_t sample_lost_no_mm;
+-      atomic_t sample_lost_no_mapping;
+-      atomic_t bt_lost_no_mapping;
+-      atomic_t event_lost_overflow;
+-      atomic_t multiplex_counter;
++      atomic_unchecked_t sample_lost_no_mm;
++      atomic_unchecked_t sample_lost_no_mapping;
++      atomic_unchecked_t bt_lost_no_mapping;
++      atomic_unchecked_t event_lost_overflow;
++      atomic_unchecked_t multiplex_counter;
+ };
+ extern struct oprofile_stat_struct oprofile_stats;
+diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
+index 7c12d9c..558bf3bb 100644
+--- a/drivers/oprofile/oprofilefs.c
++++ b/drivers/oprofile/oprofilefs.c
+@@ -190,7 +190,7 @@ static const struct file_operations atomic_ro_fops = {
+ int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
+-      char const *name, atomic_t *val)
++      char const *name, atomic_unchecked_t *val)
+ {
+       return __oprofilefs_create_file(sb, root, name,
+                                       &atomic_ro_fops, 0444, val);
+diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
+index 93404f7..4a313d8 100644
+--- a/drivers/oprofile/timer_int.c
++++ b/drivers/oprofile/timer_int.c
+@@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __refdata oprofile_cpu_notifier = {
++static struct notifier_block oprofile_cpu_notifier = {
+       .notifier_call = oprofile_cpu_notify,
+ };
+diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
+index 92ed045..62d39bd7 100644
+--- a/drivers/parport/procfs.c
++++ b/drivers/parport/procfs.c
+@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
+       *ppos += len;
+-      return copy_to_user(result, buffer, len) ? -EFAULT : 0;
++      return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
+ }
+ #ifdef CONFIG_PARPORT_1284
+@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
+       *ppos += len;
+-      return copy_to_user (result, buffer, len) ? -EFAULT : 0;
++      return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
+ }
+ #endif /* IEEE1284.3 support. */
+diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
+index c35e8ad..fc33beb 100644
+--- a/drivers/pci/hotplug/acpiphp_ibm.c
++++ b/drivers/pci/hotplug/acpiphp_ibm.c
+@@ -464,7 +464,9 @@ static int __init ibm_acpiphp_init(void)
+               goto init_cleanup;
+       }
+-      ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
++      pax_open_kernel();
++      *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
++      pax_close_kernel();
+       retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
+       return retval;
+diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
+index a6a71c4..c91097b 100644
+--- a/drivers/pci/hotplug/cpcihp_generic.c
++++ b/drivers/pci/hotplug/cpcihp_generic.c
+@@ -73,7 +73,6 @@ static u16 port;
+ static unsigned int enum_bit;
+ static u8 enum_mask;
+-static struct cpci_hp_controller_ops generic_hpc_ops;
+ static struct cpci_hp_controller generic_hpc;
+ static int __init validate_parameters(void)
+@@ -139,6 +138,10 @@ static int query_enum(void)
+       return ((value & enum_mask) == enum_mask);
+ }
++static struct cpci_hp_controller_ops generic_hpc_ops = {
++      .query_enum = query_enum,
++};
++
+ static int __init cpcihp_generic_init(void)
+ {
+       int status;
+@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
+       pci_dev_put(dev);
+       memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
+-      generic_hpc_ops.query_enum = query_enum;
+       generic_hpc.ops = &generic_hpc_ops;
+       status = cpci_hp_register_controller(&generic_hpc);
+diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
+index 449b4bb..257e2e8 100644
+--- a/drivers/pci/hotplug/cpcihp_zt5550.c
++++ b/drivers/pci/hotplug/cpcihp_zt5550.c
+@@ -59,7 +59,6 @@
+ /* local variables */
+ static bool debug;
+ static bool poll;
+-static struct cpci_hp_controller_ops zt5550_hpc_ops;
+ static struct cpci_hp_controller zt5550_hpc;
+ /* Primary cPCI bus bridge device */
+@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
+       return 0;
+ }
++static struct cpci_hp_controller_ops zt5550_hpc_ops = {
++      .query_enum = zt5550_hc_query_enum,
++};
++
+ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+ {
+       int status;
+@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
+       dbg("returned from zt5550_hc_config");
+       memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
+-      zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
+       zt5550_hpc.ops = &zt5550_hpc_ops;
+       if(!poll) {
+               zt5550_hpc.irq = hc_dev->irq;
+               zt5550_hpc.irq_flags = IRQF_SHARED;
+               zt5550_hpc.dev_id = hc_dev;
+-              zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
+-              zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
+-              zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
++              pax_open_kernel();
++              *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
++              *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
++              *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
++              pax_close_kernel();
+       } else {
+               info("using ENUM# polling mode");
+       }
+diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
+index 76ba8a1..20ca857 100644
+--- a/drivers/pci/hotplug/cpqphp_nvram.c
++++ b/drivers/pci/hotplug/cpqphp_nvram.c
+@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
+ void compaq_nvram_init (void __iomem *rom_start)
+ {
++
++#ifndef CONFIG_PAX_KERNEXEC
+       if (rom_start) {
+               compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
+       }
++#endif
++
+       dbg("int15 entry  = %p\n", compaq_int15_entry_point);
+       /* initialize our int15 lock */
+diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
+index ec20f74..c1d961e 100644
+--- a/drivers/pci/hotplug/pci_hotplug_core.c
++++ b/drivers/pci/hotplug/pci_hotplug_core.c
+@@ -441,8 +441,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
+               return -EINVAL;
+       }
+-      slot->ops->owner = owner;
+-      slot->ops->mod_name = mod_name;
++      pax_open_kernel();
++      *(struct module **)&slot->ops->owner = owner;
++      *(const char **)&slot->ops->mod_name = mod_name;
++      pax_close_kernel();
+       mutex_lock(&pci_hp_mutex);
+       /*
+diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
+index 7d72c5e..edce02c 100644
+--- a/drivers/pci/hotplug/pciehp_core.c
++++ b/drivers/pci/hotplug/pciehp_core.c
+@@ -91,7 +91,7 @@ static int init_slot(struct controller *ctrl)
+       struct slot *slot = ctrl->slot;
+       struct hotplug_slot *hotplug = NULL;
+       struct hotplug_slot_info *info = NULL;
+-      struct hotplug_slot_ops *ops = NULL;
++      hotplug_slot_ops_no_const *ops = NULL;
+       char name[SLOT_NAME_SIZE];
+       int retval = -ENOMEM;
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 5b4a9d9..cd5ac1f 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -1071,7 +1071,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
+ {
+       /* allocate attribute structure, piggyback attribute name */
+       int name_len = write_combine ? 13 : 10;
+-      struct bin_attribute *res_attr;
++      bin_attribute_no_const *res_attr;
+       int retval;
+       res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
+@@ -1256,7 +1256,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
+ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
+ {
+       int retval;
+-      struct bin_attribute *attr;
++      bin_attribute_no_const *attr;
+       /* If the device has VPD, try to expose it in sysfs. */
+       if (dev->vpd) {
+@@ -1303,7 +1303,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
+ {
+       int retval;
+       int rom_size = 0;
+-      struct bin_attribute *attr;
++      bin_attribute_no_const *attr;
+       if (!sysfs_initialized)
+               return -EACCES;
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index d1182c4..2a138ec 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -92,7 +92,7 @@ struct pci_vpd_ops {
+ struct pci_vpd {
+       unsigned int len;
+       const struct pci_vpd_ops *ops;
+-      struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
++      bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
+ };
+ int pci_vpd_pci22_init(struct pci_dev *dev);
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index d320df6..ca9a8f6 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -27,9 +27,9 @@
+ #define MODULE_PARAM_PREFIX "pcie_aspm."
+ /* Note: those are not register definitions */
+-#define ASPM_STATE_L0S_UP     (1)     /* Upstream direction L0s state */
+-#define ASPM_STATE_L0S_DW     (2)     /* Downstream direction L0s state */
+-#define ASPM_STATE_L1         (4)     /* L1 state */
++#define ASPM_STATE_L0S_UP     (1U)    /* Upstream direction L0s state */
++#define ASPM_STATE_L0S_DW     (2U)    /* Downstream direction L0s state */
++#define ASPM_STATE_L1         (4U)    /* L1 state */
+ #define ASPM_STATE_L0S                (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
+ #define ASPM_STATE_ALL                (ASPM_STATE_L0S | ASPM_STATE_L1)
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index ea37072..10e58e56 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+       struct pci_bus_region region;
+       bool bar_too_big = false, bar_disabled = false;
+-      mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
++      mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
+       /* No printks while decoding is disabled! */
+       if (!dev->mmio_always_on) {
+diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
+index 0812608..b04018c4 100644
+--- a/drivers/pci/proc.c
++++ b/drivers/pci/proc.c
+@@ -453,7 +453,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
+ static int __init pci_proc_init(void)
+ {
+       struct pci_dev *dev = NULL;
++
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++      proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
++#endif
++#else
+       proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
++#endif
+       proc_create("devices", 0, proc_bus_pci_dir,
+                   &proc_bus_pci_dev_operations);
+       proc_initialized = 1;
+diff --git a/drivers/platform/x86/chromeos_laptop.c b/drivers/platform/x86/chromeos_laptop.c
+index 3e5b4497..dcdfb70 100644
+--- a/drivers/platform/x86/chromeos_laptop.c
++++ b/drivers/platform/x86/chromeos_laptop.c
+@@ -301,7 +301,7 @@ static int __init setup_tsl2563_als(const struct dmi_system_id *id)
+       return 0;
+ }
+-static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
++static struct dmi_system_id __initconst chromeos_laptop_dmi_table[] = {
+       {
+               .ident = "Samsung Series 5 550 - Touchpad",
+               .matches = {
+diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
+index 6b22938..bc9700e 100644
+--- a/drivers/platform/x86/msi-laptop.c
++++ b/drivers/platform/x86/msi-laptop.c
+@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
+       if (!quirks->ec_read_only) {
+               /* allow userland write sysfs file  */
+-              dev_attr_bluetooth.store = store_bluetooth;
+-              dev_attr_wlan.store = store_wlan;
+-              dev_attr_threeg.store = store_threeg;
+-              dev_attr_bluetooth.attr.mode |= S_IWUSR;
+-              dev_attr_wlan.attr.mode |= S_IWUSR;
+-              dev_attr_threeg.attr.mode |= S_IWUSR;
++              pax_open_kernel();
++              *(void **)&dev_attr_bluetooth.store = store_bluetooth;
++              *(void **)&dev_attr_wlan.store = store_wlan;
++              *(void **)&dev_attr_threeg.store = store_threeg;
++              *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
++              *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
++              *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
++              pax_close_kernel();
+       }
+       /* disable hardware control by fn key */
+diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
+index 2ac045f..39c443d 100644
+--- a/drivers/platform/x86/sony-laptop.c
++++ b/drivers/platform/x86/sony-laptop.c
+@@ -2483,7 +2483,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
+ }
+ /* High speed charging function */
+-static struct device_attribute *hsc_handle;
++static device_attribute_no_const *hsc_handle;
+ static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
+               struct device_attribute *attr,
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 54d31c0..3f896d3 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -2093,7 +2093,7 @@ static int hotkey_mask_get(void)
+       return 0;
+ }
+-void static hotkey_mask_warn_incomplete_mask(void)
++static void hotkey_mask_warn_incomplete_mask(void)
+ {
+       /* log only what the user can fix... */
+       const u32 wantedmask = hotkey_driver_mask &
+@@ -2324,11 +2324,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
+       }
+ }
+-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+-                                         struct tp_nvram_state *newn,
+-                                         const u32 event_mask)
+-{
+-
+ #define TPACPI_COMPARE_KEY(__scancode, __member) \
+       do { \
+               if ((event_mask & (1 << __scancode)) && \
+@@ -2342,36 +2337,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+                       tpacpi_hotkey_send_key(__scancode); \
+       } while (0)
+-      void issue_volchange(const unsigned int oldvol,
+-                           const unsigned int newvol)
+-      {
+-              unsigned int i = oldvol;
++static void issue_volchange(const unsigned int oldvol,
++                          const unsigned int newvol,
++                          const u32 event_mask)
++{
++      unsigned int i = oldvol;
+-              while (i > newvol) {
+-                      TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
+-                      i--;
+-              }
+-              while (i < newvol) {
+-                      TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
+-                      i++;
+-              }
++      while (i > newvol) {
++              TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
++              i--;
+       }
++      while (i < newvol) {
++              TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
++              i++;
++      }
++}
+-      void issue_brightnesschange(const unsigned int oldbrt,
+-                                  const unsigned int newbrt)
+-      {
+-              unsigned int i = oldbrt;
++static void issue_brightnesschange(const unsigned int oldbrt,
++                                 const unsigned int newbrt,
++                                 const u32 event_mask)
++{
++      unsigned int i = oldbrt;
+-              while (i > newbrt) {
+-                      TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
+-                      i--;
+-              }
+-              while (i < newbrt) {
+-                      TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
+-                      i++;
+-              }
++      while (i > newbrt) {
++              TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
++              i--;
++      }
++      while (i < newbrt) {
++              TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
++              i++;
+       }
++}
++static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
++                                         struct tp_nvram_state *newn,
++                                         const u32 event_mask)
++{
+       TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
+       TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
+       TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
+@@ -2405,7 +2406,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+                   oldn->volume_level != newn->volume_level) {
+                       /* recently muted, or repeated mute keypress, or
+                        * multiple presses ending in mute */
+-                      issue_volchange(oldn->volume_level, newn->volume_level);
++                      issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
+                       TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
+               }
+       } else {
+@@ -2415,7 +2416,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+                       TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
+               }
+               if (oldn->volume_level != newn->volume_level) {
+-                      issue_volchange(oldn->volume_level, newn->volume_level);
++                      issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
+               } else if (oldn->volume_toggle != newn->volume_toggle) {
+                       /* repeated vol up/down keypress at end of scale ? */
+                       if (newn->volume_level == 0)
+@@ -2428,7 +2429,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+       /* handle brightness */
+       if (oldn->brightness_level != newn->brightness_level) {
+               issue_brightnesschange(oldn->brightness_level,
+-                                     newn->brightness_level);
++                                     newn->brightness_level,
++                                     event_mask);
+       } else if (oldn->brightness_toggle != newn->brightness_toggle) {
+               /* repeated key presses that didn't change state */
+               if (newn->brightness_level == 0)
+@@ -2437,10 +2439,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+                               && !tp_features.bright_unkfw)
+                       TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
+       }
++}
+ #undef TPACPI_COMPARE_KEY
+ #undef TPACPI_MAY_SEND_KEY
+-}
+ /*
+  * Polling driver
+diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
+index 769d265..a3a05ca 100644
+--- a/drivers/pnp/pnpbios/bioscalls.c
++++ b/drivers/pnp/pnpbios/bioscalls.c
+@@ -58,7 +58,7 @@ do { \
+       set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
+ } while(0)
+-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
+                       (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
+ /*
+@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
+       cpu = get_cpu();
+       save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
++
++      pax_open_kernel();
+       get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
++      pax_close_kernel();
+       /* On some boxes IRQ's during PnP BIOS calls are deadly.  */
+       spin_lock_irqsave(&pnp_bios_lock, flags);
+@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
+                            :"memory");
+       spin_unlock_irqrestore(&pnp_bios_lock, flags);
++      pax_open_kernel();
+       get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
++      pax_close_kernel();
++
+       put_cpu();
+       /* If we get here and this is set then the PnP BIOS faulted on us. */
+@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
+       return status;
+ }
+-void pnpbios_calls_init(union pnp_bios_install_struct *header)
++void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
+ {
+       int i;
+@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
+       pnp_bios_callpoint.offset = header->fields.pm16offset;
+       pnp_bios_callpoint.segment = PNP_CS16;
++      pax_open_kernel();
++
+       for_each_possible_cpu(i) {
+               struct desc_struct *gdt = get_cpu_gdt_table(i);
+               if (!gdt)
+@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
+               set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
+                        (unsigned long)__va(header->fields.pm16dseg));
+       }
++
++      pax_close_kernel();
+ }
+diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
+index 3e6db1c..1fbbdae 100644
+--- a/drivers/pnp/resource.c
++++ b/drivers/pnp/resource.c
+@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
+               return 1;
+       /* check if the resource is valid */
+-      if (*irq < 0 || *irq > 15)
++      if (*irq > 15)
+               return 0;
+       /* check if the resource is reserved */
+@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
+               return 1;
+       /* check if the resource is valid */
+-      if (*dma < 0 || *dma == 4 || *dma > 7)
++      if (*dma == 4 || *dma > 7)
+               return 0;
+       /* check if the resource is reserved */
+diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
+index 0c52e2a..3421ab7 100644
+--- a/drivers/power/pda_power.c
++++ b/drivers/power/pda_power.c
+@@ -37,7 +37,11 @@ static int polling;
+ #if IS_ENABLED(CONFIG_USB_PHY)
+ static struct usb_phy *transceiver;
+-static struct notifier_block otg_nb;
++static int otg_handle_notification(struct notifier_block *nb,
++              unsigned long event, void *unused);
++static struct notifier_block otg_nb = {
++      .notifier_call = otg_handle_notification
++};
+ #endif
+ static struct regulator *ac_draw;
+@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
+ #if IS_ENABLED(CONFIG_USB_PHY)
+       if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
+-              otg_nb.notifier_call = otg_handle_notification;
+               ret = usb_register_notifier(transceiver, &otg_nb);
+               if (ret) {
+                       dev_err(dev, "failure to register otg notifier\n");
+diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
+index cc439fd..8fa30df 100644
+--- a/drivers/power/power_supply.h
++++ b/drivers/power/power_supply.h
+@@ -16,12 +16,12 @@ struct power_supply;
+ #ifdef CONFIG_SYSFS
+-extern void power_supply_init_attrs(struct device_type *dev_type);
++extern void power_supply_init_attrs(void);
+ extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
+ #else
+-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
++static inline void power_supply_init_attrs(void) {}
+ #define power_supply_uevent NULL
+ #endif /* CONFIG_SYSFS */
+diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
+index 1c517c3..ffa2f17 100644
+--- a/drivers/power/power_supply_core.c
++++ b/drivers/power/power_supply_core.c
+@@ -24,7 +24,10 @@
+ struct class *power_supply_class;
+ EXPORT_SYMBOL_GPL(power_supply_class);
+-static struct device_type power_supply_dev_type;
++extern const struct attribute_group *power_supply_attr_groups[];
++static struct device_type power_supply_dev_type = {
++      .groups = power_supply_attr_groups,
++};
+ static bool __power_supply_is_supplied_by(struct power_supply *supplier,
+                                        struct power_supply *supply)
+@@ -554,7 +557,7 @@ static int __init power_supply_class_init(void)
+               return PTR_ERR(power_supply_class);
+       power_supply_class->dev_uevent = power_supply_uevent;
+-      power_supply_init_attrs(&power_supply_dev_type);
++      power_supply_init_attrs();
+       return 0;
+ }
+diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
+index 29178f7..c65f324 100644
+--- a/drivers/power/power_supply_sysfs.c
++++ b/drivers/power/power_supply_sysfs.c
+@@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
+       .is_visible = power_supply_attr_is_visible,
+ };
+-static const struct attribute_group *power_supply_attr_groups[] = {
++const struct attribute_group *power_supply_attr_groups[] = {
+       &power_supply_attr_group,
+       NULL,
+ };
+-void power_supply_init_attrs(struct device_type *dev_type)
++void power_supply_init_attrs(void)
+ {
+       int i;
+-      dev_type->groups = power_supply_attr_groups;
+-
+       for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
+               __power_supply_attrs[i] = &power_supply_attrs[i].attr;
+ }
+diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
+index d428ef9..fdc0357 100644
+--- a/drivers/regulator/max8660.c
++++ b/drivers/regulator/max8660.c
+@@ -333,8 +333,10 @@ static int max8660_probe(struct i2c_client *client,
+               max8660->shadow_regs[MAX8660_OVER1] = 5;
+       } else {
+               /* Otherwise devices can be toggled via software */
+-              max8660_dcdc_ops.enable = max8660_dcdc_enable;
+-              max8660_dcdc_ops.disable = max8660_dcdc_disable;
++              pax_open_kernel();
++              *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
++              *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
++              pax_close_kernel();
+       }
+       /*
+diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
+index adb1414..c13e0ce 100644
+--- a/drivers/regulator/max8973-regulator.c
++++ b/drivers/regulator/max8973-regulator.c
+@@ -401,9 +401,11 @@ static int max8973_probe(struct i2c_client *client,
+       if (!pdata->enable_ext_control) {
+               max->desc.enable_reg = MAX8973_VOUT;
+               max->desc.enable_mask = MAX8973_VOUT_ENABLE;
+-              max8973_dcdc_ops.enable = regulator_enable_regmap;
+-              max8973_dcdc_ops.disable = regulator_disable_regmap;
+-              max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
++              pax_open_kernel();
++              *(void **)&max8973_dcdc_ops.enable = regulator_enable_regmap;
++              *(void **)&max8973_dcdc_ops.disable = regulator_disable_regmap;
++              *(void **)&max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
++              pax_close_kernel();
+       }
+       max->enable_external_control = pdata->enable_ext_control;
+diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
+index b716283..3cc4349 100644
+--- a/drivers/regulator/mc13892-regulator.c
++++ b/drivers/regulator/mc13892-regulator.c
+@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
+       }
+       mc13xxx_unlock(mc13892);
+-      mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
++      pax_open_kernel();
++      *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
+               = mc13892_vcam_set_mode;
+-      mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
++      *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
+               = mc13892_vcam_get_mode;
++      pax_close_kernel();
+       mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
+                                       ARRAY_SIZE(mc13892_regulators));
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index f1cb706..4c7832a 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -724,7 +724,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
+       hpet_rtc_timer_init();
+       /* export at least the first block of NVRAM */
+-      nvram.size = address_space - NVRAM_OFFSET;
++      pax_open_kernel();
++      *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
++      pax_close_kernel();
+       retval = sysfs_create_bin_file(&dev->kobj, &nvram);
+       if (retval < 0) {
+               dev_dbg(dev, "can't create nvram file? %d\n", retval);
+diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
+index d049393..bb20be0 100644
+--- a/drivers/rtc/rtc-dev.c
++++ b/drivers/rtc/rtc-dev.c
+@@ -16,6 +16,7 @@
+ #include <linux/module.h>
+ #include <linux/rtc.h>
+ #include <linux/sched.h>
++#include <linux/grsecurity.h>
+ #include "rtc-core.h"
+ static dev_t rtc_devt;
+@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
+               if (copy_from_user(&tm, uarg, sizeof(tm)))
+                       return -EFAULT;
++              gr_log_timechange();
++
+               return rtc_set_time(rtc, &tm);
+       case RTC_PIE_ON:
+diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
+index b53992a..776df84 100644
+--- a/drivers/rtc/rtc-ds1307.c
++++ b/drivers/rtc/rtc-ds1307.c
+@@ -107,7 +107,7 @@ struct ds1307 {
+       u8                      offset; /* register's offset */
+       u8                      regs[11];
+       u16                     nvram_offset;
+-      struct bin_attribute    *nvram;
++      bin_attribute_no_const  *nvram;
+       enum ds_type            type;
+       unsigned long           flags;
+ #define HAS_NVRAM     0               /* bit 0 == sysfs file active */
+diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
+index 130f29a..6179d03 100644
+--- a/drivers/rtc/rtc-m48t59.c
++++ b/drivers/rtc/rtc-m48t59.c
+@@ -482,7 +482,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
+               goto out;
+       }
+-      m48t59_nvram_attr.size = pdata->offset;
++      pax_open_kernel();
++      *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
++      pax_close_kernel();
+       ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
+       if (ret) {
+diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
+index e693af6..2e525b6 100644
+--- a/drivers/scsi/bfa/bfa_fcpim.h
++++ b/drivers/scsi/bfa/bfa_fcpim.h
+@@ -36,7 +36,7 @@ struct bfa_iotag_s {
+ struct bfa_itn_s {
+       bfa_isr_func_t isr;
+-};
++} __no_const;
+ void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
+               void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
+diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
+index 23a90e7..9cf04ee 100644
+--- a/drivers/scsi/bfa/bfa_ioc.h
++++ b/drivers/scsi/bfa/bfa_ioc.h
+@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
+       bfa_ioc_disable_cbfn_t  disable_cbfn;
+       bfa_ioc_hbfail_cbfn_t   hbfail_cbfn;
+       bfa_ioc_reset_cbfn_t    reset_cbfn;
+-};
++} __no_const;
+ /*
+  * IOC event notification mechanism.
+@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
+       void            (*ioc_sync_ack)         (struct bfa_ioc_s *ioc);
+       bfa_boolean_t   (*ioc_sync_complete)    (struct bfa_ioc_s *ioc);
+       bfa_boolean_t   (*ioc_lpu_read_stat)    (struct bfa_ioc_s *ioc);
+-};
++} __no_const;
+ /*
+  * Queue element to wait for room in request queue. FIFO order is
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index df0c3c7..b00e1d0 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -42,7 +42,7 @@
+ #include "scsi_logging.h"
+-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0);   /* host_no for next new host */
++static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
+ static void scsi_host_cls_release(struct device *dev)
+@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
+        * subtract one because we increment first then return, but we need to
+        * know what the next host number was before increment
+        */
+-      shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
++      shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
+       shost->dma_channel = 0xff;
+       /* These three are default values which can be overridden */
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 7f4f790..b75b92a 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
+       unsigned long flags;
+       if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+-              return h->access.command_completed(h, q);
++              return h->access->command_completed(h, q);
+       if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
+               a = rq->head[rq->current_entry];
+@@ -3422,7 +3422,7 @@ static void start_io(struct ctlr_info *h)
+       while (!list_empty(&h->reqQ)) {
+               c = list_entry(h->reqQ.next, struct CommandList, list);
+               /* can't do anything if fifo is full */
+-              if ((h->access.fifo_full(h))) {
++              if ((h->access->fifo_full(h))) {
+                       dev_warn(&h->pdev->dev, "fifo full\n");
+                       break;
+               }
+@@ -3444,7 +3444,7 @@ static void start_io(struct ctlr_info *h)
+               /* Tell the controller execute command */
+               spin_unlock_irqrestore(&h->lock, flags);
+-              h->access.submit_command(h, c);
++              h->access->submit_command(h, c);
+               spin_lock_irqsave(&h->lock, flags);
+       }
+       spin_unlock_irqrestore(&h->lock, flags);
+@@ -3452,17 +3452,17 @@ static void start_io(struct ctlr_info *h)
+ static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
+ {
+-      return h->access.command_completed(h, q);
++      return h->access->command_completed(h, q);
+ }
+ static inline bool interrupt_pending(struct ctlr_info *h)
+ {
+-      return h->access.intr_pending(h);
++      return h->access->intr_pending(h);
+ }
+ static inline long interrupt_not_for_us(struct ctlr_info *h)
+ {
+-      return (h->access.intr_pending(h) == 0) ||
++      return (h->access->intr_pending(h) == 0) ||
+               (h->interrupts_enabled == 0);
+ }
+@@ -4364,7 +4364,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
+       if (prod_index < 0)
+               return -ENODEV;
+       h->product_name = products[prod_index].product_name;
+-      h->access = *(products[prod_index].access);
++      h->access = products[prod_index].access;
+       pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
+                              PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
+@@ -4646,7 +4646,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
+       assert_spin_locked(&lockup_detector_lock);
+       remove_ctlr_from_lockup_detector_list(h);
+-      h->access.set_intr_mask(h, HPSA_INTR_OFF);
++      h->access->set_intr_mask(h, HPSA_INTR_OFF);
+       spin_lock_irqsave(&h->lock, flags);
+       h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+       spin_unlock_irqrestore(&h->lock, flags);
+@@ -4823,7 +4823,7 @@ reinit_after_soft_reset:
+       }
+       /* make sure the board interrupts are off */
+-      h->access.set_intr_mask(h, HPSA_INTR_OFF);
++      h->access->set_intr_mask(h, HPSA_INTR_OFF);
+       if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
+               goto clean2;
+@@ -4857,7 +4857,7 @@ reinit_after_soft_reset:
+                * fake ones to scoop up any residual completions.
+                */
+               spin_lock_irqsave(&h->lock, flags);
+-              h->access.set_intr_mask(h, HPSA_INTR_OFF);
++              h->access->set_intr_mask(h, HPSA_INTR_OFF);
+               spin_unlock_irqrestore(&h->lock, flags);
+               free_irqs(h);
+               rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
+@@ -4876,9 +4876,9 @@ reinit_after_soft_reset:
+               dev_info(&h->pdev->dev, "Board READY.\n");
+               dev_info(&h->pdev->dev,
+                       "Waiting for stale completions to drain.\n");
+-              h->access.set_intr_mask(h, HPSA_INTR_ON);
++              h->access->set_intr_mask(h, HPSA_INTR_ON);
+               msleep(10000);
+-              h->access.set_intr_mask(h, HPSA_INTR_OFF);
++              h->access->set_intr_mask(h, HPSA_INTR_OFF);
+               rc = controller_reset_failed(h->cfgtable);
+               if (rc)
+@@ -4899,7 +4899,7 @@ reinit_after_soft_reset:
+       }
+       /* Turn the interrupts on so we can service requests */
+-      h->access.set_intr_mask(h, HPSA_INTR_ON);
++      h->access->set_intr_mask(h, HPSA_INTR_ON);
+       hpsa_hba_inquiry(h);
+       hpsa_register_scsi(h);  /* hook ourselves into SCSI subsystem */
+@@ -4954,7 +4954,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
+        * To write all data in the battery backed cache to disks
+        */
+       hpsa_flush_cache(h);
+-      h->access.set_intr_mask(h, HPSA_INTR_OFF);
++      h->access->set_intr_mask(h, HPSA_INTR_OFF);
+       hpsa_free_irqs_and_disable_msix(h);
+ }
+@@ -5122,7 +5122,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
+               return;
+       }
+       /* Change the access methods to the performant access methods */
+-      h->access = SA5_performant_access;
++      h->access = &SA5_performant_access;
+       h->transMethod = CFGTBL_Trans_Performant;
+ }
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+index 9816479..c5d4e97 100644
+--- a/drivers/scsi/hpsa.h
++++ b/drivers/scsi/hpsa.h
+@@ -79,7 +79,7 @@ struct ctlr_info {
+       unsigned int msix_vector;
+       unsigned int msi_vector;
+       int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
+-      struct access_method access;
++      struct access_method *access;
+       /* queue and queue Info */
+       struct list_head reqQ;
+diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
+index 8b928c6..9c76300 100644
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -100,12 +100,12 @@ struct fc_exch_mgr {
+       u16             pool_max_index;
+       struct {
+-              atomic_t no_free_exch;
+-              atomic_t no_free_exch_xid;
+-              atomic_t xid_not_found;
+-              atomic_t xid_busy;
+-              atomic_t seq_not_found;
+-              atomic_t non_bls_resp;
++              atomic_unchecked_t no_free_exch;
++              atomic_unchecked_t no_free_exch_xid;
++              atomic_unchecked_t xid_not_found;
++              atomic_unchecked_t xid_busy;
++              atomic_unchecked_t seq_not_found;
++              atomic_unchecked_t non_bls_resp;
+       } stats;
+ };
+@@ -736,7 +736,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
+       /* allocate memory for exchange */
+       ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
+       if (!ep) {
+-              atomic_inc(&mp->stats.no_free_exch);
++              atomic_inc_unchecked(&mp->stats.no_free_exch);
+               goto out;
+       }
+       memset(ep, 0, sizeof(*ep));
+@@ -797,7 +797,7 @@ out:
+       return ep;
+ err:
+       spin_unlock_bh(&pool->lock);
+-      atomic_inc(&mp->stats.no_free_exch_xid);
++      atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
+       mempool_free(ep, mp->ep_pool);
+       return NULL;
+ }
+@@ -940,7 +940,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
+               xid = ntohs(fh->fh_ox_id);      /* we originated exch */
+               ep = fc_exch_find(mp, xid);
+               if (!ep) {
+-                      atomic_inc(&mp->stats.xid_not_found);
++                      atomic_inc_unchecked(&mp->stats.xid_not_found);
+                       reject = FC_RJT_OX_ID;
+                       goto out;
+               }
+@@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
+               ep = fc_exch_find(mp, xid);
+               if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
+                       if (ep) {
+-                              atomic_inc(&mp->stats.xid_busy);
++                              atomic_inc_unchecked(&mp->stats.xid_busy);
+                               reject = FC_RJT_RX_ID;
+                               goto rel;
+                       }
+@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
+                       }
+                       xid = ep->xid;  /* get our XID */
+               } else if (!ep) {
+-                      atomic_inc(&mp->stats.xid_not_found);
++                      atomic_inc_unchecked(&mp->stats.xid_not_found);
+                       reject = FC_RJT_RX_ID;  /* XID not found */
+                       goto out;
+               }
+@@ -998,7 +998,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
+       } else {
+               sp = &ep->seq;
+               if (sp->id != fh->fh_seq_id) {
+-                      atomic_inc(&mp->stats.seq_not_found);
++                      atomic_inc_unchecked(&mp->stats.seq_not_found);
+                       if (f_ctl & FC_FC_END_SEQ) {
+                               /*
+                                * Update sequence_id based on incoming last
+@@ -1448,22 +1448,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+       ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
+       if (!ep) {
+-              atomic_inc(&mp->stats.xid_not_found);
++              atomic_inc_unchecked(&mp->stats.xid_not_found);
+               goto out;
+       }
+       if (ep->esb_stat & ESB_ST_COMPLETE) {
+-              atomic_inc(&mp->stats.xid_not_found);
++              atomic_inc_unchecked(&mp->stats.xid_not_found);
+               goto rel;
+       }
+       if (ep->rxid == FC_XID_UNKNOWN)
+               ep->rxid = ntohs(fh->fh_rx_id);
+       if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
+-              atomic_inc(&mp->stats.xid_not_found);
++              atomic_inc_unchecked(&mp->stats.xid_not_found);
+               goto rel;
+       }
+       if (ep->did != ntoh24(fh->fh_s_id) &&
+           ep->did != FC_FID_FLOGI) {
+-              atomic_inc(&mp->stats.xid_not_found);
++              atomic_inc_unchecked(&mp->stats.xid_not_found);
+               goto rel;
+       }
+       sof = fr_sof(fp);
+@@ -1472,7 +1472,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+               sp->ssb_stat |= SSB_ST_RESP;
+               sp->id = fh->fh_seq_id;
+       } else if (sp->id != fh->fh_seq_id) {
+-              atomic_inc(&mp->stats.seq_not_found);
++              atomic_inc_unchecked(&mp->stats.seq_not_found);
+               goto rel;
+       }
+@@ -1536,9 +1536,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+       sp = fc_seq_lookup_orig(mp, fp);        /* doesn't hold sequence */
+       if (!sp)
+-              atomic_inc(&mp->stats.xid_not_found);
++              atomic_inc_unchecked(&mp->stats.xid_not_found);
+       else
+-              atomic_inc(&mp->stats.non_bls_resp);
++              atomic_inc_unchecked(&mp->stats.non_bls_resp);
+       fc_frame_free(fp);
+ }
+@@ -2185,13 +2185,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
+       list_for_each_entry(ema, &lport->ema_list, ema_list) {
+               mp = ema->mp;
+-              st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
++              st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
+               st->fc_no_free_exch_xid +=
+-                              atomic_read(&mp->stats.no_free_exch_xid);
+-              st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
+-              st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
+-              st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
+-              st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
++                              atomic_read_unchecked(&mp->stats.no_free_exch_xid);
++              st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
++              st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
++              st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
++              st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
+       }
+ }
+ EXPORT_SYMBOL(fc_exch_update_stats);
+diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
+index 161c98e..6d563b3 100644
+--- a/drivers/scsi/libsas/sas_ata.c
++++ b/drivers/scsi/libsas/sas_ata.c
+@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
+       .postreset              = ata_std_postreset,
+       .error_handler          = ata_std_error_handler,
+       .post_internal_cmd      = sas_ata_post_internal,
+-      .qc_defer               = ata_std_qc_defer,
++      .qc_defer               = ata_std_qc_defer,
+       .qc_prep                = ata_noop_qc_prep,
+       .qc_issue               = sas_ata_qc_issue,
+       .qc_fill_rtf            = sas_ata_qc_fill_rtf,
+diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
+index bcc56ca..6f4174a 100644
+--- a/drivers/scsi/lpfc/lpfc.h
++++ b/drivers/scsi/lpfc/lpfc.h
+@@ -431,7 +431,7 @@ struct lpfc_vport {
+       struct dentry *debug_nodelist;
+       struct dentry *vport_debugfs_root;
+       struct lpfc_debugfs_trc *disc_trc;
+-      atomic_t disc_trc_cnt;
++      atomic_unchecked_t disc_trc_cnt;
+ #endif
+       uint8_t stat_data_enabled;
+       uint8_t stat_data_blocked;
+@@ -865,8 +865,8 @@ struct lpfc_hba {
+       struct timer_list fabric_block_timer;
+       unsigned long bit_flags;
+ #define       FABRIC_COMANDS_BLOCKED  0
+-      atomic_t num_rsrc_err;
+-      atomic_t num_cmd_success;
++      atomic_unchecked_t num_rsrc_err;
++      atomic_unchecked_t num_cmd_success;
+       unsigned long last_rsrc_error_time;
+       unsigned long last_ramp_down_time;
+       unsigned long last_ramp_up_time;
+@@ -902,7 +902,7 @@ struct lpfc_hba {
+       struct dentry *debug_slow_ring_trc;
+       struct lpfc_debugfs_trc *slow_ring_trc;
+-      atomic_t slow_ring_trc_cnt;
++      atomic_unchecked_t slow_ring_trc_cnt;
+       /* iDiag debugfs sub-directory */
+       struct dentry *idiag_root;
+       struct dentry *idiag_pci_cfg;
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
+index f525ecb..32549a4 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.c
++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
+@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
+ #include <linux/debugfs.h>
+-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
++static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
+ static unsigned long lpfc_debugfs_start_time = 0L;
+ /* iDiag */
+@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
+       lpfc_debugfs_enable = 0;
+       len = 0;
+-      index = (atomic_read(&vport->disc_trc_cnt) + 1) &
++      index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
+               (lpfc_debugfs_max_disc_trc - 1);
+       for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
+               dtp = vport->disc_trc + i;
+@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
+       lpfc_debugfs_enable = 0;
+       len = 0;
+-      index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
++      index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
+               (lpfc_debugfs_max_slow_ring_trc - 1);
+       for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
+               dtp = phba->slow_ring_trc + i;
+@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
+               !vport || !vport->disc_trc)
+               return;
+-      index = atomic_inc_return(&vport->disc_trc_cnt) &
++      index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
+               (lpfc_debugfs_max_disc_trc - 1);
+       dtp = vport->disc_trc + index;
+       dtp->fmt = fmt;
+       dtp->data1 = data1;
+       dtp->data2 = data2;
+       dtp->data3 = data3;
+-      dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
++      dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
+       dtp->jif = jiffies;
+ #endif
+       return;
+@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
+               !phba || !phba->slow_ring_trc)
+               return;
+-      index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
++      index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
+               (lpfc_debugfs_max_slow_ring_trc - 1);
+       dtp = phba->slow_ring_trc + index;
+       dtp->fmt = fmt;
+       dtp->data1 = data1;
+       dtp->data2 = data2;
+       dtp->data3 = data3;
+-      dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
++      dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
+       dtp->jif = jiffies;
+ #endif
+       return;
+@@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
+                                                "slow_ring buffer\n");
+                               goto debug_failed;
+                       }
+-                      atomic_set(&phba->slow_ring_trc_cnt, 0);
++                      atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
+                       memset(phba->slow_ring_trc, 0,
+                               (sizeof(struct lpfc_debugfs_trc) *
+                               lpfc_debugfs_max_slow_ring_trc));
+@@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
+                                "buffer\n");
+               goto debug_failed;
+       }
+-      atomic_set(&vport->disc_trc_cnt, 0);
++      atomic_set_unchecked(&vport->disc_trc_cnt, 0);
+       snprintf(name, sizeof(name), "discovery_trace");
+       vport->debug_disc_trc =
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index cb465b2..2e7b25f 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -10950,8 +10950,10 @@ lpfc_init(void)
+                       "misc_register returned with status %d", error);
+       if (lpfc_enable_npiv) {
+-              lpfc_transport_functions.vport_create = lpfc_vport_create;
+-              lpfc_transport_functions.vport_delete = lpfc_vport_delete;
++              pax_open_kernel();
++              *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
++              *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
++              pax_close_kernel();
+       }
+       lpfc_transport_template =
+                               fc_attach_transport(&lpfc_transport_functions);
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index 8523b278e..ce1d812 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -331,7 +331,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
+       uint32_t evt_posted;
+       spin_lock_irqsave(&phba->hbalock, flags);
+-      atomic_inc(&phba->num_rsrc_err);
++      atomic_inc_unchecked(&phba->num_rsrc_err);
+       phba->last_rsrc_error_time = jiffies;
+       if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
+@@ -372,7 +372,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
+       unsigned long flags;
+       struct lpfc_hba *phba = vport->phba;
+       uint32_t evt_posted;
+-      atomic_inc(&phba->num_cmd_success);
++      atomic_inc_unchecked(&phba->num_cmd_success);
+       if (vport->cfg_lun_queue_depth <= queue_depth)
+               return;
+@@ -416,8 +416,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
+       unsigned long num_rsrc_err, num_cmd_success;
+       int i;
+-      num_rsrc_err = atomic_read(&phba->num_rsrc_err);
+-      num_cmd_success = atomic_read(&phba->num_cmd_success);
++      num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
++      num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
+       /*
+        * The error and success command counters are global per
+@@ -445,8 +445,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
+                       }
+               }
+       lpfc_destroy_vport_work_array(phba, vports);
+-      atomic_set(&phba->num_rsrc_err, 0);
+-      atomic_set(&phba->num_cmd_success, 0);
++      atomic_set_unchecked(&phba->num_rsrc_err, 0);
++      atomic_set_unchecked(&phba->num_cmd_success, 0);
+ }
+ /**
+@@ -480,8 +480,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
+                       }
+               }
+       lpfc_destroy_vport_work_array(phba, vports);
+-      atomic_set(&phba->num_rsrc_err, 0);
+-      atomic_set(&phba->num_cmd_success, 0);
++      atomic_set_unchecked(&phba->num_rsrc_err, 0);
++      atomic_set_unchecked(&phba->num_cmd_success, 0);
+ }
+ /**
+diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
+index 8e1b737..50ff510 100644
+--- a/drivers/scsi/pmcraid.c
++++ b/drivers/scsi/pmcraid.c
+@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
+               res->scsi_dev = scsi_dev;
+               scsi_dev->hostdata = res;
+               res->change_detected = 0;
+-              atomic_set(&res->read_failures, 0);
+-              atomic_set(&res->write_failures, 0);
++              atomic_set_unchecked(&res->read_failures, 0);
++              atomic_set_unchecked(&res->write_failures, 0);
+               rc = 0;
+       }
+       spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
+@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
+       /* If this was a SCSI read/write command keep count of errors */
+       if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
+-              atomic_inc(&res->read_failures);
++              atomic_inc_unchecked(&res->read_failures);
+       else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
+-              atomic_inc(&res->write_failures);
++              atomic_inc_unchecked(&res->write_failures);
+       if (!RES_IS_GSCSI(res->cfg_entry) &&
+               masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
+@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
+        * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
+        * hrrq_id assigned here in queuecommand
+        */
+-      ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
++      ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
+                         pinstance->num_hrrq;
+       cmd->cmd_done = pmcraid_io_done;
+@@ -3846,7 +3846,7 @@ static long pmcraid_ioctl_passthrough(
+        * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
+        * hrrq_id assigned here in queuecommand
+        */
+-      ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
++      ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
+                         pinstance->num_hrrq;
+       if (request_size) {
+@@ -4483,7 +4483,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
+       pinstance = container_of(workp, struct pmcraid_instance, worker_q);
+       /* add resources only after host is added into system */
+-      if (!atomic_read(&pinstance->expose_resources))
++      if (!atomic_read_unchecked(&pinstance->expose_resources))
+               return;
+       fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
+@@ -5310,8 +5310,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
+       init_waitqueue_head(&pinstance->reset_wait_q);
+       atomic_set(&pinstance->outstanding_cmds, 0);
+-      atomic_set(&pinstance->last_message_id, 0);
+-      atomic_set(&pinstance->expose_resources, 0);
++      atomic_set_unchecked(&pinstance->last_message_id, 0);
++      atomic_set_unchecked(&pinstance->expose_resources, 0);
+       INIT_LIST_HEAD(&pinstance->free_res_q);
+       INIT_LIST_HEAD(&pinstance->used_res_q);
+@@ -6024,7 +6024,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
+       /* Schedule worker thread to handle CCN and take care of adding and
+        * removing devices to OS
+        */
+-      atomic_set(&pinstance->expose_resources, 1);
++      atomic_set_unchecked(&pinstance->expose_resources, 1);
+       schedule_work(&pinstance->worker_q);
+       return rc;
+diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
+index e1d150f..6c6df44 100644
+--- a/drivers/scsi/pmcraid.h
++++ b/drivers/scsi/pmcraid.h
+@@ -748,7 +748,7 @@ struct pmcraid_instance {
+       struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
+       /* Message id as filled in last fired IOARCB, used to identify HRRQ */
+-      atomic_t last_message_id;
++      atomic_unchecked_t last_message_id;
+       /* configuration table */
+       struct pmcraid_config_table *cfg_table;
+@@ -777,7 +777,7 @@ struct pmcraid_instance {
+       atomic_t outstanding_cmds;
+       /* should add/delete resources to mid-layer now ?*/
+-      atomic_t expose_resources;
++      atomic_unchecked_t expose_resources;
+@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
+               struct pmcraid_config_table_entry_ext cfg_entry_ext;
+       };
+       struct scsi_device *scsi_dev;   /* Link scsi_device structure */
+-      atomic_t read_failures;         /* count of failed READ commands */
+-      atomic_t write_failures;        /* count of failed WRITE commands */
++      atomic_unchecked_t read_failures;       /* count of failed READ commands */
++      atomic_unchecked_t write_failures;      /* count of failed WRITE commands */
+       /* To indicate add/delete/modify during CCN */
+       u8 change_detected;
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index bf60c63..74d4dce 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -2001,7 +2001,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
+       return 0;
+ }
+-struct fc_function_template qla2xxx_transport_functions = {
++fc_function_template_no_const qla2xxx_transport_functions = {
+       .show_host_node_name = 1,
+       .show_host_port_name = 1,
+@@ -2048,7 +2048,7 @@ struct fc_function_template qla2xxx_transport_functions = {
+       .bsg_timeout = qla24xx_bsg_timeout,
+ };
+-struct fc_function_template qla2xxx_transport_vport_functions = {
++fc_function_template_no_const qla2xxx_transport_vport_functions = {
+       .show_host_node_name = 1,
+       .show_host_port_name = 1,
+diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
+index 026bfde..90c4018 100644
+--- a/drivers/scsi/qla2xxx/qla_gbl.h
++++ b/drivers/scsi/qla2xxx/qla_gbl.h
+@@ -528,8 +528,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
+ struct device_attribute;
+ extern struct device_attribute *qla2x00_host_attrs[];
+ struct fc_function_template;
+-extern struct fc_function_template qla2xxx_transport_functions;
+-extern struct fc_function_template qla2xxx_transport_vport_functions;
++extern fc_function_template_no_const qla2xxx_transport_functions;
++extern fc_function_template_no_const qla2xxx_transport_vport_functions;
+ extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
+ extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
+ extern void qla2x00_init_host_attr(scsi_qla_host_t *);
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index ad72c1d..afc9a98 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1571,8 +1571,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
+                   !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
+                       /* Ok, a 64bit DMA mask is applicable. */
+                       ha->flags.enable_64bit_addressing = 1;
+-                      ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
+-                      ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
++                      pax_open_kernel();
++                      *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
++                      *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
++                      pax_close_kernel();
+                       return;
+               }
+       }
+diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
+index ddf16a8..80f4dd0 100644
+--- a/drivers/scsi/qla4xxx/ql4_def.h
++++ b/drivers/scsi/qla4xxx/ql4_def.h
+@@ -291,7 +291,7 @@ struct ddb_entry {
+                                          * (4000 only) */
+       atomic_t relogin_timer;           /* Max Time to wait for
+                                          * relogin to complete */
+-      atomic_t relogin_retry_count;     /* Num of times relogin has been
++      atomic_unchecked_t relogin_retry_count;   /* Num of times relogin has been
+                                          * retried */
+       uint32_t default_time2wait;       /* Default Min time between
+                                          * relogins (+aens) */
+diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
+index 4d231c1..2892c37 100644
+--- a/drivers/scsi/qla4xxx/ql4_os.c
++++ b/drivers/scsi/qla4xxx/ql4_os.c
+@@ -2971,12 +2971,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+                */
+               if (!iscsi_is_session_online(cls_sess)) {
+                       /* Reset retry relogin timer */
+-                      atomic_inc(&ddb_entry->relogin_retry_count);
++                      atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                               "%s: index[%d] relogin timed out-retrying"
+                               " relogin (%d), retry (%d)\n", __func__,
+                               ddb_entry->fw_ddb_index,
+-                              atomic_read(&ddb_entry->relogin_retry_count),
++                              atomic_read_unchecked(&ddb_entry->relogin_retry_count),
+                               ddb_entry->default_time2wait + 4));
+                       set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+                       atomic_set(&ddb_entry->retry_relogin_timer,
+@@ -5081,7 +5081,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
+       atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+       atomic_set(&ddb_entry->relogin_timer, 0);
+-      atomic_set(&ddb_entry->relogin_retry_count, 0);
++      atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
+       def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+       ddb_entry->default_relogin_timeout =
+               (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index eaa808e..95f8841 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
+       unsigned long timeout;
+       int rtn = 0;
+-      atomic_inc(&cmd->device->iorequest_cnt);
++      atomic_inc_unchecked(&cmd->device->iorequest_cnt);
+       /* check if the device is still usable */
+       if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 86d5220..f22c51a 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1458,7 +1458,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
+       shost = sdev->host;
+       scsi_init_cmd_errh(cmd);
+       cmd->result = DID_NO_CONNECT << 16;
+-      atomic_inc(&cmd->device->iorequest_cnt);
++      atomic_inc_unchecked(&cmd->device->iorequest_cnt);
+       /*
+        * SCSI request completion path will do scsi_device_unbusy(),
+@@ -1484,9 +1484,9 @@ static void scsi_softirq_done(struct request *rq)
+       INIT_LIST_HEAD(&cmd->eh_entry);
+-      atomic_inc(&cmd->device->iodone_cnt);
++      atomic_inc_unchecked(&cmd->device->iodone_cnt);
+       if (cmd->result)
+-              atomic_inc(&cmd->device->ioerr_cnt);
++              atomic_inc_unchecked(&cmd->device->ioerr_cnt);
+       disposition = scsi_decide_disposition(cmd);
+       if (disposition != SUCCESS &&
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 931a7d9..0c2a754 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr,     \
+                   char *buf)                                          \
+ {                                                                     \
+       struct scsi_device *sdev = to_scsi_device(dev);                 \
+-      unsigned long long count = atomic_read(&sdev->field);           \
++      unsigned long long count = atomic_read_unchecked(&sdev->field); \
+       return snprintf(buf, 20, "0x%llx\n", count);                    \
+ }                                                                     \
+ static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
+diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
+index 84a1fdf..693b0d6 100644
+--- a/drivers/scsi/scsi_tgt_lib.c
++++ b/drivers/scsi/scsi_tgt_lib.c
+@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
+       int err;
+       dprintk("%lx %u\n", uaddr, len);
+-      err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
++      err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
+       if (err) {
+               /*
+                * TODO: need to fixup sg_tablesize, max_segment_size,
+diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
+index e106c27..11a380e 100644
+--- a/drivers/scsi/scsi_transport_fc.c
++++ b/drivers/scsi/scsi_transport_fc.c
+@@ -497,7 +497,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
+  * Netlink Infrastructure
+  */
+-static atomic_t fc_event_seq;
++static atomic_unchecked_t fc_event_seq;
+ /**
+  * fc_get_event_number - Obtain the next sequential FC event number
+@@ -510,7 +510,7 @@ static atomic_t fc_event_seq;
+ u32
+ fc_get_event_number(void)
+ {
+-      return atomic_add_return(1, &fc_event_seq);
++      return atomic_add_return_unchecked(1, &fc_event_seq);
+ }
+ EXPORT_SYMBOL(fc_get_event_number);
+@@ -654,7 +654,7 @@ static __init int fc_transport_init(void)
+ {
+       int error;
+-      atomic_set(&fc_event_seq, 0);
++      atomic_set_unchecked(&fc_event_seq, 0);
+       error = transport_class_register(&fc_host_class);
+       if (error)
+@@ -844,7 +844,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
+       char *cp;
+       *val = simple_strtoul(buf, &cp, 0);
+-      if ((*cp && (*cp != '\n')) || (*val < 0))
++      if (*cp && (*cp != '\n'))
+               return -EINVAL;
+       /*
+        * Check for overflow; dev_loss_tmo is u32
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 133926b..903000d 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -80,7 +80,7 @@ struct iscsi_internal {
+       struct transport_container session_cont;
+ };
+-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
++static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
+ static struct workqueue_struct *iscsi_eh_timer_workq;
+ static DEFINE_IDA(iscsi_sess_ida);
+@@ -1738,7 +1738,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+       int err;
+       ihost = shost->shost_data;
+-      session->sid = atomic_add_return(1, &iscsi_session_nr);
++      session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
+       if (target_id == ISCSI_MAX_TARGET) {
+               id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
+@@ -3944,7 +3944,7 @@ static __init int iscsi_transport_init(void)
+       printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
+               ISCSI_TRANSPORT_VERSION);
+-      atomic_set(&iscsi_session_nr, 0);
++      atomic_set_unchecked(&iscsi_session_nr, 0);
+       err = class_register(&iscsi_transport_class);
+       if (err)
+diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
+index f379c7f..e8fc69c 100644
+--- a/drivers/scsi/scsi_transport_srp.c
++++ b/drivers/scsi/scsi_transport_srp.c
+@@ -33,7 +33,7 @@
+ #include "scsi_transport_srp_internal.h"
+ struct srp_host_attrs {
+-      atomic_t next_port_id;
++      atomic_unchecked_t next_port_id;
+ };
+ #define to_srp_host_attrs(host)       ((struct srp_host_attrs *)(host)->shost_data)
+@@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
+       struct Scsi_Host *shost = dev_to_shost(dev);
+       struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
+-      atomic_set(&srp_host->next_port_id, 0);
++      atomic_set_unchecked(&srp_host->next_port_id, 0);
+       return 0;
+ }
+@@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
+       memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
+       rport->roles = ids->roles;
+-      id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
++      id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
+       dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
+       transport_setup_device(&rport->dev);
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 610417e..1544fa9 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2928,7 +2928,7 @@ static int sd_probe(struct device *dev)
+       sdkp->disk = gd;
+       sdkp->index = index;
+       atomic_set(&sdkp->openers, 0);
+-      atomic_set(&sdkp->device->ioerr_cnt, 0);
++      atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
+       if (!sdp->request_queue->rq_timeout) {
+               if (sdp->type != TYPE_MOD)
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index df5e961..df6b97f 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+                                      sdp->disk->disk_name,
+                                      MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
+                                      NULL,
+-                                     (char *)arg);
++                                     (char __user *)arg);
+       case BLKTRACESTART:
+               return blk_trace_startstop(sdp->device->request_queue, 1);
+       case BLKTRACESTOP:
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 32b7bb1..2f1c4bd 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1631,7 +1631,7 @@ int spi_bus_unlock(struct spi_master *master)
+ EXPORT_SYMBOL_GPL(spi_bus_unlock);
+ /* portable code must never pass more than 32 bytes */
+-#define       SPI_BUFSIZ      max(32,SMP_CACHE_BYTES)
++#define       SPI_BUFSIZ      max(32UL,SMP_CACHE_BYTES)
+ static u8     *buf;
+diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
+index 3675020..e80d92c 100644
+--- a/drivers/staging/media/solo6x10/solo6x10-core.c
++++ b/drivers/staging/media/solo6x10/solo6x10-core.c
+@@ -434,7 +434,7 @@ static void solo_device_release(struct device *dev)
+ static int solo_sysfs_init(struct solo_dev *solo_dev)
+ {
+-      struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
++      bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
+       struct device *dev = &solo_dev->dev;
+       const char *driver;
+       int i;
+diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
+index 34afc16..ffe44dd 100644
+--- a/drivers/staging/octeon/ethernet-rx.c
++++ b/drivers/staging/octeon/ethernet-rx.c
+@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
+                               /* Increment RX stats for virtual ports */
+                               if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
+ #ifdef CONFIG_64BIT
+-                                      atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
+-                                      atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
++                                      atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
++                                      atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
+ #else
+-                                      atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
+-                                      atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
++                                      atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
++                                      atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
+ #endif
+                               }
+                               netif_receive_skb(skb);
+@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
+                                          dev->name);
+                               */
+ #ifdef CONFIG_64BIT
+-                              atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
++                              atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
+ #else
+-                              atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
++                              atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
+ #endif
+                               dev_kfree_skb_irq(skb);
+                       }
+diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
+index c3a90e7..023619a 100644
+--- a/drivers/staging/octeon/ethernet.c
++++ b/drivers/staging/octeon/ethernet.c
+@@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
+                * since the RX tasklet also increments it.
+                */
+ #ifdef CONFIG_64BIT
+-              atomic64_add(rx_status.dropped_packets,
+-                           (atomic64_t *)&priv->stats.rx_dropped);
++              atomic64_add_unchecked(rx_status.dropped_packets,
++                           (atomic64_unchecked_t *)&priv->stats.rx_dropped);
+ #else
+-              atomic_add(rx_status.dropped_packets,
+-                           (atomic_t *)&priv->stats.rx_dropped);
++              atomic_add_unchecked(rx_status.dropped_packets,
++                           (atomic_unchecked_t *)&priv->stats.rx_dropped);
+ #endif
+       }
+diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
+index dc23395..cf7e9b1 100644
+--- a/drivers/staging/rtl8712/rtl871x_io.h
++++ b/drivers/staging/rtl8712/rtl871x_io.h
+@@ -108,7 +108,7 @@ struct     _io_ops {
+                         u8 *pmem);
+       u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
+                          u8 *pmem);
+-};
++} __no_const;
+ struct io_req {
+       struct list_head list;
+diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
+index 1f5088b..0e59820 100644
+--- a/drivers/staging/sbe-2t3e3/netdev.c
++++ b/drivers/staging/sbe-2t3e3/netdev.c
+@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+       t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
+       if (rlen)
+-              if (copy_to_user(data, &resp, rlen))
++              if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
+                       return -EFAULT;
+       return 0;
+diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
+index a863a98..d272795 100644
+--- a/drivers/staging/usbip/vhci.h
++++ b/drivers/staging/usbip/vhci.h
+@@ -83,7 +83,7 @@ struct vhci_hcd {
+       unsigned resuming:1;
+       unsigned long re_timeout;
+-      atomic_t seqnum;
++      atomic_unchecked_t seqnum;
+       /*
+        * NOTE:
+diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
+index d7974cb..d78076b 100644
+--- a/drivers/staging/usbip/vhci_hcd.c
++++ b/drivers/staging/usbip/vhci_hcd.c
+@@ -441,7 +441,7 @@ static void vhci_tx_urb(struct urb *urb)
+       spin_lock(&vdev->priv_lock);
+-      priv->seqnum = atomic_inc_return(&the_controller->seqnum);
++      priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
+       if (priv->seqnum == 0xffff)
+               dev_info(&urb->dev->dev, "seqnum max\n");
+@@ -687,7 +687,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+                       return -ENOMEM;
+               }
+-              unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
++              unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
+               if (unlink->seqnum == 0xffff)
+                       pr_info("seqnum max\n");
+@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
+               vdev->rhport = rhport;
+       }
+-      atomic_set(&vhci->seqnum, 0);
++      atomic_set_unchecked(&vhci->seqnum, 0);
+       spin_lock_init(&vhci->lock);
+       hcd->power_budget = 0; /* no limit */
+diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
+index d07fcb5..358e1e1 100644
+--- a/drivers/staging/usbip/vhci_rx.c
++++ b/drivers/staging/usbip/vhci_rx.c
+@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
+       if (!urb) {
+               pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
+               pr_info("max seqnum %d\n",
+-                      atomic_read(&the_controller->seqnum));
++                      atomic_read_unchecked(&the_controller->seqnum));
+               usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+               return;
+       }
+diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
+index 8417c2f..ef5ebd6 100644
+--- a/drivers/staging/vt6655/hostap.c
++++ b/drivers/staging/vt6655/hostap.c
+@@ -69,14 +69,13 @@ static int msglevel = MSG_LEVEL_INFO;
+  *
+  */
++static net_device_ops_no_const apdev_netdev_ops;
++
+ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
+ {
+       PSDevice apdev_priv;
+       struct net_device *dev = pDevice->dev;
+       int ret;
+-      const struct net_device_ops apdev_netdev_ops = {
+-              .ndo_start_xmit         = pDevice->tx_80211,
+-      };
+       DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
+@@ -88,6 +87,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
+       *apdev_priv = *pDevice;
+       memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
++      /* only half broken now */
++      apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
+       pDevice->apdev->netdev_ops = &apdev_netdev_ops;
+       pDevice->apdev->type = ARPHRD_IEEE80211;
+diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
+index c699a30..b90a5fd 100644
+--- a/drivers/staging/vt6656/hostap.c
++++ b/drivers/staging/vt6656/hostap.c
+@@ -60,14 +60,13 @@ static int          msglevel                =MSG_LEVEL_INFO;
+  *
+  */
++static net_device_ops_no_const apdev_netdev_ops;
++
+ static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
+ {
+       struct vnt_private *apdev_priv;
+       struct net_device *dev = pDevice->dev;
+       int ret;
+-      const struct net_device_ops apdev_netdev_ops = {
+-              .ndo_start_xmit = pDevice->tx_80211,
+-      };
+     DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
+@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
+     *apdev_priv = *pDevice;
+       memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
++      /* only half broken now */
++      apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
+       pDevice->apdev->netdev_ops = &apdev_netdev_ops;
+       pDevice->apdev->type = ARPHRD_IEEE80211;
+diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
+index d128ce2..fc1f9a1 100644
+--- a/drivers/staging/zcache/tmem.h
++++ b/drivers/staging/zcache/tmem.h
+@@ -225,7 +225,7 @@ struct tmem_pamops {
+       bool (*is_remote)(void *);
+       int (*replace_in_obj)(void *, struct tmem_obj *);
+ #endif
+-};
++} __no_const;
+ extern void tmem_register_pamops(struct tmem_pamops *m);
+ /* memory allocation methods provided by the host implementation */
+@@ -234,7 +234,7 @@ struct tmem_hostops {
+       void (*obj_free)(struct tmem_obj *, struct tmem_pool *);
+       struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
+       void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
+-};
++} __no_const;
+ extern void tmem_register_hostops(struct tmem_hostops *m);
+ /* core tmem accessor functions */
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index 4630481..c26782a 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -1400,7 +1400,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
+       spin_lock_init(&dev->se_port_lock);
+       spin_lock_init(&dev->se_tmr_lock);
+       spin_lock_init(&dev->qf_cmd_lock);
+-      atomic_set(&dev->dev_ordered_id, 0);
++      atomic_set_unchecked(&dev->dev_ordered_id, 0);
+       INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
+       spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
+       INIT_LIST_HEAD(&dev->t10_pr.registration_list);
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 21e3158..43c6004 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1080,7 +1080,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
+        * Used to determine when ORDERED commands should go from
+        * Dormant to Active status.
+        */
+-      cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
++      cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
+       smp_mb__after_atomic_inc();
+       pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
+                       cmd->se_ordered_id, cmd->sam_task_attr,
+diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
+index 33f83fe..d80f8e1 100644
+--- a/drivers/tty/cyclades.c
++++ b/drivers/tty/cyclades.c
+@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
+       printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
+                       info->port.count);
+ #endif
+-      info->port.count++;
++      atomic_inc(&info->port.count);
+ #ifdef CY_DEBUG_COUNT
+       printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
+-              current->pid, info->port.count);
++              current->pid, atomic_read(&info->port.count));
+ #endif
+       /*
+@@ -3972,7 +3972,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
+               for (j = 0; j < cy_card[i].nports; j++) {
+                       info = &cy_card[i].ports[j];
+-                      if (info->port.count) {
++                      if (atomic_read(&info->port.count)) {
+                               /* XXX is the ldisc num worth this? */
+                               struct tty_struct *tty;
+                               struct tty_ldisc *ld;
+diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
+index eb255e8..f637a57 100644
+--- a/drivers/tty/hvc/hvc_console.c
++++ b/drivers/tty/hvc/hvc_console.c
+@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
+       spin_lock_irqsave(&hp->port.lock, flags);
+       /* Check and then increment for fast path open. */
+-      if (hp->port.count++ > 0) {
++      if (atomic_inc_return(&hp->port.count) > 1) {
+               spin_unlock_irqrestore(&hp->port.lock, flags);
+               hvc_kick();
+               return 0;
+@@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
+       spin_lock_irqsave(&hp->port.lock, flags);
+-      if (--hp->port.count == 0) {
++      if (atomic_dec_return(&hp->port.count) == 0) {
+               spin_unlock_irqrestore(&hp->port.lock, flags);
+               /* We are done with the tty pointer now. */
+               tty_port_tty_set(&hp->port, NULL);
+@@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
+                */
+               tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
+       } else {
+-              if (hp->port.count < 0)
++              if (atomic_read(&hp->port.count) < 0)
+                       printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
+-                              hp->vtermno, hp->port.count);
++                              hp->vtermno, atomic_read(&hp->port.count));
+               spin_unlock_irqrestore(&hp->port.lock, flags);
+       }
+ }
+@@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
+        * open->hangup case this can be called after the final close so prevent
+        * that from happening for now.
+        */
+-      if (hp->port.count <= 0) {
++      if (atomic_read(&hp->port.count) <= 0) {
+               spin_unlock_irqrestore(&hp->port.lock, flags);
+               return;
+       }
+-      hp->port.count = 0;
++      atomic_set(&hp->port.count, 0);
+       spin_unlock_irqrestore(&hp->port.lock, flags);
+       tty_port_tty_set(&hp->port, NULL);
+@@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
+               return -EPIPE;
+       /* FIXME what's this (unprotected) check for? */
+-      if (hp->port.count <= 0)
++      if (atomic_read(&hp->port.count) <= 0)
+               return -EIO;
+       spin_lock_irqsave(&hp->lock, flags);
+diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
+index 81e939e..95ead10 100644
+--- a/drivers/tty/hvc/hvcs.c
++++ b/drivers/tty/hvc/hvcs.c
+@@ -83,6 +83,7 @@
+ #include <asm/hvcserver.h>
+ #include <asm/uaccess.h>
+ #include <asm/vio.h>
++#include <asm/local.h>
+ /*
+  * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
+@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
+       spin_lock_irqsave(&hvcsd->lock, flags);
+-      if (hvcsd->port.count > 0) {
++      if (atomic_read(&hvcsd->port.count) > 0) {
+               spin_unlock_irqrestore(&hvcsd->lock, flags);
+               printk(KERN_INFO "HVCS: vterm state unchanged.  "
+                               "The hvcs device node is still in use.\n");
+@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
+               }
+       }
+-      hvcsd->port.count = 0;
++      atomic_set(&hvcsd->port.count, 0);
+       hvcsd->port.tty = tty;
+       tty->driver_data = hvcsd;
+@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
+       unsigned long flags;
+       spin_lock_irqsave(&hvcsd->lock, flags);
+-      hvcsd->port.count++;
++      atomic_inc(&hvcsd->port.count);
+       hvcsd->todo_mask |= HVCS_SCHED_READ;
+       spin_unlock_irqrestore(&hvcsd->lock, flags);
+@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
+       hvcsd = tty->driver_data;
+       spin_lock_irqsave(&hvcsd->lock, flags);
+-      if (--hvcsd->port.count == 0) {
++      if (atomic_dec_and_test(&hvcsd->port.count)) {
+               vio_disable_interrupts(hvcsd->vdev);
+@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
+               free_irq(irq, hvcsd);
+               return;
+-      } else if (hvcsd->port.count < 0) {
++      } else if (atomic_read(&hvcsd->port.count) < 0) {
+               printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
+                               " is missmanaged.\n",
+-              hvcsd->vdev->unit_address, hvcsd->port.count);
++              hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
+       }
+       spin_unlock_irqrestore(&hvcsd->lock, flags);
+@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
+       spin_lock_irqsave(&hvcsd->lock, flags);
+       /* Preserve this so that we know how many kref refs to put */
+-      temp_open_count = hvcsd->port.count;
++      temp_open_count = atomic_read(&hvcsd->port.count);
+       /*
+        * Don't kref put inside the spinlock because the destruction
+@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
+       tty->driver_data = NULL;
+       hvcsd->port.tty = NULL;
+-      hvcsd->port.count = 0;
++      atomic_set(&hvcsd->port.count, 0);
+       /* This will drop any buffered data on the floor which is OK in a hangup
+        * scenario. */
+@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
+        * the middle of a write operation?  This is a crummy place to do this
+        * but we want to keep it all in the spinlock.
+        */
+-      if (hvcsd->port.count <= 0) {
++      if (atomic_read(&hvcsd->port.count) <= 0) {
+               spin_unlock_irqrestore(&hvcsd->lock, flags);
+               return -ENODEV;
+       }
+@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
+ {
+       struct hvcs_struct *hvcsd = tty->driver_data;
+-      if (!hvcsd || hvcsd->port.count <= 0)
++      if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
+               return 0;
+       return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
+diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
+index 8fd72ff..34a0bed 100644
+--- a/drivers/tty/ipwireless/tty.c
++++ b/drivers/tty/ipwireless/tty.c
+@@ -29,6 +29,7 @@
+ #include <linux/tty_driver.h>
+ #include <linux/tty_flip.h>
+ #include <linux/uaccess.h>
++#include <asm/local.h>
+ #include "tty.h"
+ #include "network.h"
+@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
+               mutex_unlock(&tty->ipw_tty_mutex);
+               return -ENODEV;
+       }
+-      if (tty->port.count == 0)
++      if (atomic_read(&tty->port.count) == 0)
+               tty->tx_bytes_queued = 0;
+-      tty->port.count++;
++      atomic_inc(&tty->port.count);
+       tty->port.tty = linux_tty;
+       linux_tty->driver_data = tty;
+@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
+ static void do_ipw_close(struct ipw_tty *tty)
+ {
+-      tty->port.count--;
+-
+-      if (tty->port.count == 0) {
++      if (atomic_dec_return(&tty->port.count) == 0) {
+               struct tty_struct *linux_tty = tty->port.tty;
+               if (linux_tty != NULL) {
+@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
+               return;
+       mutex_lock(&tty->ipw_tty_mutex);
+-      if (tty->port.count == 0) {
++      if (atomic_read(&tty->port.count) == 0) {
+               mutex_unlock(&tty->ipw_tty_mutex);
+               return;
+       }
+@@ -164,7 +163,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
+       mutex_lock(&tty->ipw_tty_mutex);
+-      if (!tty->port.count) {
++      if (!atomic_read(&tty->port.count)) {
+               mutex_unlock(&tty->ipw_tty_mutex);
+               return;
+       }
+@@ -206,7 +205,7 @@ static int ipw_write(struct tty_struct *linux_tty,
+               return -ENODEV;
+       mutex_lock(&tty->ipw_tty_mutex);
+-      if (!tty->port.count) {
++      if (!atomic_read(&tty->port.count)) {
+               mutex_unlock(&tty->ipw_tty_mutex);
+               return -EINVAL;
+       }
+@@ -246,7 +245,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
+       if (!tty)
+               return -ENODEV;
+-      if (!tty->port.count)
++      if (!atomic_read(&tty->port.count))
+               return -EINVAL;
+       room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
+@@ -288,7 +287,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
+       if (!tty)
+               return 0;
+-      if (!tty->port.count)
++      if (!atomic_read(&tty->port.count))
+               return 0;
+       return tty->tx_bytes_queued;
+@@ -369,7 +368,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
+       if (!tty)
+               return -ENODEV;
+-      if (!tty->port.count)
++      if (!atomic_read(&tty->port.count))
+               return -EINVAL;
+       return get_control_lines(tty);
+@@ -385,7 +384,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
+       if (!tty)
+               return -ENODEV;
+-      if (!tty->port.count)
++      if (!atomic_read(&tty->port.count))
+               return -EINVAL;
+       return set_control_lines(tty, set, clear);
+@@ -399,7 +398,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
+       if (!tty)
+               return -ENODEV;
+-      if (!tty->port.count)
++      if (!atomic_read(&tty->port.count))
+               return -EINVAL;
+       /* FIXME: Exactly how is the tty object locked here .. */
+@@ -555,7 +554,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
+                                * are gone */
+                               mutex_lock(&ttyj->ipw_tty_mutex);
+                       }
+-                      while (ttyj->port.count)
++                      while (atomic_read(&ttyj->port.count))
+                               do_ipw_close(ttyj);
+                       ipwireless_disassociate_network_ttys(network,
+                                                            ttyj->channel_idx);
+diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
+index 1deaca4..c8582d4 100644
+--- a/drivers/tty/moxa.c
++++ b/drivers/tty/moxa.c
+@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
+       }
+       ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
+-      ch->port.count++;
++      atomic_inc(&ch->port.count);
+       tty->driver_data = ch;
+       tty_port_tty_set(&ch->port, tty);
+       mutex_lock(&ch->port.mutex);
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 6422390..49003ac8 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -1632,7 +1632,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
+       spin_lock_init(&dlci->lock);
+       mutex_init(&dlci->mutex);
+       dlci->fifo = &dlci->_fifo;
+-      if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
++      if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
+               kfree(dlci);
+               return NULL;
+       }
+@@ -2932,7 +2932,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
+       struct gsm_dlci *dlci = tty->driver_data;
+       struct tty_port *port = &dlci->port;
+-      port->count++;
++      atomic_inc(&port->count);
+       dlci_get(dlci);
+       dlci_get(dlci->gsm->dlci[0]);
+       mux_get(dlci->gsm);
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 6c7fe90..9241dab 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -2203,6 +2203,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
+ {
+       *ops = tty_ldisc_N_TTY;
+       ops->owner = NULL;
+-      ops->refcount = ops->flags = 0;
++      atomic_set(&ops->refcount, 0);
++      ops->flags = 0;
+ }
+ EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index abfd990..5ab5da9 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -796,8 +796,10 @@ static void __init unix98_pty_init(void)
+               panic("Couldn't register Unix98 pts driver");
+       /* Now create the /dev/ptmx special device */
++      pax_open_kernel();
+       tty_default_fops(&ptmx_fops);
+-      ptmx_fops.open = ptmx_open;
++      *(void **)&ptmx_fops.open = ptmx_open;
++      pax_close_kernel();
+       cdev_init(&ptmx_cdev, &ptmx_fops);
+       if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
+diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
+index 354564e..fe50d9a 100644
+--- a/drivers/tty/rocket.c
++++ b/drivers/tty/rocket.c
+@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
+       tty->driver_data = info;
+       tty_port_tty_set(port, tty);
+-      if (port->count++ == 0) {
++      if (atomic_inc_return(&port->count) == 1) {
+               atomic_inc(&rp_num_ports_open);
+ #ifdef ROCKET_DEBUG_OPEN
+@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
+ #endif
+       }
+ #ifdef ROCKET_DEBUG_OPEN
+-      printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
++      printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
+ #endif
+       /*
+@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
+               spin_unlock_irqrestore(&info->port.lock, flags);
+               return;
+       }
+-      if (info->port.count)
++      if (atomic_read(&info->port.count))
+               atomic_dec(&rp_num_ports_open);
+       clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
+       spin_unlock_irqrestore(&info->port.lock, flags);
+diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
+index 1002054..dd644a8 100644
+--- a/drivers/tty/serial/kgdboc.c
++++ b/drivers/tty/serial/kgdboc.c
+@@ -24,8 +24,9 @@
+ #define MAX_CONFIG_LEN                40
+ static struct kgdb_io         kgdboc_io_ops;
++static struct kgdb_io         kgdboc_io_ops_console;
+-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
++/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
+ static int configured         = -1;
+ static char config[MAX_CONFIG_LEN];
+@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
+       kgdboc_unregister_kbd();
+       if (configured == 1)
+               kgdb_unregister_io_module(&kgdboc_io_ops);
++      else if (configured == 2)
++              kgdb_unregister_io_module(&kgdboc_io_ops_console);
+ }
+ static int configure_kgdboc(void)
+@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
+       int err;
+       char *cptr = config;
+       struct console *cons;
++      int is_console = 0;
+       err = kgdboc_option_setup(config);
+       if (err || !strlen(config) || isspace(config[0]))
+               goto noconfig;
+       err = -ENODEV;
+-      kgdboc_io_ops.is_console = 0;
+       kgdb_tty_driver = NULL;
+       kgdboc_use_kms = 0;
+@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
+               int idx;
+               if (cons->device && cons->device(cons, &idx) == p &&
+                   idx == tty_line) {
+-                      kgdboc_io_ops.is_console = 1;
++                      is_console = 1;
+                       break;
+               }
+               cons = cons->next;
+@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
+       kgdb_tty_line = tty_line;
+ do_register:
+-      err = kgdb_register_io_module(&kgdboc_io_ops);
++      if (is_console) {
++              err = kgdb_register_io_module(&kgdboc_io_ops_console);
++              configured = 2;
++      } else {
++              err = kgdb_register_io_module(&kgdboc_io_ops);
++              configured = 1;
++      }
+       if (err)
+               goto noconfig;
+@@ -205,8 +214,6 @@ do_register:
+       if (err)
+               goto nmi_con_failed;
+-      configured = 1;
+-
+       return 0;
+ nmi_con_failed:
+@@ -223,7 +230,7 @@ noconfig:
+ static int __init init_kgdboc(void)
+ {
+       /* Already configured? */
+-      if (configured == 1)
++      if (configured >= 1)
+               return 0;
+       return configure_kgdboc();
+@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
+       if (config[len - 1] == '\n')
+               config[len - 1] = '\0';
+-      if (configured == 1)
++      if (configured >= 1)
+               cleanup_kgdboc();
+       /* Go and configure with the new params. */
+@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
+       .post_exception         = kgdboc_post_exp_handler,
+ };
++static struct kgdb_io kgdboc_io_ops_console = {
++      .name                   = "kgdboc",
++      .read_char              = kgdboc_get_char,
++      .write_char             = kgdboc_put_char,
++      .pre_exception          = kgdboc_pre_exp_handler,
++      .post_exception         = kgdboc_post_exp_handler,
++      .is_console             = 1
++};
++
+ #ifdef CONFIG_KGDB_SERIAL_CONSOLE
+ /* This is only available if kgdboc is a built in for early debugging */
+ static int __init kgdboc_early_init(char *opt)
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index 0c8a9fa..234a95f 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -453,11 +453,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
+       }
+ }
++static int s3c64xx_serial_startup(struct uart_port *port);
+ static int s3c24xx_serial_startup(struct uart_port *port)
+ {
+       struct s3c24xx_uart_port *ourport = to_ourport(port);
+       int ret;
++      /* Startup sequence is different for s3c64xx and higher SoC's */
++      if (s3c24xx_serial_has_interrupt_mask(port))
++              return s3c64xx_serial_startup(port);
++
+       dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
+           port->mapbase, port->membase);
+@@ -1124,10 +1129,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
+       /* setup info for port */
+       port->dev       = &platdev->dev;
+-      /* Startup sequence is different for s3c64xx and higher SoC's */
+-      if (s3c24xx_serial_has_interrupt_mask(port))
+-              s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
+-
+       port->uartclk = 1;
+       if (cfg->uart_flags & UPF_CONS_FLOW) {
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index f87dbfd..42ad4b1 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -1454,7 +1454,7 @@ static void uart_hangup(struct tty_struct *tty)
+               uart_flush_buffer(tty);
+               uart_shutdown(tty, state);
+               spin_lock_irqsave(&port->lock, flags);
+-              port->count = 0;
++              atomic_set(&port->count, 0);
+               clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
+               spin_unlock_irqrestore(&port->lock, flags);
+               tty_port_tty_set(port, NULL);
+@@ -1550,7 +1550,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
+               goto end;
+       }
+-      port->count++;
++      atomic_inc(&port->count);
+       if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
+               retval = -ENXIO;
+               goto err_dec_count;
+@@ -1578,7 +1578,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
+       /*
+        * Make sure the device is in D0 state.
+        */
+-      if (port->count == 1)
++      if (atomic_read(&port->count) == 1)
+               uart_change_pm(state, UART_PM_STATE_ON);
+       /*
+@@ -1596,7 +1596,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
+ end:
+       return retval;
+ err_dec_count:
+-      port->count--;
++      atomic_dec(&port->count);
+       mutex_unlock(&port->mutex);
+       goto end;
+ }
+diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
+index 8eaf1ab..85c030d 100644
+--- a/drivers/tty/synclink.c
++++ b/drivers/tty/synclink.c
+@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
+       
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
+-                       __FILE__,__LINE__, info->device_name, info->port.count);
++                       __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
+       if (tty_port_close_start(&info->port, tty, filp) == 0)                   
+               goto cleanup;
+@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
+ cleanup:                      
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
+-                      tty->driver->name, info->port.count);
++                      tty->driver->name, atomic_read(&info->port.count));
+                       
+ }     /* end of mgsl_close() */
+@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
+       mgsl_flush_buffer(tty);
+       shutdown(info);
+-      
+-      info->port.count = 0;   
++
++      atomic_set(&info->port.count, 0);
+       info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
+       info->port.tty = NULL;
+@@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
+       
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):block_til_ready before block on %s count=%d\n",
+-                       __FILE__,__LINE__, tty->driver->name, port->count );
++                       __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
+       spin_lock_irqsave(&info->irq_spinlock, flags);
+       if (!tty_hung_up_p(filp)) {
+               extra_count = true;
+-              port->count--;
++              atomic_dec(&port->count);
+       }
+       spin_unlock_irqrestore(&info->irq_spinlock, flags);
+       port->blocked_open++;
+@@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
+               
+               if (debug_level >= DEBUG_LEVEL_INFO)
+                       printk("%s(%d):block_til_ready blocking on %s count=%d\n",
+-                               __FILE__,__LINE__, tty->driver->name, port->count );
++                               __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
+                                
+               tty_unlock(tty);
+               schedule();
+@@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
+       
+       /* FIXME: Racy on hangup during close wait */
+       if (extra_count)
+-              port->count++;
++              atomic_inc(&port->count);
+       port->blocked_open--;
+       
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
+-                       __FILE__,__LINE__, tty->driver->name, port->count );
++                       __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
+                        
+       if (!retval)
+               port->flags |= ASYNC_NORMAL_ACTIVE;
+@@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
+               
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
+-                       __FILE__,__LINE__,tty->driver->name, info->port.count);
++                       __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
+       /* If port is closing, signal caller to try again */
+       if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
+@@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
+               spin_unlock_irqrestore(&info->netlock, flags);
+               goto cleanup;
+       }
+-      info->port.count++;
++      atomic_inc(&info->port.count);
+       spin_unlock_irqrestore(&info->netlock, flags);
+-      if (info->port.count == 1) {
++      if (atomic_read(&info->port.count) == 1) {
+               /* 1st open on this device, init hardware */
+               retval = startup(info);
+               if (retval < 0)
+@@ -3446,8 +3446,8 @@ cleanup:
+       if (retval) {
+               if (tty->count == 1)
+                       info->port.tty = NULL; /* tty layer will release tty struct */
+-              if(info->port.count)
+-                      info->port.count--;
++              if (atomic_read(&info->port.count))
++                      atomic_dec(&info->port.count);
+       }
+       
+       return retval;
+@@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
+       unsigned short new_crctype;
+       /* return error if TTY interface open */
+-      if (info->port.count)
++      if (atomic_read(&info->port.count))
+               return -EBUSY;
+       switch (encoding)
+@@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_device *dev)
+       /* arbitrate between network and tty opens */
+       spin_lock_irqsave(&info->netlock, flags);
+-      if (info->port.count != 0 || info->netcount != 0) {
++      if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
+               printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
+               spin_unlock_irqrestore(&info->netlock, flags);
+               return -EBUSY;
+@@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+               printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
+       /* return error if TTY interface open */
+-      if (info->port.count)
++      if (atomic_read(&info->port.count))
+               return -EBUSY;
+       if (cmd != SIOCWANDEV)
+diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
+index 1abf946..1ee34fc 100644
+--- a/drivers/tty/synclink_gt.c
++++ b/drivers/tty/synclink_gt.c
+@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
+       tty->driver_data = info;
+       info->port.tty = tty;
+-      DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
++      DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
+       /* If port is closing, signal caller to try again */
+       if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
+@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
+               mutex_unlock(&info->port.mutex);
+               goto cleanup;
+       }
+-      info->port.count++;
++      atomic_inc(&info->port.count);
+       spin_unlock_irqrestore(&info->netlock, flags);
+-      if (info->port.count == 1) {
++      if (atomic_read(&info->port.count) == 1) {
+               /* 1st open on this device, init hardware */
+               retval = startup(info);
+               if (retval < 0) {
+@@ -715,8 +715,8 @@ cleanup:
+       if (retval) {
+               if (tty->count == 1)
+                       info->port.tty = NULL; /* tty layer will release tty struct */
+-              if(info->port.count)
+-                      info->port.count--;
++              if(atomic_read(&info->port.count))
++                      atomic_dec(&info->port.count);
+       }
+       DBGINFO(("%s open rc=%d\n", info->device_name, retval));
+@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
+       if (sanity_check(info, tty->name, "close"))
+               return;
+-      DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
++      DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
+       if (tty_port_close_start(&info->port, tty, filp) == 0)
+               goto cleanup;
+@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
+       tty_port_close_end(&info->port, tty);
+       info->port.tty = NULL;
+ cleanup:
+-      DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
++      DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
+ }
+ static void hangup(struct tty_struct *tty)
+@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
+       shutdown(info);
+       spin_lock_irqsave(&info->port.lock, flags);
+-      info->port.count = 0;
++      atomic_set(&info->port.count, 0);
+       info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
+       info->port.tty = NULL;
+       spin_unlock_irqrestore(&info->port.lock, flags);
+@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
+       unsigned short new_crctype;
+       /* return error if TTY interface open */
+-      if (info->port.count)
++      if (atomic_read(&info->port.count))
+               return -EBUSY;
+       DBGINFO(("%s hdlcdev_attach\n", info->device_name));
+@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
+       /* arbitrate between network and tty opens */
+       spin_lock_irqsave(&info->netlock, flags);
+-      if (info->port.count != 0 || info->netcount != 0) {
++      if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
+               DBGINFO(("%s hdlc_open busy\n", dev->name));
+               spin_unlock_irqrestore(&info->netlock, flags);
+               return -EBUSY;
+@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+       DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
+       /* return error if TTY interface open */
+-      if (info->port.count)
++      if (atomic_read(&info->port.count))
+               return -EBUSY;
+       if (cmd != SIOCWANDEV)
+@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
+               if (port == NULL)
+                       continue;
+               spin_lock(&port->lock);
+-              if ((port->port.count || port->netcount) &&
++              if ((atomic_read(&port->port.count) || port->netcount) &&
+                   port->pending_bh && !port->bh_running &&
+                   !port->bh_requested) {
+                       DBGISR(("%s bh queued\n", port->device_name));
+@@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
+       spin_lock_irqsave(&info->lock, flags);
+       if (!tty_hung_up_p(filp)) {
+               extra_count = true;
+-              port->count--;
++              atomic_dec(&port->count);
+       }
+       spin_unlock_irqrestore(&info->lock, flags);
+       port->blocked_open++;
+@@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
+       remove_wait_queue(&port->open_wait, &wait);
+       if (extra_count)
+-              port->count++;
++              atomic_inc(&port->count);
+       port->blocked_open--;
+       if (!retval)
+diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
+index ff17138..e38b41e 100644
+--- a/drivers/tty/synclinkmp.c
++++ b/drivers/tty/synclinkmp.c
+@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):%s open(), old ref count = %d\n",
+-                       __FILE__,__LINE__,tty->driver->name, info->port.count);
++                       __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
+       /* If port is closing, signal caller to try again */
+       if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
+@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
+               spin_unlock_irqrestore(&info->netlock, flags);
+               goto cleanup;
+       }
+-      info->port.count++;
++      atomic_inc(&info->port.count);
+       spin_unlock_irqrestore(&info->netlock, flags);
+-      if (info->port.count == 1) {
++      if (atomic_read(&info->port.count) == 1) {
+               /* 1st open on this device, init hardware */
+               retval = startup(info);
+               if (retval < 0)
+@@ -796,8 +796,8 @@ cleanup:
+       if (retval) {
+               if (tty->count == 1)
+                       info->port.tty = NULL; /* tty layer will release tty struct */
+-              if(info->port.count)
+-                      info->port.count--;
++              if(atomic_read(&info->port.count))
++                      atomic_dec(&info->port.count);
+       }
+       return retval;
+@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):%s close() entry, count=%d\n",
+-                       __FILE__,__LINE__, info->device_name, info->port.count);
++                       __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
+       if (tty_port_close_start(&info->port, tty, filp) == 0)
+               goto cleanup;
+@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
+ cleanup:
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
+-                      tty->driver->name, info->port.count);
++                      tty->driver->name, atomic_read(&info->port.count));
+ }
+ /* Called by tty_hangup() when a hangup is signaled.
+@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
+       shutdown(info);
+       spin_lock_irqsave(&info->port.lock, flags);
+-      info->port.count = 0;
++      atomic_set(&info->port.count, 0);
+       info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
+       info->port.tty = NULL;
+       spin_unlock_irqrestore(&info->port.lock, flags);
+@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
+       unsigned short new_crctype;
+       /* return error if TTY interface open */
+-      if (info->port.count)
++      if (atomic_read(&info->port.count))
+               return -EBUSY;
+       switch (encoding)
+@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
+       /* arbitrate between network and tty opens */
+       spin_lock_irqsave(&info->netlock, flags);
+-      if (info->port.count != 0 || info->netcount != 0) {
++      if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
+               printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
+               spin_unlock_irqrestore(&info->netlock, flags);
+               return -EBUSY;
+@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+               printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
+       /* return error if TTY interface open */
+-      if (info->port.count)
++      if (atomic_read(&info->port.count))
+               return -EBUSY;
+       if (cmd != SIOCWANDEV)
+@@ -2620,7 +2620,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
+                * do not request bottom half processing if the
+                * device is not open in a normal mode.
+                */
+-              if ( port && (port->port.count || port->netcount) &&
++              if ( port && (atomic_read(&port->port.count) || port->netcount) &&
+                    port->pending_bh && !port->bh_running &&
+                    !port->bh_requested ) {
+                       if ( debug_level >= DEBUG_LEVEL_ISR )
+@@ -3318,12 +3318,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):%s block_til_ready() before block, count=%d\n",
+-                       __FILE__,__LINE__, tty->driver->name, port->count );
++                       __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
+       spin_lock_irqsave(&info->lock, flags);
+       if (!tty_hung_up_p(filp)) {
+               extra_count = true;
+-              port->count--;
++              atomic_dec(&port->count);
+       }
+       spin_unlock_irqrestore(&info->lock, flags);
+       port->blocked_open++;
+@@ -3352,7 +3352,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
+               if (debug_level >= DEBUG_LEVEL_INFO)
+                       printk("%s(%d):%s block_til_ready() count=%d\n",
+-                               __FILE__,__LINE__, tty->driver->name, port->count );
++                               __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
+               tty_unlock(tty);
+               schedule();
+@@ -3363,12 +3363,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
+       remove_wait_queue(&port->open_wait, &wait);
+       if (extra_count)
+-              port->count++;
++              atomic_inc(&port->count);
+       port->blocked_open--;
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):%s block_til_ready() after, count=%d\n",
+-                       __FILE__,__LINE__, tty->driver->name, port->count );
++                       __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
+       if (!retval)
+               port->flags |= ASYNC_NORMAL_ACTIVE;
+diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
+index b51c154..17d55d1 100644
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -1022,7 +1022,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
+ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
+                                  size_t count, loff_t *ppos)
+ {
+-      if (count) {
++      if (count && capable(CAP_SYS_ADMIN)) {
+               char c;
+               if (get_user(c, buf))
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 4476682..d77e748 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -3466,7 +3466,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
+ void tty_default_fops(struct file_operations *fops)
+ {
+-      *fops = tty_fops;
++      memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
+ }
+ /*
+diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
+index 1afe192..73d2c20 100644
+--- a/drivers/tty/tty_ldisc.c
++++ b/drivers/tty/tty_ldisc.c
+@@ -66,7 +66,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
+       raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
+       tty_ldiscs[disc] = new_ldisc;
+       new_ldisc->num = disc;
+-      new_ldisc->refcount = 0;
++      atomic_set(&new_ldisc->refcount, 0);
+       raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+       return ret;
+@@ -94,7 +94,7 @@ int tty_unregister_ldisc(int disc)
+               return -EINVAL;
+       raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
+-      if (tty_ldiscs[disc]->refcount)
++      if (atomic_read(&tty_ldiscs[disc]->refcount))
+               ret = -EBUSY;
+       else
+               tty_ldiscs[disc] = NULL;
+@@ -115,7 +115,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
+       if (ldops) {
+               ret = ERR_PTR(-EAGAIN);
+               if (try_module_get(ldops->owner)) {
+-                      ldops->refcount++;
++                      atomic_inc(&ldops->refcount);
+                       ret = ldops;
+               }
+       }
+@@ -128,7 +128,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
+       unsigned long flags;
+       raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
+-      ldops->refcount--;
++      atomic_dec(&ldops->refcount);
+       module_put(ldops->owner);
+       raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ }
+@@ -196,7 +196,7 @@ static inline void tty_ldisc_put(struct tty_ldisc *ld)
+       /* unreleased reader reference(s) will cause this WARN */
+       WARN_ON(!atomic_dec_and_test(&ld->users));
+-      ld->ops->refcount--;
++      atomic_dec(&ld->ops->refcount);
+       module_put(ld->ops->owner);
+       kfree(ld);
+       raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
+index f597e88..b7f68ed 100644
+--- a/drivers/tty/tty_port.c
++++ b/drivers/tty/tty_port.c
+@@ -232,7 +232,7 @@ void tty_port_hangup(struct tty_port *port)
+       unsigned long flags;
+       spin_lock_irqsave(&port->lock, flags);
+-      port->count = 0;
++      atomic_set(&port->count, 0);
+       port->flags &= ~ASYNC_NORMAL_ACTIVE;
+       tty = port->tty;
+       if (tty)
+@@ -390,7 +390,7 @@ int tty_port_block_til_ready(struct tty_port *port,
+       /* The port lock protects the port counts */
+       spin_lock_irqsave(&port->lock, flags);
+       if (!tty_hung_up_p(filp))
+-              port->count--;
++              atomic_dec(&port->count);
+       port->blocked_open++;
+       spin_unlock_irqrestore(&port->lock, flags);
+@@ -432,7 +432,7 @@ int tty_port_block_til_ready(struct tty_port *port,
+          we must not mess that up further */
+       spin_lock_irqsave(&port->lock, flags);
+       if (!tty_hung_up_p(filp))
+-              port->count++;
++              atomic_inc(&port->count);
+       port->blocked_open--;
+       if (retval == 0)
+               port->flags |= ASYNC_NORMAL_ACTIVE;
+@@ -466,19 +466,19 @@ int tty_port_close_start(struct tty_port *port,
+               return 0;
+       }
+-      if (tty->count == 1 && port->count != 1) {
++      if (tty->count == 1 && atomic_read(&port->count) != 1) {
+               printk(KERN_WARNING
+                   "tty_port_close_start: tty->count = 1 port count = %d.\n",
+-                                                              port->count);
+-              port->count = 1;
++                                                              atomic_read(&port->count));
++              atomic_set(&port->count, 1);
+       }
+-      if (--port->count < 0) {
++      if (atomic_dec_return(&port->count) < 0) {
+               printk(KERN_WARNING "tty_port_close_start: count = %d\n",
+-                                                              port->count);
+-              port->count = 0;
++                                                              atomic_read(&port->count));
++              atomic_set(&port->count, 0);
+       }
+-      if (port->count) {
++      if (atomic_read(&port->count)) {
+               spin_unlock_irqrestore(&port->lock, flags);
+               if (port->ops->drop)
+                       port->ops->drop(port);
+@@ -564,7 +564,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
+ {
+       spin_lock_irq(&port->lock);
+       if (!tty_hung_up_p(filp))
+-              ++port->count;
++              atomic_inc(&port->count);
+       spin_unlock_irq(&port->lock);
+       tty_port_tty_set(port, tty);
+diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
+index a9af1b9a..1e08e7f 100644
+--- a/drivers/tty/vt/keyboard.c
++++ b/drivers/tty/vt/keyboard.c
+@@ -647,6 +647,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
+            kbd->kbdmode == VC_OFF) &&
+            value != KVAL(K_SAK))
+               return;         /* SAK is allowed even in raw mode */
++
++#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
++      {
++              void *func = fn_handler[value];
++              if (func == fn_show_state || func == fn_show_ptregs ||
++                  func == fn_show_mem)
++                      return;
++      }
++#endif
++
+       fn_handler[value](vc);
+ }
+@@ -1795,9 +1805,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
+       if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
+               return -EFAULT;
+-      if (!capable(CAP_SYS_TTY_CONFIG))
+-              perm = 0;
+-
+       switch (cmd) {
+       case KDGKBENT:
+               /* Ensure another thread doesn't free it under us */
+@@ -1812,6 +1819,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
+               spin_unlock_irqrestore(&kbd_event_lock, flags);
+               return put_user(val, &user_kbe->kb_value);
+       case KDSKBENT:
++              if (!capable(CAP_SYS_TTY_CONFIG))
++                      perm = 0;
++
+               if (!perm)
+                       return -EPERM;
+               if (!i && v == K_NOSUCHMAP) {
+@@ -1902,9 +1912,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
+       int i, j, k;
+       int ret;
+-      if (!capable(CAP_SYS_TTY_CONFIG))
+-              perm = 0;
+-
+       kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
+       if (!kbs) {
+               ret = -ENOMEM;
+@@ -1938,6 +1945,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
+               kfree(kbs);
+               return ((p && *p) ? -EOVERFLOW : 0);
+       case KDSKBSENT:
++              if (!capable(CAP_SYS_TTY_CONFIG))
++                      perm = 0;
++
+               if (!perm) {
+                       ret = -EPERM;
+                       goto reterr;
+diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
+index b645c47..a55c182 100644
+--- a/drivers/uio/uio.c
++++ b/drivers/uio/uio.c
+@@ -25,6 +25,7 @@
+ #include <linux/kobject.h>
+ #include <linux/cdev.h>
+ #include <linux/uio_driver.h>
++#include <asm/local.h>
+ #define UIO_MAX_DEVICES               (1U << MINORBITS)
+@@ -32,10 +33,10 @@ struct uio_device {
+       struct module           *owner;
+       struct device           *dev;
+       int                     minor;
+-      atomic_t                event;
++      atomic_unchecked_t      event;
+       struct fasync_struct    *async_queue;
+       wait_queue_head_t       wait;
+-      int                     vma_count;
++      local_t                 vma_count;
+       struct uio_info         *info;
+       struct kobject          *map_dir;
+       struct kobject          *portio_dir;
+@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
+                         struct device_attribute *attr, char *buf)
+ {
+       struct uio_device *idev = dev_get_drvdata(dev);
+-      return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
++      return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
+ }
+ static struct device_attribute uio_class_attributes[] = {
+@@ -398,7 +399,7 @@ void uio_event_notify(struct uio_info *info)
+ {
+       struct uio_device *idev = info->uio_dev;
+-      atomic_inc(&idev->event);
++      atomic_inc_unchecked(&idev->event);
+       wake_up_interruptible(&idev->wait);
+       kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
+ }
+@@ -451,7 +452,7 @@ static int uio_open(struct inode *inode, struct file *filep)
+       }
+       listener->dev = idev;
+-      listener->event_count = atomic_read(&idev->event);
++      listener->event_count = atomic_read_unchecked(&idev->event);
+       filep->private_data = listener;
+       if (idev->info->open) {
+@@ -502,7 +503,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
+               return -EIO;
+       poll_wait(filep, &idev->wait, wait);
+-      if (listener->event_count != atomic_read(&idev->event))
++      if (listener->event_count != atomic_read_unchecked(&idev->event))
+               return POLLIN | POLLRDNORM;
+       return 0;
+ }
+@@ -527,7 +528,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
+       do {
+               set_current_state(TASK_INTERRUPTIBLE);
+-              event_count = atomic_read(&idev->event);
++              event_count = atomic_read_unchecked(&idev->event);
+               if (event_count != listener->event_count) {
+                       if (copy_to_user(buf, &event_count, count))
+                               retval = -EFAULT;
+@@ -596,13 +597,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
+ static void uio_vma_open(struct vm_area_struct *vma)
+ {
+       struct uio_device *idev = vma->vm_private_data;
+-      idev->vma_count++;
++      local_inc(&idev->vma_count);
+ }
+ static void uio_vma_close(struct vm_area_struct *vma)
+ {
+       struct uio_device *idev = vma->vm_private_data;
+-      idev->vma_count--;
++      local_dec(&idev->vma_count);
+ }
+ static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+@@ -809,7 +810,7 @@ int __uio_register_device(struct module *owner,
+       idev->owner = owner;
+       idev->info = info;
+       init_waitqueue_head(&idev->wait);
+-      atomic_set(&idev->event, 0);
++      atomic_set_unchecked(&idev->event, 0);
+       ret = uio_get_minor(idev);
+       if (ret)
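Editor's note: the uio.c hunks swap `atomic_t` for `atomic_unchecked_t` on the event counter and `int` for `local_t` on vma_count. In the PaX/grsecurity tree these appear to be the wrap-allowed counter types: hardened atomic_t gains reference-count overflow detection, so counters that are expected to wrap harmlessly, such as event statistics, are moved to the unchecked variant (the same substitution shows up below for the USB ATM statistics, urbnum and the WUSB xfer id). A minimal sketch of the idea, with mock definitions standing in for the real PaX primitives:

/* unchecked_counter_sketch.c - illustrative only. atomic_unchecked_t and the
 * *_unchecked() helpers below are mocks that approximate the intent of the
 * PaX types: a counter that may wrap without tripping overflow protection.
 * They are NOT the real kernel definitions. */
#include <limits.h>
#include <stdio.h>

typedef struct { volatile unsigned int counter; } atomic_unchecked_t;

#define ATOMIC_INIT_UNCHECKED(v)        { (v) }

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        v->counter++;           /* wrapping silently is fine for a statistic */
}

static inline unsigned int atomic_read_unchecked(const atomic_unchecked_t *v)
{
        return v->counter;
}

int main(void)
{
        atomic_unchecked_t events = ATOMIC_INIT_UNCHECKED(UINT_MAX);

        atomic_inc_unchecked(&events);          /* wraps to 0; nothing traps */
        printf("event counter after wrap: %u\n", atomic_read_unchecked(&events));
        return 0;
}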
+diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
+index 8a7eb77..c00402f 100644
+--- a/drivers/usb/atm/cxacru.c
++++ b/drivers/usb/atm/cxacru.c
+@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
+               ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
+               if (ret < 2)
+                       return -EINVAL;
+-              if (index < 0 || index > 0x7f)
++              if (index > 0x7f)
+                       return -EINVAL;
+               pos += tmp;
+diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
+index d3527dd..26effa2 100644
+--- a/drivers/usb/atm/usbatm.c
++++ b/drivers/usb/atm/usbatm.c
+@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
+               if (printk_ratelimit())
+                       atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
+                               __func__, vpi, vci);
+-              atomic_inc(&vcc->stats->rx_err);
++              atomic_inc_unchecked(&vcc->stats->rx_err);
+               return;
+       }
+@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
+               if (length > ATM_MAX_AAL5_PDU) {
+                       atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
+                                 __func__, length, vcc);
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+                       goto out;
+               }
+@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
+               if (sarb->len < pdu_length) {
+                       atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
+                                 __func__, pdu_length, sarb->len, vcc);
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+                       goto out;
+               }
+               if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
+                       atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
+                                 __func__, vcc);
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+                       goto out;
+               }
+@@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
+                       if (printk_ratelimit())
+                               atm_err(instance, "%s: no memory for skb (length: %u)!\n",
+                                       __func__, length);
+-                      atomic_inc(&vcc->stats->rx_drop);
++                      atomic_inc_unchecked(&vcc->stats->rx_drop);
+                       goto out;
+               }
+@@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
+               vcc->push(vcc, skb);
+-              atomic_inc(&vcc->stats->rx);
++              atomic_inc_unchecked(&vcc->stats->rx);
+       out:
+               skb_trim(sarb, 0);
+       }
+@@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
+                       struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
+                       usbatm_pop(vcc, skb);
+-                      atomic_inc(&vcc->stats->tx);
++                      atomic_inc_unchecked(&vcc->stats->tx);
+                       skb = skb_dequeue(&instance->sndqueue);
+               }
+@@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
+       if (!left--)
+               return sprintf(page,
+                              "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
+-                             atomic_read(&atm_dev->stats.aal5.tx),
+-                             atomic_read(&atm_dev->stats.aal5.tx_err),
+-                             atomic_read(&atm_dev->stats.aal5.rx),
+-                             atomic_read(&atm_dev->stats.aal5.rx_err),
+-                             atomic_read(&atm_dev->stats.aal5.rx_drop));
++                             atomic_read_unchecked(&atm_dev->stats.aal5.tx),
++                             atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
++                             atomic_read_unchecked(&atm_dev->stats.aal5.rx),
++                             atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
++                             atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
+       if (!left--) {
+               if (instance->disconnected)
+diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
+index 2a3bbdf..91d72cf 100644
+--- a/drivers/usb/core/devices.c
++++ b/drivers/usb/core/devices.c
+@@ -126,7 +126,7 @@ static const char format_endpt[] =
+  * time it gets called.
+  */
+ static struct device_connect_event {
+-      atomic_t count;
++      atomic_unchecked_t count;
+       wait_queue_head_t wait;
+ } device_event = {
+       .count = ATOMIC_INIT(1),
+@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
+ void usbfs_conn_disc_event(void)
+ {
+-      atomic_add(2, &device_event.count);
++      atomic_add_unchecked(2, &device_event.count);
+       wake_up(&device_event.wait);
+ }
+@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
+       poll_wait(file, &device_event.wait, wait);
+-      event_count = atomic_read(&device_event.count);
++      event_count = atomic_read_unchecked(&device_event.count);
+       if (file->f_version != event_count) {
+               file->f_version = event_count;
+               return POLLIN | POLLRDNORM;
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index d53547d..6a22d02 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1526,7 +1526,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
+        */
+       usb_get_urb(urb);
+       atomic_inc(&urb->use_count);
+-      atomic_inc(&urb->dev->urbnum);
++      atomic_inc_unchecked(&urb->dev->urbnum);
+       usbmon_urb_submit(&hcd->self, urb);
+       /* NOTE requirements on root-hub callers (usbfs and the hub
+@@ -1553,7 +1553,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
+               urb->hcpriv = NULL;
+               INIT_LIST_HEAD(&urb->urb_list);
+               atomic_dec(&urb->use_count);
+-              atomic_dec(&urb->dev->urbnum);
++              atomic_dec_unchecked(&urb->dev->urbnum);
+               if (atomic_read(&urb->reject))
+                       wake_up(&usb_kill_urb_queue);
+               usb_put_urb(urb);
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index 444d30e..f15c850 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
+  * method can wait for it to complete.  Since you don't have a handle on the
+  * URB used, you can't cancel the request.
+  */
+-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
++int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
+                   __u8 requesttype, __u16 value, __u16 index, void *data,
+                   __u16 size, int timeout)
+ {
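Editor's note: `__intentional_overflow(-1)` is an annotation consumed by the PaX size-overflow GCC plugin; tagging usb_control_msg() tells the plugin that arithmetic flowing through this function may overflow deliberately and should not be instrumented. When the plugin is not in use the macro presumably expands to nothing. A hedged sketch of how such an annotation macro is typically wired up; the attribute name and the config symbol are assumptions, not the patch's actual definitions:

/* intentional_overflow_sketch.c - illustrative only. Shows the usual shape of
 * a compiler-plugin annotation macro: it expands to an attribute when the
 * plugin is active and to nothing otherwise. */
#include <stdio.h>

#ifdef SIZE_OVERFLOW_PLUGIN_ENABLED
#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)     /* no-op without the plugin */
#endif

/* the annotated function itself is ordinary C either way */
static int __intentional_overflow(-1) add_wrapping(int a, int b)
{
        /* wrap on purpose; the annotation asks the checker to look away */
        return (int)((unsigned int)a + (unsigned int)b);
}

int main(void)
{
        printf("%d\n", add_wrapping(2000000000, 2000000000));
        return 0;
}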
+diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
+index aa38db4..0a08682 100644
+--- a/drivers/usb/core/sysfs.c
++++ b/drivers/usb/core/sysfs.c
+@@ -239,7 +239,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
+       struct usb_device *udev;
+       udev = to_usb_device(dev);
+-      return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
++      return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
+ }
+ static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
+diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
+index b10da72..43aa0b2 100644
+--- a/drivers/usb/core/usb.c
++++ b/drivers/usb/core/usb.c
+@@ -389,7 +389,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
+       set_dev_node(&dev->dev, dev_to_node(bus->controller));
+       dev->state = USB_STATE_ATTACHED;
+       dev->lpm_disable_count = 1;
+-      atomic_set(&dev->urbnum, 0);
++      atomic_set_unchecked(&dev->urbnum, 0);
+       INIT_LIST_HEAD(&dev->ep0.urb_list);
+       dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
+diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
+index 5e29dde..eca992f 100644
+--- a/drivers/usb/early/ehci-dbgp.c
++++ b/drivers/usb/early/ehci-dbgp.c
+@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
+ #ifdef CONFIG_KGDB
+ static struct kgdb_io kgdbdbgp_io_ops;
+-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
++static struct kgdb_io kgdbdbgp_io_ops_console;
++#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
+ #else
+ #define dbgp_kgdb_mode (0)
+ #endif
+@@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
+       .write_char = kgdbdbgp_write_char,
+ };
++static struct kgdb_io kgdbdbgp_io_ops_console = {
++      .name = "kgdbdbgp",
++      .read_char = kgdbdbgp_read_char,
++      .write_char = kgdbdbgp_write_char,
++      .is_console = 1
++};
++
+ static int kgdbdbgp_wait_time;
+ static int __init kgdbdbgp_parse_config(char *str)
+@@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
+               ptr++;
+               kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
+       }
+-      kgdb_register_io_module(&kgdbdbgp_io_ops);
+-      kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
++      if (early_dbgp_console.index != -1)
++              kgdb_register_io_module(&kgdbdbgp_io_ops_console);
++      else
++              kgdb_register_io_module(&kgdbdbgp_io_ops);
+       return 0;
+ }
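Editor's note: instead of flipping `kgdbdbgp_io_ops.is_console` at runtime, the ehci-dbgp change registers one of two statically initialized kgdb_io structures depending on whether the early console is active. Keeping both variants fully initialized at compile time fits the patch's broader effort to keep ops-like structures read-only. A small sketch of the "pick a pre-built ops variant instead of patching one in place" pattern, with invented types:

/* static_ops_variant_sketch.c - illustrative only; types and names invented.
 * Two complete, compile-time-initialized ops structures are selected at setup
 * time, replacing a single structure whose flag would be written at runtime. */
#include <stdbool.h>
#include <stdio.h>

struct io_ops {
        const char *name;
        bool is_console;
};

/* both variants are complete at compile time and could live in rodata */
static const struct io_ops ops_plain   = { .name = "dbgp", .is_console = false };
static const struct io_ops ops_console = { .name = "dbgp", .is_console = true  };

static const struct io_ops *registered;

static void setup(bool console_active)
{
        /* was: ops_plain.is_console = console_active; register(&ops_plain); */
        registered = console_active ? &ops_console : &ops_plain;
}

int main(void)
{
        setup(true);
        printf("%s console=%d\n", registered->name, registered->is_console);
        return 0;
}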
+diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
+index b369292..9f3ba40 100644
+--- a/drivers/usb/gadget/u_serial.c
++++ b/drivers/usb/gadget/u_serial.c
+@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
+                       spin_lock_irq(&port->port_lock);
+                       /* already open?  Great. */
+-                      if (port->port.count) {
++                      if (atomic_read(&port->port.count)) {
+                               status = 0;
+-                              port->port.count++;
++                              atomic_inc(&port->port.count);
+                       /* currently opening/closing? wait ... */
+                       } else if (port->openclose) {
+@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
+       tty->driver_data = port;
+       port->port.tty = tty;
+-      port->port.count = 1;
++      atomic_set(&port->port.count, 1);
+       port->openclose = false;
+       /* if connected, start the I/O stream */
+@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
+       spin_lock_irq(&port->port_lock);
+-      if (port->port.count != 1) {
+-              if (port->port.count == 0)
++      if (atomic_read(&port->port.count) != 1) {
++              if (atomic_read(&port->port.count) == 0)
+                       WARN_ON(1);
+               else
+-                      --port->port.count;
++                      atomic_dec(&port->port.count);
+               goto exit;
+       }
+@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
+        * and sleep if necessary
+        */
+       port->openclose = true;
+-      port->port.count = 0;
++      atomic_set(&port->port.count, 0);
+       gser = port->port_usb;
+       if (gser && gser->disconnect)
+@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
+       int cond;
+       spin_lock_irq(&port->port_lock);
+-      cond = (port->port.count == 0) && !port->openclose;
++      cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
+       spin_unlock_irq(&port->port_lock);
+       return cond;
+ }
+@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
+       /* if it's already open, start I/O ... and notify the serial
+        * protocol about open/close status (connect/disconnect).
+        */
+-      if (port->port.count) {
++      if (atomic_read(&port->port.count)) {
+               pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
+               gs_start_io(port);
+               if (gser->connect)
+@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
+       port->port_usb = NULL;
+       gser->ioport = NULL;
+-      if (port->port.count > 0 || port->openclose) {
++      if (atomic_read(&port->port.count) > 0 || port->openclose) {
+               wake_up_interruptible(&port->drain_wait);
+               if (port->port.tty)
+                       tty_hangup(port->port.tty);
+@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
+       /* finally, free any unused/unusable I/O buffers */
+       spin_lock_irqsave(&port->port_lock, flags);
+-      if (port->port.count == 0 && !port->openclose)
++      if (atomic_read(&port->port.count) == 0 && !port->openclose)
+               gs_buf_free(&port->port_write_buf);
+       gs_free_requests(gser->out, &port->read_pool, NULL);
+       gs_free_requests(gser->out, &port->read_queue, NULL);
+diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
+index 5f3bcd3..bfca43f 100644
+--- a/drivers/usb/serial/console.c
++++ b/drivers/usb/serial/console.c
+@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
+       info->port = port;
+-      ++port->port.count;
++      atomic_inc(&port->port.count);
+       if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
+               if (serial->type->set_termios) {
+                       /*
+@@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
+       }
+       /* Now that any required fake tty operations are completed restore
+        * the tty port count */
+-      --port->port.count;
++      atomic_dec(&port->port.count);
+       /* The console is special in terms of closing the device so
+        * indicate this port is now acting as a system console. */
+       port->port.console = 1;
+@@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
+  free_tty:
+       kfree(tty);
+  reset_open_count:
+-      port->port.count = 0;
++      atomic_set(&port->port.count, 0);
+       usb_autopm_put_interface(serial->interface);
+  error_get_interface:
+       usb_serial_put(serial);
+diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
+index 75f70f0..d467e1a 100644
+--- a/drivers/usb/storage/usb.h
++++ b/drivers/usb/storage/usb.h
+@@ -63,7 +63,7 @@ struct us_unusual_dev {
+       __u8  useProtocol;
+       __u8  useTransport;
+       int (*initFunction)(struct us_data *);
+-};
++} __do_const;
+ /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
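Editor's note: `__do_const` appears to be a marker for the PaX constify plugin: structures that mostly carry function pointers and fixed configuration, like us_unusual_dev here or the dmi_system_id tables below, are forced const so their instances land in read-only memory and cannot be retargeted at runtime. A sketch of the intended effect; the real marker is provided by the compiler plugin and is stronger than the plain `const` used in this mock:

/* do_const_sketch.c - illustrative only. __do_const is approximated by plain
 * const placement; the actual plugin rewrites the type so that every instance
 * becomes read-only. */
#include <stdio.h>

struct proto_ops {
        const char *name;
        int (*init)(void);
};                              /* in the patch: } __do_const; */

static int demo_init(void) { return 0; }

/* with the structure constified, instances naturally live in rodata */
static const struct proto_ops default_ops = {
        .name = "demo",
        .init = demo_init,
};

int main(void)
{
        printf("%s -> %d\n", default_ops.name, default_ops.init());
        /* default_ops.init = NULL; would now be rejected at compile time */
        return 0;
}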
+diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
+index d6bea3e..60b250e 100644
+--- a/drivers/usb/wusbcore/wa-hc.h
++++ b/drivers/usb/wusbcore/wa-hc.h
+@@ -192,7 +192,7 @@ struct wahc {
+       struct list_head xfer_delayed_list;
+       spinlock_t xfer_list_lock;
+       struct work_struct xfer_work;
+-      atomic_t xfer_id_count;
++      atomic_unchecked_t xfer_id_count;
+ };
+@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
+       INIT_LIST_HEAD(&wa->xfer_delayed_list);
+       spin_lock_init(&wa->xfer_list_lock);
+       INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
+-      atomic_set(&wa->xfer_id_count, 1);
++      atomic_set_unchecked(&wa->xfer_id_count, 1);
+ }
+ /**
+diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
+index 028fc83..65bb105 100644
+--- a/drivers/usb/wusbcore/wa-xfer.c
++++ b/drivers/usb/wusbcore/wa-xfer.c
+@@ -296,7 +296,7 @@ out:
+  */
+ static void wa_xfer_id_init(struct wa_xfer *xfer)
+ {
+-      xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
++      xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
+ }
+ /*
+diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
+index 5174eba..86e764a 100644
+--- a/drivers/vhost/vringh.c
++++ b/drivers/vhost/vringh.c
+@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
+ static inline int putu16_kern(u16 *p, u16 val)
+ {
+-      ACCESS_ONCE(*p) = val;
++      ACCESS_ONCE_RW(*p) = val;
+       return 0;
+ }
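Editor's note: ACCESS_ONCE_RW is the write-side counterpart used by the hardened tree, which appears to const-qualify plain ACCESS_ONCE so that intentional volatile stores, like the one in putu16_kern(), have to be spelled out explicitly. A hedged sketch of how such a read-only / read-write split can be expressed; the macro bodies below are assumptions, not the kernel's definitions:

/* access_once_rw_sketch.c - illustrative only. */
#include <stdio.h>

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))  /* read only */
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))        /* may write */

int main(void)
{
        unsigned int v = 1;

        unsigned int r = ACCESS_ONCE(v);        /* fine: a read */
        ACCESS_ONCE_RW(v) = 2;                  /* writes must use the RW form */
        /* ACCESS_ONCE(v) = 3;  would fail: assignment to a const lvalue */
        printf("%u %u\n", r, v);
        return 0;
}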
+diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
+index 8c55011..eed4ae1a 100644
+--- a/drivers/video/aty/aty128fb.c
++++ b/drivers/video/aty/aty128fb.c
+@@ -149,7 +149,7 @@ enum {
+ };
+ /* Must match above enum */
+-static char * const r128_family[] = {
++static const char * const r128_family[] = {
+       "AGP",
+       "PCI",
+       "PRO AGP",
+diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
+index 4f27fdc..d3537e6 100644
+--- a/drivers/video/aty/atyfb_base.c
++++ b/drivers/video/aty/atyfb_base.c
+@@ -1325,10 +1325,14 @@ static int atyfb_set_par(struct fb_info *info)
+       par->accel_flags = var->accel_flags; /* hack */
+       if (var->accel_flags) {
+-              info->fbops->fb_sync = atyfb_sync;
++              pax_open_kernel();
++              *(void **)&info->fbops->fb_sync = atyfb_sync;
++              pax_close_kernel();
+               info->flags &= ~FBINFO_HWACCEL_DISABLED;
+       } else {
+-              info->fbops->fb_sync = NULL;
++              pax_open_kernel();
++              *(void **)&info->fbops->fb_sync = NULL;
++              pax_close_kernel();
+               info->flags |= FBINFO_HWACCEL_DISABLED;
+       }
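Editor's note: with the frame-buffer ops structures made read-only, assignments such as `info->fbops->fb_sync = ...` would fault, so the patch brackets each deliberate write with pax_open_kernel()/pax_close_kernel(), which in the PaX tree temporarily allow writing to otherwise protected kernel data, and casts through `*(void **)&` to bypass the const qualifier (the same bracket appears in mach64_cursor.c and fb_defio.c below). A mock sketch of the bracket pattern; the pax_* helpers here are placeholders:

/* pax_bracket_sketch.c - illustrative only. pax_open_kernel()/pax_close_kernel()
 * are mocked as no-ops; in the hardened kernel they toggle write protection so
 * a deliberate write to read-only data can go through. */
#include <stdio.h>

struct fb_ops_like {
        int (*fb_sync)(void);
};

static int my_sync(void) { return 0; }

static void pax_open_kernel(void)  { /* mock: would disable write protection */ }
static void pax_close_kernel(void) { /* mock: would restore write protection */ }

static struct fb_ops_like ops;          /* imagine this lives in rodata */

int main(void)
{
        pax_open_kernel();
        *(void **)&ops.fb_sync = (void *)my_sync;  /* deliberate, bracketed write */
        pax_close_kernel();

        printf("fb_sync -> %d\n", ops.fb_sync());
        return 0;
}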
+diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
+index 95ec042..e6affdd 100644
+--- a/drivers/video/aty/mach64_cursor.c
++++ b/drivers/video/aty/mach64_cursor.c
+@@ -7,6 +7,7 @@
+ #include <linux/string.h>
+ #include <asm/io.h>
++#include <asm/pgtable.h>
+ #ifdef __sparc__
+ #include <asm/fbio.h>
+@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
+       info->sprite.buf_align = 16;    /* and 64 lines tall. */
+       info->sprite.flags = FB_PIXMAP_IO;
+-      info->fbops->fb_cursor = atyfb_cursor;
++      pax_open_kernel();
++      *(void **)&info->fbops->fb_cursor = atyfb_cursor;
++      pax_close_kernel();
+       return 0;
+ }
+diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
+index c74e7aa..e3c2790 100644
+--- a/drivers/video/backlight/backlight.c
++++ b/drivers/video/backlight/backlight.c
+@@ -304,7 +304,7 @@ struct backlight_device *backlight_device_register(const char *name,
+       new_bd->dev.class = backlight_class;
+       new_bd->dev.parent = parent;
+       new_bd->dev.release = bl_device_release;
+-      dev_set_name(&new_bd->dev, name);
++      dev_set_name(&new_bd->dev, "%s", name);
+       dev_set_drvdata(&new_bd->dev, devdata);
+       /* Set default properties */
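Editor's note: passing a caller-supplied name straight through as the format string of the printf-style dev_set_name() is a classic format-string bug; a name containing `%` conversions would be interpreted rather than copied. The `"%s", name` form (applied again in lcd.c just below) makes the name pure data. A tiny userspace sketch of the same class of fix, with printf standing in for dev_set_name():

/* format_string_sketch.c - illustrative only. */
#include <stdio.h>

static void set_name_unsafe(const char *name)
{
        printf(name);           /* bug: name is used as the format string */
        putchar('\n');
}

static void set_name_safe(const char *name)
{
        printf("%s\n", name);   /* fixed: name is treated as data only */
}

int main(void)
{
        const char *name = "panel-%x-%n";       /* hostile-looking name */

        set_name_safe(name);    /* prints the literal string */
        (void)set_name_unsafe;  /* left uncalled: would misinterpret the '%'s */
        return 0;
}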
+diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
+index bca6ccc..252107e 100644
+--- a/drivers/video/backlight/kb3886_bl.c
++++ b/drivers/video/backlight/kb3886_bl.c
+@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
+ static unsigned long kb3886bl_flags;
+ #define KB3886BL_SUSPENDED     0x01
+-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
++static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
+       {
+               .ident = "Sahara Touch-iT",
+               .matches = {
+diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
+index 34fb6bd..3649fd9 100644
+--- a/drivers/video/backlight/lcd.c
++++ b/drivers/video/backlight/lcd.c
+@@ -219,7 +219,7 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
+       new_ld->dev.class = lcd_class;
+       new_ld->dev.parent = parent;
+       new_ld->dev.release = lcd_device_release;
+-      dev_set_name(&new_ld->dev, name);
++      dev_set_name(&new_ld->dev, "%s", name);
+       dev_set_drvdata(&new_ld->dev, devdata);
+       rc = device_register(&new_ld->dev);
+diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
+index 900aa4e..6d49418 100644
+--- a/drivers/video/fb_defio.c
++++ b/drivers/video/fb_defio.c
+@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
+       BUG_ON(!fbdefio);
+       mutex_init(&fbdefio->lock);
+-      info->fbops->fb_mmap = fb_deferred_io_mmap;
++      pax_open_kernel();
++      *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
++      pax_close_kernel();
+       INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
+       INIT_LIST_HEAD(&fbdefio->pagelist);
+       if (fbdefio->delay == 0) /* set a default of 1 s */
+@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
+               page->mapping = NULL;
+       }
+-      info->fbops->fb_mmap = NULL;
++      *(void **)&info->fbops->fb_mmap = NULL;
+       mutex_destroy(&fbdefio->lock);
+ }
+ EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
+diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
+index 5c3960d..15cf8fc 100644
+--- a/drivers/video/fbcmap.c
++++ b/drivers/video/fbcmap.c
+@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
+               rc = -ENODEV;
+               goto out;
+       }
+-      if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
+-                              !info->fbops->fb_setcmap)) {
++      if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
+               rc = -EINVAL;
+               goto out1;
+       }
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index 098bfc6..796841d 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
+                       image->dx += image->width + 8;
+               }
+       } else if (rotate == FB_ROTATE_UD) {
+-              for (x = 0; x < num && image->dx >= 0; x++) {
++              for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
+                       info->fbops->fb_imageblit(info, image);
+                       image->dx -= image->width + 8;
+               }
+@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
+                       image->dy += image->height + 8;
+               }
+       } else if (rotate == FB_ROTATE_CCW) {
+-              for (x = 0; x < num && image->dy >= 0; x++) {
++              for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
+                       info->fbops->fb_imageblit(info, image);
+                       image->dy -= image->height + 8;
+               }
+@@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
+                       return -EFAULT;
+               if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
+                       return -EINVAL;
+-              if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
++              if (con2fb.framebuffer >= FB_MAX)
+                       return -EINVAL;
+               if (!registered_fb[con2fb.framebuffer])
+                       request_module("fb%d", con2fb.framebuffer);
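Editor's note: the fbmem.c and fbcmap.c hunks (and the cxacru change earlier) deal with comparisons on unsigned values: tests like `con2fb.framebuffer < 0` or `cmap->start < 0` are always false and are simply dropped, while `image->dx >= 0` is always true for an unsigned dx, so the logo loop could never stop by "going negative". The `(__s32)` cast restores the intended wrapped-below-zero test. A small sketch of the pitfall:

/* unsigned_compare_sketch.c - illustrative only; mirrors the (__s32) casts in
 * fb_do_show_logo(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t dx = 5;
        int steps = 0;

        /* the signed cast is what lets the loop stop once dx wraps past zero */
        while ((int32_t)dx >= 0 && steps < 100) {
                dx -= 8;        /* wraps below zero after one step here */
                steps++;
        }
        printf("stopped after %d step(s), dx=0x%08x\n", steps, dx);

        /* without the cast the condition is a tautology: */
        printf("dx >= 0 is always %d for unsigned dx\n", dx >= 0);
        return 0;
}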
+diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
+index 7672d2e..b56437f 100644
+--- a/drivers/video/i810/i810_accel.c
++++ b/drivers/video/i810/i810_accel.c
+@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
+               }
+       }
+       printk("ringbuffer lockup!!!\n");
++      printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
+       i810_report_error(mmio); 
+       par->dev_flags |= LOCKUP;
+       info->pixmap.scan_align = 1;
+diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
+index 3c14e43..eafa544 100644
+--- a/drivers/video/logo/logo_linux_clut224.ppm
++++ b/drivers/video/logo/logo_linux_clut224.ppm
+@@ -1,1604 +1,1123 @@
+ P3
+-# Standard 224-color Linux logo
+ 80 80
+ 255
[logo_linux_clut224.ppm pixel rows omitted: per the hunk header above, the 1604 lines of the stock 80x80 "Standard 224-color Linux logo" bitmap are removed and replaced by a 1123-line substitute later in the hunk; the raw RGB triplet rows carry no further information and are not reproduced here]
+-  0   0   0   0   0   0   0   0   0  14  14  14
+- 34  34  34  66  66  66  78  78  78   6   6   6
+-  2   2   6  18  18  18 218 218 218 253 253 253
+-253 253 253 253 253 253 253 253 253 246 246 246
+-226 226 226 231 231 231 246 246 246 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 178 178 178   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6  18  18  18  90  90  90  62  62  62
+- 30  30  30  10  10  10   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0  10  10  10  26  26  26
+- 58  58  58  90  90  90  18  18  18   2   2   6
+-  2   2   6 110 110 110 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-250 250 250 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 231 231 231  18  18  18   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6  18  18  18  94  94  94
+- 54  54  54  26  26  26  10  10  10   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   6   6   6  22  22  22  50  50  50
+- 90  90  90  26  26  26   2   2   6   2   2   6
+- 14  14  14 195 195 195 250 250 250 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-250 250 250 242 242 242  54  54  54   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6  38  38  38
+- 86  86  86  50  50  50  22  22  22   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  6   6   6  14  14  14  38  38  38  82  82  82
+- 34  34  34   2   2   6   2   2   6   2   2   6
+- 42  42  42 195 195 195 246 246 246 253 253 253
+-253 253 253 253 253 253 253 253 253 250 250 250
+-242 242 242 242 242 242 250 250 250 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 250 250 250 246 246 246 238 238 238
+-226 226 226 231 231 231 101 101 101   6   6   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 38  38  38  82  82  82  42  42  42  14  14  14
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+- 10  10  10  26  26  26  62  62  62  66  66  66
+-  2   2   6   2   2   6   2   2   6   6   6   6
+- 70  70  70 170 170 170 206 206 206 234 234 234
+-246 246 246 250 250 250 250 250 250 238 238 238
+-226 226 226 231 231 231 238 238 238 250 250 250
+-250 250 250 250 250 250 246 246 246 231 231 231
+-214 214 214 206 206 206 202 202 202 202 202 202
+-198 198 198 202 202 202 182 182 182  18  18  18
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6  62  62  62  66  66  66  30  30  30
+- 10  10  10   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+- 14  14  14  42  42  42  82  82  82  18  18  18
+-  2   2   6   2   2   6   2   2   6  10  10  10
+- 94  94  94 182 182 182 218 218 218 242 242 242
+-250 250 250 253 253 253 253 253 253 250 250 250
+-234 234 234 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 246 246 246
+-238 238 238 226 226 226 210 210 210 202 202 202
+-195 195 195 195 195 195 210 210 210 158 158 158
+-  6   6   6  14  14  14  50  50  50  14  14  14
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   6   6   6  86  86  86  46  46  46
+- 18  18  18   6   6   6   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 22  22  22  54  54  54  70  70  70   2   2   6
+-  2   2   6  10  10  10   2   2   6  22  22  22
+-166 166 166 231 231 231 250 250 250 253 253 253
+-253 253 253 253 253 253 253 253 253 250 250 250
+-242 242 242 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 246 246 246
+-231 231 231 206 206 206 198 198 198 226 226 226
+- 94  94  94   2   2   6   6   6   6  38  38  38
+- 30  30  30   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6  62  62  62  66  66  66
+- 26  26  26  10  10  10   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 30  30  30  74  74  74  50  50  50   2   2   6
+- 26  26  26  26  26  26   2   2   6 106 106 106
+-238 238 238 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 246 246 246 218 218 218 202 202 202
+-210 210 210  14  14  14   2   2   6   2   2   6
+- 30  30  30  22  22  22   2   2   6   2   2   6
+-  2   2   6   2   2   6  18  18  18  86  86  86
+- 42  42  42  14  14  14   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  14  14  14
+- 42  42  42  90  90  90  22  22  22   2   2   6
+- 42  42  42   2   2   6  18  18  18 218 218 218
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 250 250 250 221 221 221
+-218 218 218 101 101 101   2   2   6  14  14  14
+- 18  18  18  38  38  38  10  10  10   2   2   6
+-  2   2   6   2   2   6   2   2   6  78  78  78
+- 58  58  58  22  22  22   6   6   6   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  18  18  18
+- 54  54  54  82  82  82   2   2   6  26  26  26
+- 22  22  22   2   2   6 123 123 123 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 250 250 250
+-238 238 238 198 198 198   6   6   6  38  38  38
+- 58  58  58  26  26  26  38  38  38   2   2   6
+-  2   2   6   2   2   6   2   2   6  46  46  46
+- 78  78  78  30  30  30  10  10  10   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0  10  10  10  30  30  30
+- 74  74  74  58  58  58   2   2   6  42  42  42
+-  2   2   6  22  22  22 231 231 231 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 250 250 250
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 246 246 246  46  46  46  38  38  38
+- 42  42  42  14  14  14  38  38  38  14  14  14
+-  2   2   6   2   2   6   2   2   6   6   6   6
+- 86  86  86  46  46  46  14  14  14   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   6   6   6  14  14  14  42  42  42
+- 90  90  90  18  18  18  18  18  18  26  26  26
+-  2   2   6 116 116 116 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 250 250 250 238 238 238
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253  94  94  94   6   6   6
+-  2   2   6   2   2   6  10  10  10  34  34  34
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 74  74  74  58  58  58  22  22  22   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0  10  10  10  26  26  26  66  66  66
+- 82  82  82   2   2   6  38  38  38   6   6   6
+- 14  14  14 210 210 210 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 246 246 246 242 242 242
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 144 144 144   2   2   6
+-  2   2   6   2   2   6   2   2   6  46  46  46
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 42  42  42  74  74  74  30  30  30  10  10  10
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  6   6   6  14  14  14  42  42  42  90  90  90
+- 26  26  26   6   6   6  42  42  42   2   2   6
+- 74  74  74 250 250 250 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 242 242 242 242 242 242
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 182 182 182   2   2   6
+-  2   2   6   2   2   6   2   2   6  46  46  46
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 10  10  10  86  86  86  38  38  38  10  10  10
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+- 10  10  10  26  26  26  66  66  66  82  82  82
+-  2   2   6  22  22  22  18  18  18   2   2   6
+-149 149 149 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 234 234 234 242 242 242
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 206 206 206   2   2   6
+-  2   2   6   2   2   6   2   2   6  38  38  38
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  6   6   6  86  86  86  46  46  46  14  14  14
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 18  18  18  46  46  46  86  86  86  18  18  18
+-  2   2   6  34  34  34  10  10  10   6   6   6
+-210 210 210 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 234 234 234 242 242 242
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 221 221 221   6   6   6
+-  2   2   6   2   2   6   6   6   6  30  30  30
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6  82  82  82  54  54  54  18  18  18
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 26  26  26  66  66  66  62  62  62   2   2   6
+-  2   2   6  38  38  38  10  10  10  26  26  26
+-238 238 238 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 231 231 231 238 238 238
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 231 231 231   6   6   6
+-  2   2   6   2   2   6  10  10  10  30  30  30
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6  66  66  66  58  58  58  22  22  22
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 38  38  38  78  78  78   6   6   6   2   2   6
+-  2   2   6  46  46  46  14  14  14  42  42  42
+-246 246 246 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 231 231 231 242 242 242
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 234 234 234  10  10  10
+-  2   2   6   2   2   6  22  22  22  14  14  14
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6  66  66  66  62  62  62  22  22  22
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  18  18  18
+- 50  50  50  74  74  74   2   2   6   2   2   6
+- 14  14  14  70  70  70  34  34  34  62  62  62
+-250 250 250 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 231 231 231 246 246 246
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 234 234 234  14  14  14
+-  2   2   6   2   2   6  30  30  30   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6  66  66  66  62  62  62  22  22  22
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  18  18  18
+- 54  54  54  62  62  62   2   2   6   2   2   6
+-  2   2   6  30  30  30  46  46  46  70  70  70
+-250 250 250 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 231 231 231 246 246 246
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 226 226 226  10  10  10
+-  2   2   6   6   6   6  30  30  30   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6  66  66  66  58  58  58  22  22  22
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  22  22  22
+- 58  58  58  62  62  62   2   2   6   2   2   6
+-  2   2   6   2   2   6  30  30  30  78  78  78
+-250 250 250 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 231 231 231 246 246 246
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 206 206 206   2   2   6
+- 22  22  22  34  34  34  18  14   6  22  22  22
+- 26  26  26  18  18  18   6   6   6   2   2   6
+-  2   2   6  82  82  82  54  54  54  18  18  18
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  26  26  26
+- 62  62  62 106 106 106  74  54  14 185 133  11
+-210 162  10 121  92   8   6   6   6  62  62  62
+-238 238 238 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 231 231 231 246 246 246
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 158 158 158  18  18  18
+- 14  14  14   2   2   6   2   2   6   2   2   6
+-  6   6   6  18  18  18  66  66  66  38  38  38
+-  6   6   6  94  94  94  50  50  50  18  18  18
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 10  10  10  10  10  10  18  18  18  38  38  38
+- 78  78  78 142 134 106 216 158  10 242 186  14
+-246 190  14 246 190  14 156 118  10  10  10  10
+- 90  90  90 238 238 238 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 231 231 231 250 250 250
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 246 230 190
+-238 204  91 238 204  91 181 142  44  37  26   9
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6  38  38  38  46  46  46
+- 26  26  26 106 106 106  54  54  54  18  18  18
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   6   6   6  14  14  14  22  22  22
+- 30  30  30  38  38  38  50  50  50  70  70  70
+-106 106 106 190 142  34 226 170  11 242 186  14
+-246 190  14 246 190  14 246 190  14 154 114  10
+-  6   6   6  74  74  74 226 226 226 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 231 231 231 250 250 250
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 228 184  62
+-241 196  14 241 208  19 232 195  16  38  30  10
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   6   6   6  30  30  30  26  26  26
+-203 166  17 154 142  90  66  66  66  26  26  26
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  6   6   6  18  18  18  38  38  38  58  58  58
+- 78  78  78  86  86  86 101 101 101 123 123 123
+-175 146  61 210 150  10 234 174  13 246 186  14
+-246 190  14 246 190  14 246 190  14 238 190  10
+-102  78  10   2   2   6  46  46  46 198 198 198
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 234 234 234 242 242 242
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 224 178  62
+-242 186  14 241 196  14 210 166  10  22  18   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   6   6   6 121  92   8
+-238 202  15 232 195  16  82  82  82  34  34  34
+- 10  10  10   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+- 14  14  14  38  38  38  70  70  70 154 122  46
+-190 142  34 200 144  11 197 138  11 197 138  11
+-213 154  11 226 170  11 242 186  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-225 175  15  46  32   6   2   2   6  22  22  22
+-158 158 158 250 250 250 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 250 250 250 242 242 242 224 178  62
+-239 182  13 236 186  11 213 154  11  46  32   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6  61  42   6 225 175  15
+-238 190  10 236 186  11 112 100  78  42  42  42
+- 14  14  14   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 22  22  22  54  54  54 154 122  46 213 154  11
+-226 170  11 230 174  11 226 170  11 226 170  11
+-236 178  12 242 186  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-241 196  14 184 144  12  10  10  10   2   2   6
+-  6   6   6 116 116 116 242 242 242 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 231 231 231 198 198 198 214 170  54
+-236 178  12 236 178  12 210 150  10 137  92   6
+- 18  14   6   2   2   6   2   2   6   2   2   6
+-  6   6   6  70  47   6 200 144  11 236 178  12
+-239 182  13 239 182  13 124 112  88  58  58  58
+- 22  22  22   6   6   6   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 30  30  30  70  70  70 180 133  36 226 170  11
+-239 182  13 242 186  14 242 186  14 246 186  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 232 195  16  98  70   6   2   2   6
+-  2   2   6   2   2   6  66  66  66 221 221 221
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 206 206 206 198 198 198 214 166  58
+-230 174  11 230 174  11 216 158  10 192 133   9
+-163 110   8 116  81   8 102  78  10 116  81   8
+-167 114   7 197 138  11 226 170  11 239 182  13
+-242 186  14 242 186  14 162 146  94  78  78  78
+- 34  34  34  14  14  14   6   6   6   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 30  30  30  78  78  78 190 142  34 226 170  11
+-239 182  13 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 241 196  14 203 166  17  22  18   6
+-  2   2   6   2   2   6   2   2   6  38  38  38
+-218 218 218 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-250 250 250 206 206 206 198 198 198 202 162  69
+-226 170  11 236 178  12 224 166  10 210 150  10
+-200 144  11 197 138  11 192 133   9 197 138  11
+-210 150  10 226 170  11 242 186  14 246 190  14
+-246 190  14 246 186  14 225 175  15 124 112  88
+- 62  62  62  30  30  30  14  14  14   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 30  30  30  78  78  78 174 135  50 224 166  10
+-239 182  13 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 241 196  14 139 102  15
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 78  78  78 250 250 250 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-250 250 250 214 214 214 198 198 198 190 150  46
+-219 162  10 236 178  12 234 174  13 224 166  10
+-216 158  10 213 154  11 213 154  11 216 158  10
+-226 170  11 239 182  13 246 190  14 246 190  14
+-246 190  14 246 190  14 242 186  14 206 162  42
+-101 101 101  58  58  58  30  30  30  14  14  14
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 30  30  30  74  74  74 174 135  50 216 158  10
+-236 178  12 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 241 196  14 226 184  13
+- 61  42   6   2   2   6   2   2   6   2   2   6
+- 22  22  22 238 238 238 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 226 226 226 187 187 187 180 133  36
+-216 158  10 236 178  12 239 182  13 236 178  12
+-230 174  11 226 170  11 226 170  11 230 174  11
+-236 178  12 242 186  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 186  14 239 182  13
+-206 162  42 106 106 106  66  66  66  34  34  34
+- 14  14  14   6   6   6   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 26  26  26  70  70  70 163 133  67 213 154  11
+-236 178  12 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 241 196  14
+-190 146  13  18  14   6   2   2   6   2   2   6
+- 46  46  46 246 246 246 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 221 221 221  86  86  86 156 107  11
+-216 158  10 236 178  12 242 186  14 246 186  14
+-242 186  14 239 182  13 239 182  13 242 186  14
+-242 186  14 246 186  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-242 186  14 225 175  15 142 122  72  66  66  66
+- 30  30  30  10  10  10   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 26  26  26  70  70  70 163 133  67 210 150  10
+-236 178  12 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-232 195  16 121  92   8  34  34  34 106 106 106
+-221 221 221 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-242 242 242  82  82  82  18  14   6 163 110   8
+-216 158  10 236 178  12 242 186  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 242 186  14 163 133  67
+- 46  46  46  18  18  18   6   6   6   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 30  30  30  78  78  78 163 133  67 210 150  10
+-236 178  12 246 186  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-241 196  14 215 174  15 190 178 144 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 218 218 218
+- 58  58  58   2   2   6  22  18   6 167 114   7
+-216 158  10 236 178  12 246 186  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 186  14 242 186  14 190 150  46
+- 54  54  54  22  22  22   6   6   6   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  14  14  14
+- 38  38  38  86  86  86 180 133  36 213 154  11
+-236 178  12 246 186  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 232 195  16 190 146  13 214 214 214
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 250 250 250 170 170 170  26  26  26
+-  2   2   6   2   2   6  37  26   9 163 110   8
+-219 162  10 239 182  13 246 186  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 186  14 236 178  12 224 166  10 142 122  72
+- 46  46  46  18  18  18   6   6   6   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  18  18  18
+- 50  50  50 109 106  95 192 133   9 224 166  10
+-242 186  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-242 186  14 226 184  13 210 162  10 142 110  46
+-226 226 226 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-198 198 198  66  66  66   2   2   6   2   2   6
+-  2   2   6   2   2   6  50  34   6 156 107  11
+-219 162  10 239 182  13 246 186  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 242 186  14
+-234 174  13 213 154  11 154 122  46  66  66  66
+- 30  30  30  10  10  10   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  22  22  22
+- 58  58  58 154 121  60 206 145  10 234 174  13
+-242 186  14 246 186  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 186  14 236 178  12 210 162  10 163 110   8
+- 61  42   6 138 138 138 218 218 218 250 250 250
+-253 253 253 253 253 253 253 253 253 250 250 250
+-242 242 242 210 210 210 144 144 144  66  66  66
+-  6   6   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6  61  42   6 163 110   8
+-216 158  10 236 178  12 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 239 182  13 230 174  11 216 158  10
+-190 142  34 124 112  88  70  70  70  38  38  38
+- 18  18  18   6   6   6   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  22  22  22
+- 62  62  62 168 124  44 206 145  10 224 166  10
+-236 178  12 239 182  13 242 186  14 242 186  14
+-246 186  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 236 178  12 216 158  10 175 118   6
+- 80  54   7   2   2   6   6   6   6  30  30  30
+- 54  54  54  62  62  62  50  50  50  38  38  38
+- 14  14  14   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   6   6   6  80  54   7 167 114   7
+-213 154  11 236 178  12 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 242 186  14 239 182  13 239 182  13
+-230 174  11 210 150  10 174 135  50 124 112  88
+- 82  82  82  54  54  54  34  34  34  18  18  18
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  18  18  18
+- 50  50  50 158 118  36 192 133   9 200 144  11
+-216 158  10 219 162  10 224 166  10 226 170  11
+-230 174  11 236 178  12 239 182  13 239 182  13
+-242 186  14 246 186  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 186  14 230 174  11 210 150  10 163 110   8
+-104  69   6  10  10  10   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   6   6   6  91  60   6 167 114   7
+-206 145  10 230 174  11 242 186  14 246 190  14
+-246 190  14 246 190  14 246 186  14 242 186  14
+-239 182  13 230 174  11 224 166  10 213 154  11
+-180 133  36 124 112  88  86  86  86  58  58  58
+- 38  38  38  22  22  22  10  10  10   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  14  14  14
+- 34  34  34  70  70  70 138 110  50 158 118  36
+-167 114   7 180 123   7 192 133   9 197 138  11
+-200 144  11 206 145  10 213 154  11 219 162  10
+-224 166  10 230 174  11 239 182  13 242 186  14
+-246 186  14 246 186  14 246 186  14 246 186  14
+-239 182  13 216 158  10 185 133  11 152  99   6
+-104  69   6  18  14   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   6   6   6  80  54   7 152  99   6
+-192 133   9 219 162  10 236 178  12 239 182  13
+-246 186  14 242 186  14 239 182  13 236 178  12
+-224 166  10 206 145  10 192 133   9 154 121  60
+- 94  94  94  62  62  62  42  42  42  22  22  22
+- 14  14  14   6   6   6   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 18  18  18  34  34  34  58  58  58  78  78  78
+-101  98  89 124 112  88 142 110  46 156 107  11
+-163 110   8 167 114   7 175 118   6 180 123   7
+-185 133  11 197 138  11 210 150  10 219 162  10
+-226 170  11 236 178  12 236 178  12 234 174  13
+-219 162  10 197 138  11 163 110   8 130  83   6
+- 91  60   6  10  10  10   2   2   6   2   2   6
+- 18  18  18  38  38  38  38  38  38  38  38  38
+- 38  38  38  38  38  38  38  38  38  38  38  38
+- 38  38  38  38  38  38  26  26  26   2   2   6
+-  2   2   6   6   6   6  70  47   6 137  92   6
+-175 118   6 200 144  11 219 162  10 230 174  11
+-234 174  13 230 174  11 219 162  10 210 150  10
+-192 133   9 163 110   8 124 112  88  82  82  82
+- 50  50  50  30  30  30  14  14  14   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  6   6   6  14  14  14  22  22  22  34  34  34
+- 42  42  42  58  58  58  74  74  74  86  86  86
+-101  98  89 122 102  70 130  98  46 121  87  25
+-137  92   6 152  99   6 163 110   8 180 123   7
+-185 133  11 197 138  11 206 145  10 200 144  11
+-180 123   7 156 107  11 130  83   6 104  69   6
+- 50  34   6  54  54  54 110 110 110 101  98  89
+- 86  86  86  82  82  82  78  78  78  78  78  78
+- 78  78  78  78  78  78  78  78  78  78  78  78
+- 78  78  78  82  82  82  86  86  86  94  94  94
+-106 106 106 101 101 101  86  66  34 124  80   6
+-156 107  11 180 123   7 192 133   9 200 144  11
+-206 145  10 200 144  11 192 133   9 175 118   6
+-139 102  15 109 106  95  70  70  70  42  42  42
+- 22  22  22  10  10  10   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  10  10  10
+- 14  14  14  22  22  22  30  30  30  38  38  38
+- 50  50  50  62  62  62  74  74  74  90  90  90
+-101  98  89 112 100  78 121  87  25 124  80   6
+-137  92   6 152  99   6 152  99   6 152  99   6
+-138  86   6 124  80   6  98  70   6  86  66  30
+-101  98  89  82  82  82  58  58  58  46  46  46
+- 38  38  38  34  34  34  34  34  34  34  34  34
+- 34  34  34  34  34  34  34  34  34  34  34  34
+- 34  34  34  34  34  34  38  38  38  42  42  42
+- 54  54  54  82  82  82  94  86  76  91  60   6
+-134  86   6 156 107  11 167 114   7 175 118   6
+-175 118   6 167 114   7 152  99   6 121  87  25
+-101  98  89  62  62  62  34  34  34  18  18  18
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   6   6   6   6   6   6  10  10  10
+- 18  18  18  22  22  22  30  30  30  42  42  42
+- 50  50  50  66  66  66  86  86  86 101  98  89
+-106  86  58  98  70   6 104  69   6 104  69   6
+-104  69   6  91  60   6  82  62  34  90  90  90
+- 62  62  62  38  38  38  22  22  22  14  14  14
+- 10  10  10  10  10  10  10  10  10  10  10  10
+- 10  10  10  10  10  10   6   6   6  10  10  10
+- 10  10  10  10  10  10  10  10  10  14  14  14
+- 22  22  22  42  42  42  70  70  70  89  81  66
+- 80  54   7 104  69   6 124  80   6 137  92   6
+-134  86   6 116  81   8 100  82  52  86  86  86
+- 58  58  58  30  30  30  14  14  14   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   6   6   6  10  10  10  14  14  14
+- 18  18  18  26  26  26  38  38  38  54  54  54
+- 70  70  70  86  86  86  94  86  76  89  81  66
+- 89  81  66  86  86  86  74  74  74  50  50  50
+- 30  30  30  14  14  14   6   6   6   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  6   6   6  18  18  18  34  34  34  58  58  58
+- 82  82  82  89  81  66  89  81  66  89  81  66
+- 94  86  66  94  86  76  74  74  74  50  50  50
+- 26  26  26  14  14  14   6   6   6   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  6   6   6   6   6   6  14  14  14  18  18  18
+- 30  30  30  38  38  38  46  46  46  54  54  54
+- 50  50  50  42  42  42  30  30  30  18  18  18
+- 10  10  10   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   6   6   6  14  14  14  26  26  26
+- 38  38  38  50  50  50  58  58  58  58  58  58
+- 54  54  54  42  42  42  30  30  30  18  18  18
+- 10  10  10   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+-  6   6   6  10  10  10  14  14  14  18  18  18
+- 18  18  18  14  14  14  10  10  10   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 14  14  14  18  18  18  22  22  22  22  22  22
+- 18  18  18  14  14  14  10  10  10   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++[... remaining rows of image pixel data (RGB colour triples) for the bitmap added by this hunk omitted for readability ...]
+diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
+index fe92eed..106e085 100644
+--- a/drivers/video/mb862xx/mb862xxfb_accel.c
++++ b/drivers/video/mb862xx/mb862xxfb_accel.c
+@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
+       struct mb862xxfb_par *par = info->par;
+       if (info->var.bits_per_pixel == 32) {
+-              info->fbops->fb_fillrect = cfb_fillrect;
+-              info->fbops->fb_copyarea = cfb_copyarea;
+-              info->fbops->fb_imageblit = cfb_imageblit;
++              pax_open_kernel();
++              *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
++              *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
++              *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
++              pax_close_kernel();
+       } else {
+               outreg(disp, GC_L0EM, 3);
+-              info->fbops->fb_fillrect = mb86290fb_fillrect;
+-              info->fbops->fb_copyarea = mb86290fb_copyarea;
+-              info->fbops->fb_imageblit = mb86290fb_imageblit;
++              pax_open_kernel();
++              *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
++              *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
++              *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
++              pax_close_kernel();
+       }
+       outreg(draw, GDC_REG_DRAW_BASE, 0);
+       outreg(draw, GDC_REG_MODE_MISC, 0x8000);
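The change above, repeated across the framebuffer drivers in this patch, is the PaX "constify" write idiom: with this hardening patch applied, function-pointer tables such as struct fb_ops are treated as read-only data, so a driver that swaps its blitting routines at runtime has to bracket the assignment with pax_open_kernel()/pax_close_kernel() and write through a *(void **) cast to get past the const qualifier. The sketch below is a minimal user-space illustration of that idiom only: open_kernel()/close_kernel() are empty stand-ins for the real PaX primitives, and fb_ops here is a simplified mock rather than the kernel definition (kept writable so the example actually runs).

#include <stdio.h>

/* Simplified stand-in for struct fb_ops: a table of function pointers.
 * In the patched kernel such tables live in read-only memory; here the
 * object stays writable so the sketch runs as plain user-space C. */
struct fb_ops {
        void (*fb_fillrect)(void);
        void (*fb_copyarea)(void);
};

static void cfb_fillrect(void)   { puts("generic fillrect"); }
static void cfb_copyarea(void)   { puts("generic copyarea"); }
static void accel_fillrect(void) { puts("accelerated fillrect"); }

/* Stand-ins for pax_open_kernel()/pax_close_kernel(); the real ones
 * temporarily lift write protection on read-only kernel data. */
static void open_kernel(void)  { }
static void close_kernel(void) { }

static struct fb_ops ops = {
        .fb_fillrect = cfb_fillrect,
        .fb_copyarea = cfb_copyarea,
};

int main(void)
{
        int have_accel = 1;

        open_kernel();
        /* Writing through a (void **) cast is how the patch sidesteps
         * the const qualifier added to ops structures. */
        *(void **)&ops.fb_fillrect =
                (void *)(have_accel ? accel_fillrect : cfb_fillrect);
        close_kernel();

        ops.fb_fillrect();
        ops.fb_copyarea();
        return 0;
}

The same bracketed-write shape appears again in the nvidia, s1d13xxxfb, smscufx and udlfb hunks that follow; only the function pointers being swapped differ.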
+diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
+index ff22871..b129bed 100644
+--- a/drivers/video/nvidia/nvidia.c
++++ b/drivers/video/nvidia/nvidia.c
+@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
+       info->fix.line_length = (info->var.xres_virtual *
+                                info->var.bits_per_pixel) >> 3;
+       if (info->var.accel_flags) {
+-              info->fbops->fb_imageblit = nvidiafb_imageblit;
+-              info->fbops->fb_fillrect = nvidiafb_fillrect;
+-              info->fbops->fb_copyarea = nvidiafb_copyarea;
+-              info->fbops->fb_sync = nvidiafb_sync;
++              pax_open_kernel();
++              *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
++              *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
++              *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
++              *(void **)&info->fbops->fb_sync = nvidiafb_sync;
++              pax_close_kernel();
+               info->pixmap.scan_align = 4;
+               info->flags &= ~FBINFO_HWACCEL_DISABLED;
+               info->flags |= FBINFO_READS_FAST;
+               NVResetGraphics(info);
+       } else {
+-              info->fbops->fb_imageblit = cfb_imageblit;
+-              info->fbops->fb_fillrect = cfb_fillrect;
+-              info->fbops->fb_copyarea = cfb_copyarea;
+-              info->fbops->fb_sync = NULL;
++              pax_open_kernel();
++              *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
++              *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
++              *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
++              *(void **)&info->fbops->fb_sync = NULL;
++              pax_close_kernel();
+               info->pixmap.scan_align = 1;
+               info->flags |= FBINFO_HWACCEL_DISABLED;
+               info->flags &= ~FBINFO_READS_FAST;
+@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
+       info->pixmap.size = 8 * 1024;
+       info->pixmap.flags = FB_PIXMAP_SYSTEM;
+-      if (!hwcur)
+-          info->fbops->fb_cursor = NULL;
++      if (!hwcur) {
++              pax_open_kernel();
++              *(void **)&info->fbops->fb_cursor = NULL;
++              pax_close_kernel();
++      }
+       info->var.accel_flags = (!noaccel);
+diff --git a/drivers/video/output.c b/drivers/video/output.c
+index 0d6f2cd..6285b97 100644
+--- a/drivers/video/output.c
++++ b/drivers/video/output.c
+@@ -97,7 +97,7 @@ struct output_device *video_output_register(const char *name,
+       new_dev->props = op;
+       new_dev->dev.class = &video_output_class;
+       new_dev->dev.parent = dev;
+-      dev_set_name(&new_dev->dev, name);
++      dev_set_name(&new_dev->dev, "%s", name);
+       dev_set_drvdata(&new_dev->dev, devdata);
+       ret_code = device_register(&new_dev->dev);
+       if (ret_code) {
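The one-line change to video_output_register() above is a format-string fix rather than a PaX-specific one: dev_set_name() takes a printf-style format, so passing the caller-supplied name directly as the format would let any '%' sequence inside it be interpreted as a conversion specifier. Handing it a fixed "%s" format with the name as an argument treats the string purely as data. The stand-alone sketch below reproduces the same hazard with snprintf(); it is an illustration, not kernel code.

#include <stdio.h>

int main(void)
{
        /* A name that happens to contain a conversion specifier. */
        const char *name = "video%sout";
        char buf[64];

        /*
         * Unsafe shape (left commented out): using the untrusted string
         * as the format makes "%s" consume a nonexistent argument,
         * which is undefined behaviour.
         *
         *      snprintf(buf, sizeof(buf), name);
         */

        /* Safe shape, matching dev_set_name(&dev, "%s", name): the
         * string is passed as data to a fixed format. */
        snprintf(buf, sizeof(buf), "%s", name);
        printf("device name: %s\n", buf);
        return 0;
}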
+diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
+index 05c2dc3..ea1f391 100644
+--- a/drivers/video/s1d13xxxfb.c
++++ b/drivers/video/s1d13xxxfb.c
+@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
+       switch(prod_id) {
+       case S1D13506_PROD_ID:  /* activate acceleration */
+-              s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
+-              s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
++              pax_open_kernel();
++              *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
++              *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
++              pax_close_kernel();
+               info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
+                       FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
+               break;
+diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
+index b2b33fc..f9f4658 100644
+--- a/drivers/video/smscufx.c
++++ b/drivers/video/smscufx.c
+@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
+               fb_deferred_io_cleanup(info);
+               kfree(info->fbdefio);
+               info->fbdefio = NULL;
+-              info->fbops->fb_mmap = ufx_ops_mmap;
++              pax_open_kernel();
++              *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
++              pax_close_kernel();
+       }
+       pr_debug("released /dev/fb%d user=%d count=%d",
+diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
+index ec03e72..f578436 100644
+--- a/drivers/video/udlfb.c
++++ b/drivers/video/udlfb.c
+@@ -623,11 +623,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
+               dlfb_urb_completion(urb);
+ error:
+-      atomic_add(bytes_sent, &dev->bytes_sent);
+-      atomic_add(bytes_identical, &dev->bytes_identical);
+-      atomic_add(width*height*2, &dev->bytes_rendered);
++      atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
++      atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
++      atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
+       end_cycles = get_cycles();
+-      atomic_add(((unsigned int) ((end_cycles - start_cycles)
++      atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
+                   >> 10)), /* Kcycles */
+                  &dev->cpu_kcycles_used);
+@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
+               dlfb_urb_completion(urb);
+ error:
+-      atomic_add(bytes_sent, &dev->bytes_sent);
+-      atomic_add(bytes_identical, &dev->bytes_identical);
+-      atomic_add(bytes_rendered, &dev->bytes_rendered);
++      atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
++      atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
++      atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
+       end_cycles = get_cycles();
+-      atomic_add(((unsigned int) ((end_cycles - start_cycles)
++      atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
+                   >> 10)), /* Kcycles */
+                  &dev->cpu_kcycles_used);
+ }
+@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
+               fb_deferred_io_cleanup(info);
+               kfree(info->fbdefio);
+               info->fbdefio = NULL;
+-              info->fbops->fb_mmap = dlfb_ops_mmap;
++              pax_open_kernel();
++              *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
++              pax_close_kernel();
+       }
+       pr_warn("released /dev/fb%d user=%d count=%d\n",
+@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
+       struct fb_info *fb_info = dev_get_drvdata(fbdev);
+       struct dlfb_data *dev = fb_info->par;
+       return snprintf(buf, PAGE_SIZE, "%u\n",
+-                      atomic_read(&dev->bytes_rendered));
++                      atomic_read_unchecked(&dev->bytes_rendered));
+ }
+ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
+@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
+       struct fb_info *fb_info = dev_get_drvdata(fbdev);
+       struct dlfb_data *dev = fb_info->par;
+       return snprintf(buf, PAGE_SIZE, "%u\n",
+-                      atomic_read(&dev->bytes_identical));
++                      atomic_read_unchecked(&dev->bytes_identical));
+ }
+ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
+@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
+       struct fb_info *fb_info = dev_get_drvdata(fbdev);
+       struct dlfb_data *dev = fb_info->par;
+       return snprintf(buf, PAGE_SIZE, "%u\n",
+-                      atomic_read(&dev->bytes_sent));
++                      atomic_read_unchecked(&dev->bytes_sent));
+ }
+ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
+@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
+       struct fb_info *fb_info = dev_get_drvdata(fbdev);
+       struct dlfb_data *dev = fb_info->par;
+       return snprintf(buf, PAGE_SIZE, "%u\n",
+-                      atomic_read(&dev->cpu_kcycles_used));
++                      atomic_read_unchecked(&dev->cpu_kcycles_used));
+ }
+ static ssize_t edid_show(
+@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
+       struct fb_info *fb_info = dev_get_drvdata(fbdev);
+       struct dlfb_data *dev = fb_info->par;
+-      atomic_set(&dev->bytes_rendered, 0);
+-      atomic_set(&dev->bytes_identical, 0);
+-      atomic_set(&dev->bytes_sent, 0);
+-      atomic_set(&dev->cpu_kcycles_used, 0);
++      atomic_set_unchecked(&dev->bytes_rendered, 0);
++      atomic_set_unchecked(&dev->bytes_identical, 0);
++      atomic_set_unchecked(&dev->bytes_sent, 0);
++      atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
+       return count;
+ }
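
The udlfb counters touched above only track throughput statistics, so the patch moves them to PaX's atomic_*_unchecked helpers, which keep the ordinary atomic_t behaviour without the overflow trap that PAX_REFCOUNT adds to the checked variants. A rough portable analogue with C11 atomics (the kernel types and helpers are not used here): statistics counters where wrap-around is acceptable and no overflow policy is wanted.

#include <stdatomic.h>
#include <stdio.h>

/* statistics only: wrap-around is harmless, so no overflow check is wanted */
static atomic_uint bytes_sent;
static atomic_uint bytes_rendered;

static void account(unsigned int sent, unsigned int rendered)
{
    atomic_fetch_add_explicit(&bytes_sent, sent, memory_order_relaxed);
    atomic_fetch_add_explicit(&bytes_rendered, rendered, memory_order_relaxed);
}

int main(void)
{
    account(512, 4096);
    printf("sent=%u rendered=%u\n",
           atomic_load_explicit(&bytes_sent, memory_order_relaxed),
           atomic_load_explicit(&bytes_rendered, memory_order_relaxed));
    return 0;
}
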
+diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
+index e328a61..1b08ecb 100644
+--- a/drivers/video/uvesafb.c
++++ b/drivers/video/uvesafb.c
+@@ -19,6 +19,7 @@
+ #include <linux/io.h>
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
++#include <linux/moduleloader.h>
+ #include <video/edid.h>
+ #include <video/uvesafb.h>
+ #ifdef CONFIG_X86
+@@ -569,10 +570,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
+       if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
+               par->pmi_setpal = par->ypan = 0;
+       } else {
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_MODULES
++              par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
++#endif
++              if (!par->pmi_code) {
++                      par->pmi_setpal = par->ypan = 0;
++                      return 0;
++              }
++#endif
++
+               par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
+                                               + task->t.regs.edi);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++              pax_open_kernel();
++              memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
++              pax_close_kernel();
++
++              par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
++              par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
++#else
+               par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
+               par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
++#endif
++
+               printk(KERN_INFO "uvesafb: protected mode interface info at "
+                                "%04x:%04x\n",
+                                (u16)task->t.regs.es, (u16)task->t.regs.edi);
+@@ -817,13 +840,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
+       par->ypan = ypan;
+       if (par->pmi_setpal || par->ypan) {
++#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
+               if (__supported_pte_mask & _PAGE_NX) {
+                       par->pmi_setpal = par->ypan = 0;
+                       printk(KERN_WARNING "uvesafb: NX protection is actively."
+                               "We have better not to use the PMI.\n");
+-              } else {
++              } else
++#endif
+                       uvesafb_vbe_getpmi(task, par);
+-              }
+       }
+ #else
+       /* The protected mode interface is not available on non-x86. */
+@@ -1457,8 +1481,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
+       info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
+       /* Disable blanking if the user requested so. */
+-      if (!blank)
+-              info->fbops->fb_blank = NULL;
++      if (!blank) {
++              pax_open_kernel();
++              *(void **)&info->fbops->fb_blank = NULL;
++              pax_close_kernel();
++      }
+       /*
+        * Find out how much IO memory is required for the mode with
+@@ -1534,8 +1561,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
+       info->flags = FBINFO_FLAG_DEFAULT |
+                       (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
+-      if (!par->ypan)
+-              info->fbops->fb_pan_display = NULL;
++      if (!par->ypan) {
++              pax_open_kernel();
++              *(void **)&info->fbops->fb_pan_display = NULL;
++              pax_close_kernel();
++      }
+ }
+ static void uvesafb_init_mtrr(struct fb_info *info)
+@@ -1836,6 +1866,11 @@ out:
+       if (par->vbe_modes)
+               kfree(par->vbe_modes);
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++      if (par->pmi_code)
++              module_free_exec(NULL, par->pmi_code);
++#endif
++
+       framebuffer_release(info);
+       return err;
+ }
+@@ -1862,6 +1897,12 @@ static int uvesafb_remove(struct platform_device *dev)
+                               kfree(par->vbe_state_orig);
+                       if (par->vbe_state_saved)
+                               kfree(par->vbe_state_saved);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++                      if (par->pmi_code)
++                              module_free_exec(NULL, par->pmi_code);
++#endif
++
+               }
+               framebuffer_release(info);
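
With KERNEXEC the video BIOS's protected-mode interface can no longer be executed from its original, now non-executable location, so the uvesafb hunks copy it into a region obtained with module_alloc_exec() and translate the entry points with ktva_ktla(). The sketch below shows the same copy-then-execute discipline under W^X rules from userspace; it assumes x86-64 Linux, and the byte sequence simply encodes mov eax, 42; ret:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    /* mov eax, 42 ; ret  --  x86-64 only, purely illustrative */
    static const unsigned char code[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };

    /* 1. allocate writable, non-executable memory */
    void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED)
        return 1;

    /* 2. copy the code in while the mapping is still not executable */
    memcpy(buf, code, sizeof(code));

    /* 3. drop write, gain execute: never writable and executable at once */
    if (mprotect(buf, 4096, PROT_READ | PROT_EXEC) != 0)
        return 1;

    int (*fn)(void) = (int (*)(void))buf;
    printf("fn() = %d\n", fn());
    return 0;
}

The memcpy() in the uvesafb hunk sits between pax_open_kernel() and pax_close_kernel() for the same reason the sketch keeps the mapping non-executable while it is writable.
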
+diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
+index 501b340..d80aa17 100644
+--- a/drivers/video/vesafb.c
++++ b/drivers/video/vesafb.c
+@@ -9,6 +9,7 @@
+  */
+ #include <linux/module.h>
++#include <linux/moduleloader.h>
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/string.h>
+@@ -52,8 +53,8 @@ static int   vram_remap __initdata;          /* Set amount of memory to be used */
+ static int   vram_total __initdata;           /* Set total amount of memory */
+ static int   pmi_setpal __read_mostly = 1;    /* pmi for palette changes ??? */
+ static int   ypan       __read_mostly;                /* 0..nothing, 1..ypan, 2..ywrap */
+-static void  (*pmi_start)(void) __read_mostly;
+-static void  (*pmi_pal)  (void) __read_mostly;
++static void  (*pmi_start)(void) __read_only;
++static void  (*pmi_pal)  (void) __read_only;
+ static int   depth      __read_mostly;
+ static int   vga_compat __read_mostly;
+ /* --------------------------------------------------------------------- */
+@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
+       unsigned int size_vmode;
+       unsigned int size_remap;
+       unsigned int size_total;
++      void *pmi_code = NULL;
+       if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
+               return -ENODEV;
+@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
+               size_remap = size_total;
+       vesafb_fix.smem_len = size_remap;
+-#ifndef __i386__
+-      screen_info.vesapm_seg = 0;
+-#endif
+-
+       if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
+               printk(KERN_WARNING
+                      "vesafb: cannot reserve video memory at 0x%lx\n",
+@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
+       printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
+              vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
++#ifdef __i386__
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++      pmi_code = module_alloc_exec(screen_info.vesapm_size);
++      if (!pmi_code)
++#elif !defined(CONFIG_PAX_KERNEXEC)
++      if (0)
++#endif
++
++#endif
++      screen_info.vesapm_seg = 0;
++
+       if (screen_info.vesapm_seg) {
+-              printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
+-                     screen_info.vesapm_seg,screen_info.vesapm_off);
++              printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
++                     screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
+       }
+       if (screen_info.vesapm_seg < 0xc000)
+@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
+       if (ypan || pmi_setpal) {
+               unsigned short *pmi_base;
++
+               pmi_base  = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
+-              pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
+-              pmi_pal   = (void*)((char*)pmi_base + pmi_base[2]);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++              pax_open_kernel();
++              memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
++#else
++              pmi_code  = pmi_base;
++#endif
++
++              pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
++              pmi_pal   = (void*)((char*)pmi_code + pmi_base[2]);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++              pmi_start = ktva_ktla(pmi_start);
++              pmi_pal = ktva_ktla(pmi_pal);
++              pax_close_kernel();
++#endif
++
+               printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
+               if (pmi_base[3]) {
+                       printk(KERN_INFO "vesafb: pmi: ports = ");
+@@ -472,8 +498,11 @@ static int __init vesafb_probe(struct platform_device *dev)
+       info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
+               (ypan ? FBINFO_HWACCEL_YPAN : 0);
+-      if (!ypan)
+-              info->fbops->fb_pan_display = NULL;
++      if (!ypan) {
++              pax_open_kernel();
++              *(void **)&info->fbops->fb_pan_display = NULL;
++              pax_close_kernel();
++      }
+       if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
+               err = -ENOMEM;
+@@ -488,6 +517,11 @@ static int __init vesafb_probe(struct platform_device *dev)
+              info->node, info->fix.id);
+       return 0;
+ err:
++
++#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++      module_free_exec(NULL, pmi_code);
++#endif
++
+       if (info->screen_base)
+               iounmap(info->screen_base);
+       framebuffer_release(info);
+diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
+index 88714ae..16c2e11 100644
+--- a/drivers/video/via/via_clock.h
++++ b/drivers/video/via/via_clock.h
+@@ -56,7 +56,7 @@ struct via_clock {
+       void (*set_engine_pll_state)(u8 state);
+       void (*set_engine_pll)(struct via_pll_config config);
+-};
++} __no_const;
+ static inline u32 get_pll_internal_frequency(u32 ref_freq,
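
__no_const is the opt-out for the constify plugin: structures consisting only of function pointers are normally turned into const objects at build time, but via_clock is populated at probe time depending on the detected chip and therefore has to stay writable. A small illustration of the trade-off, with the attribute reduced to an empty macro so the snippet builds outside a plugin-enabled tree (an assumption, not the plugin's real definition):

#include <stdio.h>

#define __no_const              /* real definition comes from the PaX headers */

struct const_ops {              /* would be constified: set once, never rewritten */
    void (*tick)(void);
};

struct runtime_ops {            /* marked __no_const: chosen at probe time */
    void (*tick)(void);
} __no_const;

static void tick_a(void) { puts("tick A"); }
static void tick_b(void) { puts("tick B"); }

static const struct const_ops fixed = { .tick = tick_a };
static struct runtime_ops selected;

int main(int argc, char **argv)
{
    selected.tick = (argc > 1) ? tick_b : tick_a;   /* runtime decision */
    fixed.tick();
    selected.tick();
    return 0;
}
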
+diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
+index fef20db..d28b1ab 100644
+--- a/drivers/xen/xenfs/xenstored.c
++++ b/drivers/xen/xenfs/xenstored.c
+@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
+ static int xsd_kva_open(struct inode *inode, struct file *file)
+ {
+       file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++                                             NULL);
++#else
+                                              xen_store_interface);
++#endif
++
+       if (!file->private_data)
+               return -ENOMEM;
+       return 0;
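
The xenstored change is GRKERNSEC_HIDESYM at work: the kernel virtual address of xen_store_interface would otherwise be handed verbatim to userspace, and leaked addresses make randomized kernel layouts easier to defeat. Mainline addresses the same class of leak with %pK plus kptr_restrict; a toy model of the gate is shown here (hide_pointers is an illustrative stand-in, not a real interface):

#include <stdio.h>

static int hide_pointers = 1;   /* models CONFIG_GRKERNSEC_HIDESYM / kptr_restrict */

static void show_interface(const void *iface)
{
    if (hide_pointers)
        printf("interface at %p\n", (void *)NULL);   /* censored, as in the hunk */
    else
        printf("interface at %p\n", iface);
}

int main(void)
{
    int dummy;
    show_interface(&dummy);
    return 0;
}
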
+diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
+index 055562c..fdfb10d 100644
+--- a/fs/9p/vfs_addr.c
++++ b/fs/9p/vfs_addr.c
+@@ -186,7 +186,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
+       retval = v9fs_file_write_internal(inode,
+                                         v9inode->writeback_fid,
+-                                        (__force const char __user *)buffer,
++                                        (const char __force_user *)buffer,
+                                         len, &offset, 0);
+       if (retval > 0)
+               retval = 0;
+diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
+index d86edc8..40ff2fb 100644
+--- a/fs/9p/vfs_inode.c
++++ b/fs/9p/vfs_inode.c
+@@ -1314,7 +1314,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+ void
+ v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+ {
+-      char *s = nd_get_link(nd);
++      const char *s = nd_get_link(nd);
+       p9_debug(P9_DEBUG_VFS, " %s %s\n",
+                dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
+diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
+index 370b24c..ff0be7b 100644
+--- a/fs/Kconfig.binfmt
++++ b/fs/Kconfig.binfmt
+@@ -103,7 +103,7 @@ config HAVE_AOUT
+ config BINFMT_AOUT
+       tristate "Kernel support for a.out and ECOFF binaries"
+-      depends on HAVE_AOUT
++      depends on HAVE_AOUT && BROKEN
+       ---help---
+         A.out (Assembler.OUTput) is a set of formats for libraries and
+         executables used in the earliest versions of UNIX.  Linux used
+diff --git a/fs/aio.c b/fs/aio.c
+index 2bbcacf..8614116 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -160,7 +160,7 @@ static int aio_setup_ring(struct kioctx *ctx)
+       size += sizeof(struct io_event) * nr_events;
+       nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
+-      if (nr_pages < 0)
++      if (nr_pages <= 0)
+               return -EINVAL;
+       nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
+@@ -950,6 +950,7 @@ static ssize_t aio_rw_vect_retry(struct kiocb *iocb, int rw, aio_rw_op *rw_op)
+ static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
+ {
+       ssize_t ret;
++      struct iovec iovstack;
+       kiocb->ki_nr_segs = kiocb->ki_nbytes;
+@@ -957,17 +958,22 @@ static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
+       if (compat)
+               ret = compat_rw_copy_check_uvector(rw,
+                               (struct compat_iovec __user *)kiocb->ki_buf,
+-                              kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
++                              kiocb->ki_nr_segs, 1, &iovstack,
+                               &kiocb->ki_iovec);
+       else
+ #endif
+               ret = rw_copy_check_uvector(rw,
+                               (struct iovec __user *)kiocb->ki_buf,
+-                              kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
++                              kiocb->ki_nr_segs, 1, &iovstack,
+                               &kiocb->ki_iovec);
+       if (ret < 0)
+               return ret;
++      if (kiocb->ki_iovec == &iovstack) {
++              kiocb->ki_inline_vec = iovstack;
++              kiocb->ki_iovec = &kiocb->ki_inline_vec;
++      }
++
+       /* ki_nbytes now reflect bytes instead of segs */
+       kiocb->ki_nbytes = ret;
+       return 0;
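
The aio change validates the user-supplied iovec into a local iovstack first and only commits it to kiocb->ki_inline_vec (and repoints ki_iovec) after rw_copy_check_uvector() has succeeded, so the kiocb never ends up referencing half-validated or stack-lifetime data. A generic validate-into-a-temporary-then-commit sketch; all names here are illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct request {
    char inline_buf[32];
    char *buf;          /* points at inline_buf or at external storage */
};

/* pretend validator: fails on empty input, may scribble in tmp on failure */
static bool validate_into(char *tmp, size_t len, const char *src)
{
    strncpy(tmp, src, len - 1);
    tmp[len - 1] = '\0';
    return src[0] != '\0';
}

static int setup_request(struct request *req, const char *src)
{
    char tmp[32];                              /* the iovstack analogue   */

    if (!validate_into(tmp, sizeof(tmp), src))
        return -1;                             /* req stays untouched     */

    memcpy(req->inline_buf, tmp, sizeof(tmp)); /* commit only on success  */
    req->buf = req->inline_buf;
    return 0;
}

int main(void)
{
    struct request req = { .buf = NULL };
    printf("ok=%d buf=%s\n", setup_request(&req, "hello"), req.buf);
    return 0;
}
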
+diff --git a/fs/attr.c b/fs/attr.c
+index 1449adb..a2038c2 100644
+--- a/fs/attr.c
++++ b/fs/attr.c
+@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
+               unsigned long limit;
+               limit = rlimit(RLIMIT_FSIZE);
++              gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
+               if (limit != RLIM_INFINITY && offset > limit)
+                       goto out_sig;
+               if (offset > inode->i_sb->s_maxbytes)
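
gr_learn_resource() is grsecurity's RBAC learning hook: before the normal RLIMIT_FSIZE check runs it records that the task needed a limit of at least offset, so a learning run can later generate a fitted policy. The corresponding check as seen from userspace looks roughly like this (illustrative only, not a grsecurity interface):

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
    struct rlimit rl;
    unsigned long long want = 1024 * 1024;   /* size we are about to write to */

    if (getrlimit(RLIMIT_FSIZE, &rl) != 0)
        return 1;

    if (rl.rlim_cur != RLIM_INFINITY && want > rl.rlim_cur)
        printf("would exceed RLIMIT_FSIZE (%llu)\n",
               (unsigned long long)rl.rlim_cur);
    else
        printf("within RLIMIT_FSIZE\n");
    return 0;
}
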
+diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
+index 3db70da..7aeec5b 100644
+--- a/fs/autofs4/waitq.c
++++ b/fs/autofs4/waitq.c
+@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
+ {
+       unsigned long sigpipe, flags;
+       mm_segment_t fs;
+-      const char *data = (const char *)addr;
++      const char __user *data = (const char __force_user *)addr;
+       ssize_t wr = 0;
+       sigpipe = sigismember(&current->pending.signal, SIGPIPE);
+@@ -346,6 +346,10 @@ static int validate_request(struct autofs_wait_queue **wait,
+       return 1;
+ }
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
++#endif
++
+ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
+               enum autofs_notify notify)
+ {
+@@ -379,7 +383,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
+       /* If this is a direct mount request create a dummy name */
+       if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++              /* this name does get written to userland via autofs4_write() */
++              qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
++#else
+               qstr.len = sprintf(name, "%p", dentry);
++#endif
+       else {
+               qstr.len = autofs4_getpath(sbi, dentry, &name);
+               if (!qstr.len) {
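
For direct mounts autofs4 invents a wait-queue name from the dentry pointer with "%p", and that string is written out to the automount daemon; under HIDESYM the patch substitutes a monotonically increasing counter so no kernel address reaches userspace. The substitution is easy to model with an atomic counter and fixed-width hex formatting (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint dummy_name_id;   /* autofs_dummy_name_id analogue */

static int make_dummy_name(char *buf, size_t len)
{
    unsigned int id = atomic_fetch_add(&dummy_name_id, 1) + 1;
    return snprintf(buf, len, "%08x", id);   /* instead of "%p" of a kernel object */
}

int main(void)
{
    char name[16];
    make_dummy_name(name, sizeof(name));
    printf("wait queue name: %s\n", name);
    return 0;
}
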
+diff --git a/fs/befs/endian.h b/fs/befs/endian.h
+index 2722387..c8dd2a7 100644
+--- a/fs/befs/endian.h
++++ b/fs/befs/endian.h
+@@ -11,7 +11,7 @@
+ #include <asm/byteorder.h>
+-static inline u64
++static inline u64 __intentional_overflow(-1)
+ fs64_to_cpu(const struct super_block *sb, fs64 n)
+ {
+       if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
+@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
+               return (__force fs64)cpu_to_be64(n);
+ }
+-static inline u32
++static inline u32 __intentional_overflow(-1)
+ fs32_to_cpu(const struct super_block *sb, fs32 n)
+ {
+       if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
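
__intentional_overflow(-1) is consumed by PaX's size_overflow GCC plugin, which instruments integer arithmetic that feeds size calculations; the attribute whitelists helpers like these byte-order converters where wrap-around is expected and harmless. Outside the plugin, explicit checked arithmetic gives a feel for what the instrumentation enforces, here using the GCC/Clang builtin __builtin_add_overflow:

#include <stdint.h>
#include <stdio.h>

/* returns 0 on success, -1 if the addition would wrap */
static int checked_add_u32(uint32_t a, uint32_t b, uint32_t *out)
{
    if (__builtin_add_overflow(a, b, out))
        return -1;              /* the plugin would report/trap here */
    return 0;
}

int main(void)
{
    uint32_t sum;
    printf("ok:   %d\n", checked_add_u32(40, 2, &sum));          /* 0  */
    printf("wrap: %d\n", checked_add_u32(UINT32_MAX, 1, &sum));  /* -1 */
    return 0;
}
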
+diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
+index f95dddc..b1e2c1c 100644
+--- a/fs/befs/linuxvfs.c
++++ b/fs/befs/linuxvfs.c
+@@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+ {
+       befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
+       if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
+-              char *link = nd_get_link(nd);
++              const char *link = nd_get_link(nd);
+               if (!IS_ERR(link))
+                       kfree(link);
+       }
+diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
+index bce8769..7fc7544 100644
+--- a/fs/binfmt_aout.c
++++ b/fs/binfmt_aout.c
+@@ -16,6 +16,7 @@
+ #include <linux/string.h>
+ #include <linux/fs.h>
+ #include <linux/file.h>
++#include <linux/security.h>
+ #include <linux/stat.h>
+ #include <linux/fcntl.h>
+ #include <linux/ptrace.h>
+@@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
+ #endif
+ #       define START_STACK(u)   ((void __user *)u.start_stack)
++      memset(&dump, 0, sizeof(dump));
++
+       fs = get_fs();
+       set_fs(KERNEL_DS);
+       has_dumped = 1;
+@@ -69,10 +72,12 @@ static int aout_core_dump(struct coredump_params *cprm)
+ /* If the size of the dump file exceeds the rlimit, then see what would happen
+    if we wrote the stack, but not the data area.  */
++      gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
+       if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
+               dump.u_dsize = 0;
+ /* Make sure we have enough room to write the stack and data areas. */
++      gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
+       if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
+               dump.u_ssize = 0;
+@@ -233,6 +238,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
+       rlim = rlimit(RLIMIT_DATA);
+       if (rlim >= RLIM_INFINITY)
+               rlim = ~0;
++
++      gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
+       if (ex.a_data + ex.a_bss > rlim)
+               return -ENOMEM;
+@@ -267,6 +274,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
+       install_exec_creds(bprm);
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++      current->mm->pax_flags = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++      if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
++              current->mm->pax_flags |= MF_PAX_PAGEEXEC;
++
++#ifdef CONFIG_PAX_EMUTRAMP
++              if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
++                      current->mm->pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++              if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
++                      current->mm->pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++      }
++#endif
++
+       if (N_MAGIC(ex) == OMAGIC) {
+               unsigned long text_addr, map_size;
+               loff_t pos;
+@@ -324,7 +352,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
+               }
+               error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
+-                              PROT_READ | PROT_WRITE | PROT_EXEC,
++                              PROT_READ | PROT_WRITE,
+                               MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
+                               fd_offset + ex.a_text);
+               if (error != N_DATADDR(ex)) {
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index f8a0b0e..6f036ed 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -34,6 +34,7 @@
+ #include <linux/utsname.h>
+ #include <linux/coredump.h>
+ #include <linux/sched.h>
++#include <linux/xattr.h>
+ #include <asm/uaccess.h>
+ #include <asm/param.h>
+ #include <asm/page.h>
+@@ -60,6 +61,14 @@ static int elf_core_dump(struct coredump_params *cprm);
+ #define elf_core_dump NULL
+ #endif
++#ifdef CONFIG_PAX_MPROTECT
++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
++#endif
++
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++static void elf_handle_mmap(struct file *file);
++#endif
++
+ #if ELF_EXEC_PAGESIZE > PAGE_SIZE
+ #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
+ #else
+@@ -79,6 +88,15 @@ static struct linux_binfmt elf_format = {
+       .load_binary    = load_elf_binary,
+       .load_shlib     = load_elf_library,
+       .core_dump      = elf_core_dump,
++
++#ifdef CONFIG_PAX_MPROTECT
++      .handle_mprotect= elf_handle_mprotect,
++#endif
++
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++      .handle_mmap    = elf_handle_mmap,
++#endif
++
+       .min_coredump   = ELF_EXEC_PAGESIZE,
+ };
+@@ -86,6 +104,8 @@ static struct linux_binfmt elf_format = {
+ static int set_brk(unsigned long start, unsigned long end)
+ {
++      unsigned long e = end;
++
+       start = ELF_PAGEALIGN(start);
+       end = ELF_PAGEALIGN(end);
+       if (end > start) {
+@@ -94,7 +114,7 @@ static int set_brk(unsigned long start, unsigned long end)
+               if (BAD_ADDR(addr))
+                       return addr;
+       }
+-      current->mm->start_brk = current->mm->brk = end;
++      current->mm->start_brk = current->mm->brk = e;
+       return 0;
+ }
+@@ -155,12 +175,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+       elf_addr_t __user *u_rand_bytes;
+       const char *k_platform = ELF_PLATFORM;
+       const char *k_base_platform = ELF_BASE_PLATFORM;
+-      unsigned char k_rand_bytes[16];
++      u32 k_rand_bytes[4];
+       int items;
+       elf_addr_t *elf_info;
+       int ei_index = 0;
+       const struct cred *cred = current_cred();
+       struct vm_area_struct *vma;
++      unsigned long saved_auxv[AT_VECTOR_SIZE];
+       /*
+        * In some cases (e.g. Hyper-Threading), we want to avoid L1
+@@ -202,8 +223,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+        * Generate 16 random bytes for userspace PRNG seeding.
+        */
+       get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
+-      u_rand_bytes = (elf_addr_t __user *)
+-                     STACK_ALLOC(p, sizeof(k_rand_bytes));
++      prandom_seed(k_rand_bytes[0] ^ prandom_u32());
++      prandom_seed(k_rand_bytes[1] ^ prandom_u32());
++      prandom_seed(k_rand_bytes[2] ^ prandom_u32());
++      prandom_seed(k_rand_bytes[3] ^ prandom_u32());
++      p = STACK_ROUND(p, sizeof(k_rand_bytes));
++      u_rand_bytes = (elf_addr_t __user *) p;
+       if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
+               return -EFAULT;
+@@ -318,9 +343,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+               return -EFAULT;
+       current->mm->env_end = p;
++      memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
++
+       /* Put the elf_info on the stack in the right place.  */
+       sp = (elf_addr_t __user *)envp + 1;
+-      if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
++      if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
+               return -EFAULT;
+       return 0;
+ }
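
The hunk above still exposes 16 AT_RANDOM bytes to the new program, but builds them as four u32 words, mixes each word back into the kernel's prandom state via prandom_seed(), and pushes the auxiliary vector to the user stack from the saved_auxv copy rather than straight out of elf_info. From the program's side nothing changes; the bytes are read as usual, for example with glibc's getauxval():

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
    /* AT_RANDOM points at the 16 random bytes the ELF loader placed on the stack */
    const unsigned char *r = (const unsigned char *)getauxval(AT_RANDOM);

    if (!r) {
        puts("AT_RANDOM not provided");
        return 1;
    }
    for (int i = 0; i < 16; i++)
        printf("%02x", r[i]);
    putchar('\n');
    return 0;
}
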
+@@ -388,15 +415,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
+    an ELF header */
+ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+-              struct file *interpreter, unsigned long *interp_map_addr,
+-              unsigned long no_base)
++              struct file *interpreter, unsigned long no_base)
+ {
+       struct elf_phdr *elf_phdata;
+       struct elf_phdr *eppnt;
+-      unsigned long load_addr = 0;
++      unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
+       int load_addr_set = 0;
+       unsigned long last_bss = 0, elf_bss = 0;
+-      unsigned long error = ~0UL;
++      unsigned long error = -EINVAL;
+       unsigned long total_size;
+       int retval, i, size;
+@@ -442,6 +468,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+               goto out_close;
+       }
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++              pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
+       eppnt = elf_phdata;
+       for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
+               if (eppnt->p_type == PT_LOAD) {
+@@ -465,8 +496,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+                       map_addr = elf_map(interpreter, load_addr + vaddr,
+                                       eppnt, elf_prot, elf_type, total_size);
+                       total_size = 0;
+-                      if (!*interp_map_addr)
+-                              *interp_map_addr = map_addr;
+                       error = map_addr;
+                       if (BAD_ADDR(map_addr))
+                               goto out_close;
+@@ -485,8 +514,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+                       k = load_addr + eppnt->p_vaddr;
+                       if (BAD_ADDR(k) ||
+                           eppnt->p_filesz > eppnt->p_memsz ||
+-                          eppnt->p_memsz > TASK_SIZE ||
+-                          TASK_SIZE - eppnt->p_memsz < k) {
++                          eppnt->p_memsz > pax_task_size ||
++                          pax_task_size - eppnt->p_memsz < k) {
+                               error = -ENOMEM;
+                               goto out_close;
+                       }
+@@ -538,6 +567,315 @@ out:
+       return error;
+ }
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++#ifdef CONFIG_PAX_SOFTMODE
++static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
++{
++      unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++      if (elf_phdata->p_flags & PF_PAGEEXEC)
++              pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (elf_phdata->p_flags & PF_SEGMEXEC)
++              pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++      if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
++              pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++      if (elf_phdata->p_flags & PF_MPROTECT)
++              pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++      if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
++              pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++      return pax_flags;
++}
++#endif
++
++static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
++{
++      unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++      if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
++              pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
++              pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++      if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
++              pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++      if (!(elf_phdata->p_flags & PF_NOMPROTECT))
++              pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++      if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
++              pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++      return pax_flags;
++}
++#endif
++
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++#ifdef CONFIG_PAX_SOFTMODE
++static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
++{
++      unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++      if (pax_flags_softmode & MF_PAX_PAGEEXEC)
++              pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (pax_flags_softmode & MF_PAX_SEGMEXEC)
++              pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++      if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
++              pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++      if (pax_flags_softmode & MF_PAX_MPROTECT)
++              pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++      if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
++              pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++      return pax_flags;
++}
++#endif
++
++static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
++{
++      unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++      if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
++              pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
++              pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++      if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
++              pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++      if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
++              pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++      if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
++              pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++      return pax_flags;
++}
++#endif
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
++{
++      unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_EI_PAX
++
++#ifdef CONFIG_PAX_PAGEEXEC
++      if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
++              pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
++              pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++      if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
++              pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++      if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
++              pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++      if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
++              pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++#else
++
++#ifdef CONFIG_PAX_PAGEEXEC
++      pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++      pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (randomize_va_space)
++              pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++#endif
++
++      return pax_flags;
++}
++
++static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
++{
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++      unsigned long i;
++
++      for (i = 0UL; i < elf_ex->e_phnum; i++)
++              if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
++                      if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
++                          ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
++                          ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
++                          ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
++                          ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
++                              return ~0UL;
++
++#ifdef CONFIG_PAX_SOFTMODE
++                      if (pax_softmode)
++                              return pax_parse_pt_pax_softmode(&elf_phdata[i]);
++                      else
++#endif
++
++                              return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
++                      break;
++              }
++#endif
++
++      return ~0UL;
++}
++
++static unsigned long pax_parse_xattr_pax(struct file * const file)
++{
++
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++      ssize_t xattr_size, i;
++      unsigned char xattr_value[sizeof("pemrs") - 1];
++      unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
++
++      xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
++      if (xattr_size <= 0 || xattr_size > sizeof xattr_value)
++              return ~0UL;
++
++      for (i = 0; i < xattr_size; i++)
++              switch (xattr_value[i]) {
++              default:
++                      return ~0UL;
++
++#define parse_flag(option1, option2, flag)                    \
++              case option1:                                   \
++                      if (pax_flags_hardmode & MF_PAX_##flag) \
++                              return ~0UL;                    \
++                      pax_flags_hardmode |= MF_PAX_##flag;    \
++                      break;                                  \
++              case option2:                                   \
++                      if (pax_flags_softmode & MF_PAX_##flag) \
++                              return ~0UL;                    \
++                      pax_flags_softmode |= MF_PAX_##flag;    \
++                      break;
++
++              parse_flag('p', 'P', PAGEEXEC);
++              parse_flag('e', 'E', EMUTRAMP);
++              parse_flag('m', 'M', MPROTECT);
++              parse_flag('r', 'R', RANDMMAP);
++              parse_flag('s', 'S', SEGMEXEC);
++
++#undef parse_flag
++              }
++
++      if (pax_flags_hardmode & pax_flags_softmode)
++              return ~0UL;
++
++#ifdef CONFIG_PAX_SOFTMODE
++      if (pax_softmode)
++              return pax_parse_xattr_pax_softmode(pax_flags_softmode);
++      else
++#endif
++
++              return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
++#else
++      return ~0UL;
++#endif
++
++}
++
++static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
++{
++      unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
++
++      pax_flags = pax_parse_ei_pax(elf_ex);
++      pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
++      xattr_pax_flags = pax_parse_xattr_pax(file);
++
++      if (pt_pax_flags == ~0UL)
++              pt_pax_flags = xattr_pax_flags;
++      else if (xattr_pax_flags == ~0UL)
++              xattr_pax_flags = pt_pax_flags;
++      if (pt_pax_flags != xattr_pax_flags)
++              return -EINVAL;
++      if (pt_pax_flags != ~0UL)
++              pax_flags = pt_pax_flags;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++      if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++              if ((__supported_pte_mask & _PAGE_NX))
++                      pax_flags &= ~MF_PAX_SEGMEXEC;
++              else
++                      pax_flags &= ~MF_PAX_PAGEEXEC;
++      }
++#endif
++
++      if (0 > pax_check_flags(&pax_flags))
++              return -EINVAL;
++
++      current->mm->pax_flags = pax_flags;
++      return 0;
++}
++#endif
++
+ /*
+  * These are the functions used to load ELF style executables and shared
+  * libraries.  There is no binary dependent code anywhere else.
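
The functions ending above merge a task's PaX flags from three sources -- legacy EI_PAX bits in e_ident, a PT_PAX_FLAGS program header, and a filesystem extended attribute whose value is a short string over the characters p/P, e/E, m/M, r/R, s/S selecting PAGEEXEC, EMUTRAMP, MPROTECT, RANDMMAP and SEGMEXEC treatment -- and refuse contradictory markings. A userspace sketch of reading such a marking is shown below; the attribute name "user.pax.flags" is the one PaX tooling conventionally uses and is taken here as an assumption:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
    static const struct { char lower, upper; const char *name; } flags[] = {
        { 'p', 'P', "PAGEEXEC" }, { 'e', 'E', "EMUTRAMP" },
        { 'm', 'M', "MPROTECT" }, { 'r', 'R', "RANDMMAP" },
        { 's', 'S', "SEGMEXEC" },
    };
    char value[8];
    ssize_t n;

    if (argc < 2) {
        fprintf(stderr, "usage: %s <file>\n", argv[0]);
        return 1;
    }

    n = getxattr(argv[1], "user.pax.flags", value, sizeof(value) - 1);
    if (n < 0) {
        perror("getxattr");
        return 1;
    }
    value[n] = '\0';

    printf("marking on %s: \"%s\"\n", argv[1], value);
    for (size_t i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) {
        if (strchr(value, flags[i].lower))
            printf("  %s: lowercase flag present\n", flags[i].name);
        if (strchr(value, flags[i].upper))
            printf("  %s: uppercase flag present\n", flags[i].name);
    }
    return 0;
}

As in pax_parse_pax_flags() above, a marking that sets both the lowercase and uppercase form of the same letter is meaningless and is rejected by the kernel-side parser.
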
+@@ -554,6 +892,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
+ {
+       unsigned int random_variable = 0;
++#ifdef CONFIG_PAX_RANDUSTACK
++      if (current->mm->pax_flags & MF_PAX_RANDMMAP)
++              return stack_top - current->mm->delta_stack;
++#endif
++
+       if ((current->flags & PF_RANDOMIZE) &&
+               !(current->personality & ADDR_NO_RANDOMIZE)) {
+               random_variable = get_random_int() & STACK_RND_MASK;
+@@ -572,7 +915,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
+       unsigned long load_addr = 0, load_bias = 0;
+       int load_addr_set = 0;
+       char * elf_interpreter = NULL;
+-      unsigned long error;
++      unsigned long error = 0;
+       struct elf_phdr *elf_ppnt, *elf_phdata;
+       unsigned long elf_bss, elf_brk;
+       int retval, i;
+@@ -582,12 +925,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
+       unsigned long start_code, end_code, start_data, end_data;
+       unsigned long reloc_func_desc __maybe_unused = 0;
+       int executable_stack = EXSTACK_DEFAULT;
+-      unsigned long def_flags = 0;
+       struct pt_regs *regs = current_pt_regs();
+       struct {
+               struct elfhdr elf_ex;
+               struct elfhdr interp_elf_ex;
+       } *loc;
++      unsigned long pax_task_size = TASK_SIZE;
+       loc = kmalloc(sizeof(*loc), GFP_KERNEL);
+       if (!loc) {
+@@ -723,11 +1066,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
+               goto out_free_dentry;
+       /* OK, This is the point of no return */
+-      current->mm->def_flags = def_flags;
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++      current->mm->pax_flags = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_DLRESOLVE
++      current->mm->call_dl_resolve = 0UL;
++#endif
++
++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
++      current->mm->call_syscall = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++      current->mm->delta_mmap = 0UL;
++      current->mm->delta_stack = 0UL;
++#endif
++
++      current->mm->def_flags = 0;
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++      if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
++              send_sig(SIGKILL, current, 0);
++              goto out_free_dentry;
++      }
++#endif
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++      pax_set_initial_flags(bprm);
++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++      if (pax_set_initial_flags_func)
++              (pax_set_initial_flags_func)(bprm);
++#endif
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++      if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
++              current->mm->context.user_cs_limit = PAGE_SIZE;
++              current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
++      }
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
++              current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
++              current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
++              pax_task_size = SEGMEXEC_TASK_SIZE;
++              current->mm->def_flags |= VM_NOHUGEPAGE;
++      }
++#endif
++
++#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
++      if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++              set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
++              put_cpu();
++      }
++#endif
+       /* Do this immediately, since STACK_TOP as used in setup_arg_pages
+          may depend on the personality.  */
+       SET_PERSONALITY(loc->elf_ex);
++
++#ifdef CONFIG_PAX_ASLR
++      if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
++              current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
++              current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
++      }
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++      if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++              executable_stack = EXSTACK_DISABLE_X;
++              current->personality &= ~READ_IMPLIES_EXEC;
++      } else
++#endif
++
+       if (elf_read_implies_exec(loc->elf_ex, executable_stack))
+               current->personality |= READ_IMPLIES_EXEC;
+@@ -819,6 +1232,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ #else
+                       load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++                      /* PaX: randomize base address at the default exe base if requested */
++                      if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
++#ifdef CONFIG_SPARC64
++                              load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
++#else
++                              load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
++#endif
++                              load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
++                              elf_flags |= MAP_FIXED;
++                      }
++#endif
++
+               }
+               error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
+@@ -851,9 +1278,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
+                * allowed task size. Note that p_filesz must always be
+                * <= p_memsz so it is only necessary to check p_memsz.
+                */
+-              if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
+-                  elf_ppnt->p_memsz > TASK_SIZE ||
+-                  TASK_SIZE - elf_ppnt->p_memsz < k) {
++              if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
++                  elf_ppnt->p_memsz > pax_task_size ||
++                  pax_task_size - elf_ppnt->p_memsz < k) {
+                       /* set_brk can never work. Avoid overflows. */
+                       send_sig(SIGKILL, current, 0);
+                       retval = -EINVAL;
+@@ -892,17 +1319,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
+               goto out_free_dentry;
+       }
+       if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
+-              send_sig(SIGSEGV, current, 0);
+-              retval = -EFAULT; /* Nobody gets to see this, but.. */
+-              goto out_free_dentry;
++              /*
++               * This bss-zeroing can fail if the ELF
++               * file specifies odd protections. So
++               * we don't check the return value
++               */
+       }
++#ifdef CONFIG_PAX_RANDMMAP
++      if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
++              unsigned long start, size, flags;
++              vm_flags_t vm_flags;
++
++              start = ELF_PAGEALIGN(elf_brk);
++              size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
++              flags = MAP_FIXED | MAP_PRIVATE;
++              vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
++
++              down_write(&current->mm->mmap_sem);
++              start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
++              retval = -ENOMEM;
++              if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
++//                    if (current->personality & ADDR_NO_RANDOMIZE)
++//                            vm_flags |= VM_READ | VM_MAYREAD;
++                      start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
++                      retval = IS_ERR_VALUE(start) ? start : 0;
++              }
++              up_write(&current->mm->mmap_sem);
++              if (retval == 0)
++                      retval = set_brk(start + size, start + size + PAGE_SIZE);
++              if (retval < 0) {
++                      send_sig(SIGKILL, current, 0);
++                      goto out_free_dentry;
++              }
++      }
++#endif
++
+       if (elf_interpreter) {
+-              unsigned long interp_map_addr = 0;
+-
+               elf_entry = load_elf_interp(&loc->interp_elf_ex,
+                                           interpreter,
+-                                          &interp_map_addr,
+                                           load_bias);
+               if (!IS_ERR((void *)elf_entry)) {
+                       /*
+@@ -1124,7 +1579,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
+  * Decide what to dump of a segment, part, all or none.
+  */
+ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+-                                 unsigned long mm_flags)
++                                 unsigned long mm_flags, long signr)
+ {
+ #define FILTER(type)  (mm_flags & (1UL << MMF_DUMP_##type))
+@@ -1162,7 +1617,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+       if (vma->vm_file == NULL)
+               return 0;
+-      if (FILTER(MAPPED_PRIVATE))
++      if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
+               goto whole;
+       /*
+@@ -1387,9 +1842,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
+ {
+       elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
+       int i = 0;
+-      do
++      do {
+               i += 2;
+-      while (auxv[i - 2] != AT_NULL);
++      } while (auxv[i - 2] != AT_NULL);
+       fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
+ }
+@@ -1398,7 +1853,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
+ {
+       mm_segment_t old_fs = get_fs();
+       set_fs(KERNEL_DS);
+-      copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
++      copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
+       set_fs(old_fs);
+       fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
+ }
+@@ -2019,14 +2474,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
+ }
+ static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
+-                                   unsigned long mm_flags)
++                                   struct coredump_params *cprm)
+ {
+       struct vm_area_struct *vma;
+       size_t size = 0;
+       for (vma = first_vma(current, gate_vma); vma != NULL;
+            vma = next_vma(vma, gate_vma))
+-              size += vma_dump_size(vma, mm_flags);
++              size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
+       return size;
+ }
+@@ -2119,7 +2574,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+       dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
+-      offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
++      offset += elf_core_vma_data_size(gate_vma, cprm);
+       offset += elf_core_extra_data_size();
+       e_shoff = offset;
+@@ -2133,10 +2588,12 @@ static int elf_core_dump(struct coredump_params *cprm)
+       offset = dataoff;
+       size += sizeof(*elf);
++      gr_learn_resource(current, RLIMIT_CORE, size, 1);
+       if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
+               goto end_coredump;
+       size += sizeof(*phdr4note);
++      gr_learn_resource(current, RLIMIT_CORE, size, 1);
+       if (size > cprm->limit
+           || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
+               goto end_coredump;
+@@ -2150,7 +2607,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+               phdr.p_offset = offset;
+               phdr.p_vaddr = vma->vm_start;
+               phdr.p_paddr = 0;
+-              phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
++              phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
+               phdr.p_memsz = vma->vm_end - vma->vm_start;
+               offset += phdr.p_filesz;
+               phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
+@@ -2161,6 +2618,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+               phdr.p_align = ELF_EXEC_PAGESIZE;
+               size += sizeof(phdr);
++              gr_learn_resource(current, RLIMIT_CORE, size, 1);
+               if (size > cprm->limit
+                   || !dump_write(cprm->file, &phdr, sizeof(phdr)))
+                       goto end_coredump;
+@@ -2185,7 +2643,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+               unsigned long addr;
+               unsigned long end;
+-              end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
++              end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
+               for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
+                       struct page *page;
+@@ -2194,6 +2652,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+                       page = get_dump_page(addr);
+                       if (page) {
+                               void *kaddr = kmap(page);
++                              gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
+                               stop = ((size += PAGE_SIZE) > cprm->limit) ||
+                                       !dump_write(cprm->file, kaddr,
+                                                   PAGE_SIZE);
+@@ -2211,6 +2670,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+       if (e_phnum == PN_XNUM) {
+               size += sizeof(*shdr4extnum);
++              gr_learn_resource(current, RLIMIT_CORE, size, 1);
+               if (size > cprm->limit
+                   || !dump_write(cprm->file, shdr4extnum,
+                                  sizeof(*shdr4extnum)))
+@@ -2231,6 +2691,167 @@ out:
+ #endif                /* CONFIG_ELF_CORE */
++#ifdef CONFIG_PAX_MPROTECT
++/* PaX: non-PIC ELF libraries need relocations on their executable segments
++ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
++ * we'll remove VM_MAYWRITE for good on RELRO segments.
++ *
++ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
++ * basis because we want to allow the common case and not the special ones.
++ */
++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
++{
++      struct elfhdr elf_h;
++      struct elf_phdr elf_p;
++      unsigned long i;
++      unsigned long oldflags;
++      bool is_textrel_rw, is_textrel_rx, is_relro;
++
++      if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
++              return;
++
++      oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
++      newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
++
++#ifdef CONFIG_PAX_ELFRELOCS
++      /* possible TEXTREL */
++      is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
++      is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
++#else
++      is_textrel_rw = false;
++      is_textrel_rx = false;
++#endif
++
++      /* possible RELRO */
++      is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
++
++      if (!is_textrel_rw && !is_textrel_rx && !is_relro)
++              return;
++
++      if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
++          memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
++
++#ifdef CONFIG_PAX_ETEXECRELOCS
++          ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
++#else
++          ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
++#endif
++
++          (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
++          !elf_check_arch(&elf_h) ||
++          elf_h.e_phentsize != sizeof(struct elf_phdr) ||
++          elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
++              return;
++
++      for (i = 0UL; i < elf_h.e_phnum; i++) {
++              if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
++                      return;
++              switch (elf_p.p_type) {
++              case PT_DYNAMIC:
++                      if (!is_textrel_rw && !is_textrel_rx)
++                              continue;
++                      i = 0UL;
++                      while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
++                              elf_dyn dyn;
++
++                              if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
++                                      break;
++                              if (dyn.d_tag == DT_NULL)
++                                      break;
++                              if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
++                                      gr_log_textrel(vma);
++                                      if (is_textrel_rw)
++                                              vma->vm_flags |= VM_MAYWRITE;
++                                      else
++                                              /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
++                                              vma->vm_flags &= ~VM_MAYWRITE;
++                                      break;
++                              }
++                              i++;
++                      }
++                      is_textrel_rw = false;
++                      is_textrel_rx = false;
++                      continue;
++
++              case PT_GNU_RELRO:
++                      if (!is_relro)
++                              continue;
++                      if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
++                              vma->vm_flags &= ~VM_MAYWRITE;
++                      is_relro = false;
++                      continue;
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++              case PT_PAX_FLAGS: {
++                      const char *msg_mprotect = "", *msg_emutramp = "";
++                      char *buffer_lib, *buffer_exe;
++
++                      if (elf_p.p_flags & PF_NOMPROTECT)
++                              msg_mprotect = "MPROTECT disabled";
++
++#ifdef CONFIG_PAX_EMUTRAMP
++                      if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
++                              msg_emutramp = "EMUTRAMP enabled";
++#endif
++
++                      if (!msg_mprotect[0] && !msg_emutramp[0])
++                              continue;
++
++                      if (!printk_ratelimit())
++                              continue;
++
++                      buffer_lib = (char *)__get_free_page(GFP_KERNEL);
++                      buffer_exe = (char *)__get_free_page(GFP_KERNEL);
++                      if (buffer_lib && buffer_exe) {
++                              char *path_lib, *path_exe;
++
++                              path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
++                              path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
++
++                              pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
++                                      (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
++
++                      }
++                      free_page((unsigned long)buffer_exe);
++                      free_page((unsigned long)buffer_lib);
++                      continue;
++              }
++#endif
++
++              }
++      }
++}
++#endif
++
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++
++extern int grsec_enable_log_rwxmaps;
++
++static void elf_handle_mmap(struct file *file)
++{
++      struct elfhdr elf_h;
++      struct elf_phdr elf_p;
++      unsigned long i;
++
++      if (!grsec_enable_log_rwxmaps)
++              return;
++
++      if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
++          memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
++          (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
++          elf_h.e_phentsize != sizeof(struct elf_phdr) ||
++          elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
++              return;
++
++      for (i = 0UL; i < elf_h.e_phnum; i++) {
++              if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
++                      return;
++              if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
++                      gr_log_ptgnustack(file);
++      }
++}
++#endif
++
+ static int __init init_elf_binfmt(void)
+ {
+       register_binfmt(&elf_format);
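
A note on the elf_handle_mprotect() hunk above: the function re-reads the ELF headers of the mapped file and only relaxes or revokes VM_MAYWRITE when it finds DT_TEXTREL (or DT_FLAGS with DF_TEXTREL set) in the PT_DYNAMIC segment, or a PT_GNU_RELRO segment whose file offset and page-aligned size match the VMA. The snippet below is a minimal userspace sketch of the same DT_TEXTREL lookup written against <elf.h>; it is not part of the patch, assumes a 64-bit ELF in the host byte order, and keeps error handling minimal.

/* Userspace sketch (not part of the patch): scan an ELF file's PT_DYNAMIC
 * segment for DT_TEXTREL / DF_TEXTREL, mirroring the check that
 * elf_handle_mprotect() performs before touching VM_MAYWRITE. */
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int has_textrel(int fd)
{
    Elf64_Ehdr eh;

    if (pread(fd, &eh, sizeof(eh), 0) != sizeof(eh))
        return -1;

    for (unsigned i = 0; i < eh.e_phnum; i++) {
        Elf64_Phdr ph;

        if (pread(fd, &ph, sizeof(ph), eh.e_phoff + i * sizeof(ph)) != sizeof(ph))
            return -1;
        if (ph.p_type != PT_DYNAMIC)
            continue;

        for (Elf64_Off off = 0; off + sizeof(Elf64_Dyn) <= ph.p_filesz;
             off += sizeof(Elf64_Dyn)) {
            Elf64_Dyn dyn;

            if (pread(fd, &dyn, sizeof(dyn), ph.p_offset + off) != sizeof(dyn))
                return -1;
            if (dyn.d_tag == DT_NULL)
                break;
            if (dyn.d_tag == DT_TEXTREL ||
                (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL)))
                return 1;       /* text relocations present */
        }
    }
    return 0;
}

int main(int argc, char **argv)
{
    if (argc != 2) {
        fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
        return 2;
    }
    int fd = open(argv[1], O_RDONLY);
    if (fd < 0) {
        perror("open");
        return 2;
    }
    int rc = has_textrel(fd);
    close(fd);
    printf("%s: %s\n", argv[1],
           rc == 1 ? "TEXTREL" : rc == 0 ? "no TEXTREL" : "error");
    return rc < 0 ? 2 : 0;
}
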
+diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
+index d50bbe5..af3b649 100644
+--- a/fs/binfmt_flat.c
++++ b/fs/binfmt_flat.c
+@@ -566,7 +566,9 @@ static int load_flat_file(struct linux_binprm * bprm,
+                               realdatastart = (unsigned long) -ENOMEM;
+                       printk("Unable to allocate RAM for process data, errno %d\n",
+                                       (int)-realdatastart);
++                      down_write(&current->mm->mmap_sem);
+                       vm_munmap(textpos, text_len);
++                      up_write(&current->mm->mmap_sem);
+                       ret = realdatastart;
+                       goto err;
+               }
+@@ -590,8 +592,10 @@ static int load_flat_file(struct linux_binprm * bprm,
+               }
+               if (IS_ERR_VALUE(result)) {
+                       printk("Unable to read data+bss, errno %d\n", (int)-result);
++                      down_write(&current->mm->mmap_sem);
+                       vm_munmap(textpos, text_len);
+                       vm_munmap(realdatastart, len);
++                      up_write(&current->mm->mmap_sem);
+                       ret = result;
+                       goto err;
+               }
+@@ -653,8 +657,10 @@ static int load_flat_file(struct linux_binprm * bprm,
+               }
+               if (IS_ERR_VALUE(result)) {
+                       printk("Unable to read code+data+bss, errno %d\n",(int)-result);
++                      down_write(&current->mm->mmap_sem);
+                       vm_munmap(textpos, text_len + data_len + extra +
+                               MAX_SHARED_LIBS * sizeof(unsigned long));
++                      up_write(&current->mm->mmap_sem);
+                       ret = result;
+                       goto err;
+               }
+diff --git a/fs/bio.c b/fs/bio.c
+index 94bbc04..6fe78a4 100644
+--- a/fs/bio.c
++++ b/fs/bio.c
+@@ -1096,7 +1096,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
+               /*
+                * Overflow, abort
+                */
+-              if (end < start)
++              if (end < start || end - start > INT_MAX - nr_pages)
+                       return ERR_PTR(-EINVAL);
+               nr_pages += end - start;
+@@ -1230,7 +1230,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
+               /*
+                * Overflow, abort
+                */
+-              if (end < start)
++              if (end < start || end - start > INT_MAX - nr_pages)
+                       return ERR_PTR(-EINVAL);
+               nr_pages += end - start;
+@@ -1492,7 +1492,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
+       const int read = bio_data_dir(bio) == READ;
+       struct bio_map_data *bmd = bio->bi_private;
+       int i;
+-      char *p = bmd->sgvecs[0].iov_base;
++      char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
+       bio_for_each_segment_all(bvec, bio, i) {
+               char *addr = page_address(bvec->bv_page);
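
A note on the fs/bio.c hunks above: the original code only rejected a wrapped range (end < start); the hardened check additionally refuses ranges whose page count would push the signed nr_pages accumulator past INT_MAX. A standalone sketch of the same guard follows; it is not from the patch, uses plain C types, and assumes nr_pages is non-negative.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Sketch only: overflow-safe accumulation of a page count, in the style of
 * the checks added to bio_copy_user_iov() and __bio_map_user_iov() above. */
static bool add_pages(int *nr_pages, unsigned long start, unsigned long end)
{
    if (end < start || end - start > (unsigned long)(INT_MAX - *nr_pages))
        return false;                   /* would wrap the signed counter */
    *nr_pages += (int)(end - start);
    return true;
}

int main(void)
{
    int nr_pages = 0;

    printf("small range accepted: %d\n", add_pages(&nr_pages, 0, 16));
    printf("huge range accepted:  %d\n",
           add_pages(&nr_pages, 0, (unsigned long)INT_MAX + 8UL));
    printf("nr_pages = %d\n", nr_pages);
    return 0;
}
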
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 85f5c85..d6f0b1a 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -658,7 +658,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
+       else if (bdev->bd_contains == bdev)
+               return true;     /* is a whole device which isn't held */
+-      else if (whole->bd_holder == bd_may_claim)
++      else if (whole->bd_holder == (void *)bd_may_claim)
+               return true;     /* is a partition of a device that is being partitioned */
+       else if (whole->bd_holder != NULL)
+               return false;    /* is a partition of a held device */
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 7fb054b..ad36c67 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -1076,9 +1076,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+               free_extent_buffer(buf);
+               add_root_to_dirty_list(root);
+       } else {
+-              if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
+-                      parent_start = parent->start;
+-              else
++              if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
++                      if (parent)
++                              parent_start = parent->start;
++                      else
++                              parent_start = 0;
++              } else
+                       parent_start = 0;
+               WARN_ON(trans->transid != btrfs_header_generation(parent));
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 0f81d67..0ad55fe 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3084,9 +3084,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
+       for (i = 0; i < num_types; i++) {
+               struct btrfs_space_info *tmp;
++              /* Don't copy in more than we allocated */
+               if (!slot_count)
+                       break;
++              slot_count--;
++
+               info = NULL;
+               rcu_read_lock();
+               list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
+@@ -3108,10 +3111,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
+                               memcpy(dest, &space, sizeof(space));
+                               dest++;
+                               space_args.total_spaces++;
+-                              slot_count--;
+                       }
+-                      if (!slot_count)
+-                              break;
+               }
+               up_read(&info->groups_sem);
+       }
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index f0857e0..e7023c5 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -265,7 +265,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
+                          function, line, errstr);
+               return;
+       }
+-      ACCESS_ONCE(trans->transaction->aborted) = errno;
++      ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
+       __btrfs_std_error(root->fs_info, function, line, errno, NULL);
+ }
+ /*
+diff --git a/fs/buffer.c b/fs/buffer.c
+index d2a4d1b..df798ca 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -3367,7 +3367,7 @@ void __init buffer_init(void)
+       bh_cachep = kmem_cache_create("buffer_head",
+                       sizeof(struct buffer_head), 0,
+                               (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
+-                              SLAB_MEM_SPREAD),
++                              SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
+                               NULL);
+       /*
+diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
+index 622f469..e8d2d55 100644
+--- a/fs/cachefiles/bind.c
++++ b/fs/cachefiles/bind.c
+@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
+              args);
+       /* start by checking things over */
+-      ASSERT(cache->fstop_percent >= 0 &&
+-             cache->fstop_percent < cache->fcull_percent &&
++      ASSERT(cache->fstop_percent < cache->fcull_percent &&
+              cache->fcull_percent < cache->frun_percent &&
+              cache->frun_percent  < 100);
+-      ASSERT(cache->bstop_percent >= 0 &&
+-             cache->bstop_percent < cache->bcull_percent &&
++      ASSERT(cache->bstop_percent < cache->bcull_percent &&
+              cache->bcull_percent < cache->brun_percent &&
+              cache->brun_percent  < 100);
+diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
+index 0a1467b..6a53245 100644
+--- a/fs/cachefiles/daemon.c
++++ b/fs/cachefiles/daemon.c
+@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
+       if (n > buflen)
+               return -EMSGSIZE;
+-      if (copy_to_user(_buffer, buffer, n) != 0)
++      if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
+               return -EFAULT;
+       return n;
+@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
+       if (test_bit(CACHEFILES_DEAD, &cache->flags))
+               return -EIO;
+-      if (datalen < 0 || datalen > PAGE_SIZE - 1)
++      if (datalen > PAGE_SIZE - 1)
+               return -EOPNOTSUPP;
+       /* drag the command string into the kernel so we can parse it */
+@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
+       if (args[0] != '%' || args[1] != '\0')
+               return -EINVAL;
+-      if (fstop < 0 || fstop >= cache->fcull_percent)
++      if (fstop >= cache->fcull_percent)
+               return cachefiles_daemon_range_error(cache, args);
+       cache->fstop_percent = fstop;
+@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
+       if (args[0] != '%' || args[1] != '\0')
+               return -EINVAL;
+-      if (bstop < 0 || bstop >= cache->bcull_percent)
++      if (bstop >= cache->bcull_percent)
+               return cachefiles_daemon_range_error(cache, args);
+       cache->bstop_percent = bstop;
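
A note on the fs/cachefiles hunks above: fstop_percent, bstop_percent and the parsed datalen/fstop/bstop values are unsigned, so the removed "< 0" / ">= 0" halves of these checks could never fire; the only functional addition is the "n > sizeof(buffer)" guard before copy_to_user(). A trivial standalone illustration of why the dropped comparisons were dead code (not from the patch):

#include <stdio.h>

int main(void)
{
    unsigned fstop = (unsigned)-5;      /* wraps to a large positive value */

    /* For an unsigned type, 'fstop < 0' is always false, which is why the
     * kernel checks removed above were dead code (gcc -Wextra flags them). */
    printf("fstop = %u, (fstop < 0) = %d\n", fstop, fstop < 0);
    return 0;
}
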
+diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
+index 4938251..7e01445 100644
+--- a/fs/cachefiles/internal.h
++++ b/fs/cachefiles/internal.h
+@@ -59,7 +59,7 @@ struct cachefiles_cache {
+       wait_queue_head_t               daemon_pollwq;  /* poll waitqueue for daemon */
+       struct rb_root                  active_nodes;   /* active nodes (can't be culled) */
+       rwlock_t                        active_lock;    /* lock for active_nodes */
+-      atomic_t                        gravecounter;   /* graveyard uniquifier */
++      atomic_unchecked_t              gravecounter;   /* graveyard uniquifier */
+       unsigned                        frun_percent;   /* when to stop culling (% files) */
+       unsigned                        fcull_percent;  /* when to start culling (% files) */
+       unsigned                        fstop_percent;  /* when to stop allocating (% files) */
+@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
+  * proc.c
+  */
+ #ifdef CONFIG_CACHEFILES_HISTOGRAM
+-extern atomic_t cachefiles_lookup_histogram[HZ];
+-extern atomic_t cachefiles_mkdir_histogram[HZ];
+-extern atomic_t cachefiles_create_histogram[HZ];
++extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
++extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
++extern atomic_unchecked_t cachefiles_create_histogram[HZ];
+ extern int __init cachefiles_proc_init(void);
+ extern void cachefiles_proc_cleanup(void);
+ static inline
+-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
++void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
+ {
+       unsigned long jif = jiffies - start_jif;
+       if (jif >= HZ)
+               jif = HZ - 1;
+-      atomic_inc(&histogram[jif]);
++      atomic_inc_unchecked(&histogram[jif]);
+ }
+ #else
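
A note on the atomic_t to atomic_unchecked_t conversions that start here and repeat through the cifs, ceph, coda and coredump hunks below: under PaX's REFCOUNT hardening, ordinary atomic_t operations guard against signed overflow; purely statistical counters such as these histograms and per-tcon stats are switched to the *_unchecked variants, which behave like plain atomics and are exempt from that check. The following is a simplified, userspace-compilable illustration only, not the actual PaX/grsecurity definitions.

#include <stdio.h>

/* Simplified stand-ins: the unchecked counter is a plain atomic with no
 * overflow detection, unlike the checked atomic_t under PAX_REFCOUNT. */
typedef struct { volatile int counter; } atomic_unchecked_t;

#define ATOMIC_INIT(i) { (i) }

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
    return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

int main(void)
{
    static atomic_unchecked_t gravecounter = ATOMIC_INIT(0);

    atomic_inc_unchecked(&gravecounter);
    printf("gravecounter = %d\n", atomic_read_unchecked(&gravecounter));
    return 0;
}
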
+diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
+index 8c01c5fc..15f982e 100644
+--- a/fs/cachefiles/namei.c
++++ b/fs/cachefiles/namei.c
+@@ -317,7 +317,7 @@ try_again:
+       /* first step is to make up a grave dentry in the graveyard */
+       sprintf(nbuffer, "%08x%08x",
+               (uint32_t) get_seconds(),
+-              (uint32_t) atomic_inc_return(&cache->gravecounter));
++              (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
+       /* do the multiway lock magic */
+       trap = lock_rename(cache->graveyard, dir);
+diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
+index eccd339..4c1d995 100644
+--- a/fs/cachefiles/proc.c
++++ b/fs/cachefiles/proc.c
+@@ -14,9 +14,9 @@
+ #include <linux/seq_file.h>
+ #include "internal.h"
+-atomic_t cachefiles_lookup_histogram[HZ];
+-atomic_t cachefiles_mkdir_histogram[HZ];
+-atomic_t cachefiles_create_histogram[HZ];
++atomic_unchecked_t cachefiles_lookup_histogram[HZ];
++atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
++atomic_unchecked_t cachefiles_create_histogram[HZ];
+ /*
+  * display the latency histogram
+@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
+               return 0;
+       default:
+               index = (unsigned long) v - 3;
+-              x = atomic_read(&cachefiles_lookup_histogram[index]);
+-              y = atomic_read(&cachefiles_mkdir_histogram[index]);
+-              z = atomic_read(&cachefiles_create_histogram[index]);
++              x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
++              y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
++              z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
+               if (x == 0 && y == 0 && z == 0)
+                       return 0;
+diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
+index 317f9ee..3d24511 100644
+--- a/fs/cachefiles/rdwr.c
++++ b/fs/cachefiles/rdwr.c
+@@ -966,7 +966,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
+                       old_fs = get_fs();
+                       set_fs(KERNEL_DS);
+                       ret = file->f_op->write(
+-                              file, (const void __user *) data, len, &pos);
++                              file, (const void __force_user *) data, len, &pos);
+                       set_fs(old_fs);
+                       kunmap(page);
+                       file_end_write(file);
+diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
+index f02d82b..2632cf86 100644
+--- a/fs/ceph/dir.c
++++ b/fs/ceph/dir.c
+@@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
+       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_mds_client *mdsc = fsc->mdsc;
+       unsigned frag = fpos_frag(filp->f_pos);
+-      int off = fpos_off(filp->f_pos);
++      unsigned int off = fpos_off(filp->f_pos);
+       int err;
+       u32 ftype;
+       struct ceph_mds_reply_info_parsed *rinfo;
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index 7d377c9..3fb6559 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -839,7 +839,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
+ /*
+  * construct our own bdi so we can control readahead, etc.
+  */
+-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
++static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
+ static int ceph_register_bdi(struct super_block *sb,
+                            struct ceph_fs_client *fsc)
+@@ -856,7 +856,7 @@ static int ceph_register_bdi(struct super_block *sb,
+                       default_backing_dev_info.ra_pages;
+       err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
+-                         atomic_long_inc_return(&bdi_seq));
++                         atomic_long_inc_return_unchecked(&bdi_seq));
+       if (!err)
+               sb->s_bdi = &fsc->backing_dev_info;
+       return err;
+diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
+index d597483..747901b 100644
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -284,8 +284,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
+       if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
+ #ifdef CONFIG_CIFS_STATS2
+-              atomic_set(&totBufAllocCount, 0);
+-              atomic_set(&totSmBufAllocCount, 0);
++              atomic_set_unchecked(&totBufAllocCount, 0);
++              atomic_set_unchecked(&totSmBufAllocCount, 0);
+ #endif /* CONFIG_CIFS_STATS2 */
+               spin_lock(&cifs_tcp_ses_lock);
+               list_for_each(tmp1, &cifs_tcp_ses_list) {
+@@ -298,7 +298,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
+                                       tcon = list_entry(tmp3,
+                                                         struct cifs_tcon,
+                                                         tcon_list);
+-                                      atomic_set(&tcon->num_smbs_sent, 0);
++                                      atomic_set_unchecked(&tcon->num_smbs_sent, 0);
+                                       if (server->ops->clear_stats)
+                                               server->ops->clear_stats(tcon);
+                               }
+@@ -330,8 +330,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
+                       smBufAllocCount.counter, cifs_min_small);
+ #ifdef CONFIG_CIFS_STATS2
+       seq_printf(m, "Total Large %d Small %d Allocations\n",
+-                              atomic_read(&totBufAllocCount),
+-                              atomic_read(&totSmBufAllocCount));
++                              atomic_read_unchecked(&totBufAllocCount),
++                              atomic_read_unchecked(&totSmBufAllocCount));
+ #endif /* CONFIG_CIFS_STATS2 */
+       seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
+@@ -360,7 +360,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
+                               if (tcon->need_reconnect)
+                                       seq_puts(m, "\tDISCONNECTED ");
+                               seq_printf(m, "\nSMBs: %d",
+-                                         atomic_read(&tcon->num_smbs_sent));
++                                         atomic_read_unchecked(&tcon->num_smbs_sent));
+                               if (server->ops->print_stats)
+                                       server->ops->print_stats(m, tcon);
+                       }
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 3752b9f..8db5569 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -1035,7 +1035,7 @@ cifs_init_request_bufs(void)
+ */
+       cifs_req_cachep = kmem_cache_create("cifs_request",
+                                           CIFSMaxBufSize + max_hdr_size, 0,
+-                                          SLAB_HWCACHE_ALIGN, NULL);
++                                          SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
+       if (cifs_req_cachep == NULL)
+               return -ENOMEM;
+@@ -1062,7 +1062,7 @@ cifs_init_request_bufs(void)
+       efficient to alloc 1 per page off the slab compared to 17K (5page)
+       alloc of large cifs buffers even when page debugging is on */
+       cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
+-                      MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
++                      MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
+                       NULL);
+       if (cifs_sm_req_cachep == NULL) {
+               mempool_destroy(cifs_req_poolp);
+@@ -1147,8 +1147,8 @@ init_cifs(void)
+       atomic_set(&bufAllocCount, 0);
+       atomic_set(&smBufAllocCount, 0);
+ #ifdef CONFIG_CIFS_STATS2
+-      atomic_set(&totBufAllocCount, 0);
+-      atomic_set(&totSmBufAllocCount, 0);
++      atomic_set_unchecked(&totBufAllocCount, 0);
++      atomic_set_unchecked(&totSmBufAllocCount, 0);
+ #endif /* CONFIG_CIFS_STATS2 */
+       atomic_set(&midCount, 0);
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index ea3a0b3..0194e39 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -752,35 +752,35 @@ struct cifs_tcon {
+       __u16 Flags;            /* optional support bits */
+       enum statusEnum tidStatus;
+ #ifdef CONFIG_CIFS_STATS
+-      atomic_t num_smbs_sent;
++      atomic_unchecked_t num_smbs_sent;
+       union {
+               struct {
+-                      atomic_t num_writes;
+-                      atomic_t num_reads;
+-                      atomic_t num_flushes;
+-                      atomic_t num_oplock_brks;
+-                      atomic_t num_opens;
+-                      atomic_t num_closes;
+-                      atomic_t num_deletes;
+-                      atomic_t num_mkdirs;
+-                      atomic_t num_posixopens;
+-                      atomic_t num_posixmkdirs;
+-                      atomic_t num_rmdirs;
+-                      atomic_t num_renames;
+-                      atomic_t num_t2renames;
+-                      atomic_t num_ffirst;
+-                      atomic_t num_fnext;
+-                      atomic_t num_fclose;
+-                      atomic_t num_hardlinks;
+-                      atomic_t num_symlinks;
+-                      atomic_t num_locks;
+-                      atomic_t num_acl_get;
+-                      atomic_t num_acl_set;
++                      atomic_unchecked_t num_writes;
++                      atomic_unchecked_t num_reads;
++                      atomic_unchecked_t num_flushes;
++                      atomic_unchecked_t num_oplock_brks;
++                      atomic_unchecked_t num_opens;
++                      atomic_unchecked_t num_closes;
++                      atomic_unchecked_t num_deletes;
++                      atomic_unchecked_t num_mkdirs;
++                      atomic_unchecked_t num_posixopens;
++                      atomic_unchecked_t num_posixmkdirs;
++                      atomic_unchecked_t num_rmdirs;
++                      atomic_unchecked_t num_renames;
++                      atomic_unchecked_t num_t2renames;
++                      atomic_unchecked_t num_ffirst;
++                      atomic_unchecked_t num_fnext;
++                      atomic_unchecked_t num_fclose;
++                      atomic_unchecked_t num_hardlinks;
++                      atomic_unchecked_t num_symlinks;
++                      atomic_unchecked_t num_locks;
++                      atomic_unchecked_t num_acl_get;
++                      atomic_unchecked_t num_acl_set;
+               } cifs_stats;
+ #ifdef CONFIG_CIFS_SMB2
+               struct {
+-                      atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
+-                      atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
++                      atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
++                      atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
+               } smb2_stats;
+ #endif /* CONFIG_CIFS_SMB2 */
+       } stats;
+@@ -1081,7 +1081,7 @@ convert_delimiter(char *path, char delim)
+ }
+ #ifdef CONFIG_CIFS_STATS
+-#define cifs_stats_inc atomic_inc
++#define cifs_stats_inc atomic_inc_unchecked
+ static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
+                                           unsigned int bytes)
+@@ -1446,8 +1446,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
+ /* Various Debug counters */
+ GLOBAL_EXTERN atomic_t bufAllocCount;    /* current number allocated  */
+ #ifdef CONFIG_CIFS_STATS2
+-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
+-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
++GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
++GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
+ #endif
+ GLOBAL_EXTERN atomic_t smBufAllocCount;
+ GLOBAL_EXTERN atomic_t midCount;
+diff --git a/fs/cifs/link.c b/fs/cifs/link.c
+index b83c3f5..6437caa 100644
+--- a/fs/cifs/link.c
++++ b/fs/cifs/link.c
+@@ -616,7 +616,7 @@ symlink_exit:
+ void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
+ {
+-      char *p = nd_get_link(nd);
++      const char *p = nd_get_link(nd);
+       if (!IS_ERR(p))
+               kfree(p);
+ }
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 1bec014..f329411 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -169,7 +169,7 @@ cifs_buf_get(void)
+               memset(ret_buf, 0, buf_size + 3);
+               atomic_inc(&bufAllocCount);
+ #ifdef CONFIG_CIFS_STATS2
+-              atomic_inc(&totBufAllocCount);
++              atomic_inc_unchecked(&totBufAllocCount);
+ #endif /* CONFIG_CIFS_STATS2 */
+       }
+@@ -204,7 +204,7 @@ cifs_small_buf_get(void)
+       /*      memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
+               atomic_inc(&smBufAllocCount);
+ #ifdef CONFIG_CIFS_STATS2
+-              atomic_inc(&totSmBufAllocCount);
++              atomic_inc_unchecked(&totSmBufAllocCount);
+ #endif /* CONFIG_CIFS_STATS2 */
+       }
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
+index 3efdb9d..e845a5e 100644
+--- a/fs/cifs/smb1ops.c
++++ b/fs/cifs/smb1ops.c
+@@ -591,27 +591,27 @@ static void
+ cifs_clear_stats(struct cifs_tcon *tcon)
+ {
+ #ifdef CONFIG_CIFS_STATS
+-      atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
+ #endif
+ }
+@@ -620,36 +620,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
+ {
+ #ifdef CONFIG_CIFS_STATS
+       seq_printf(m, " Oplocks breaks: %d",
+-                 atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
+       seq_printf(m, "\nReads:  %d Bytes: %llu",
+-                 atomic_read(&tcon->stats.cifs_stats.num_reads),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
+                  (long long)(tcon->bytes_read));
+       seq_printf(m, "\nWrites: %d Bytes: %llu",
+-                 atomic_read(&tcon->stats.cifs_stats.num_writes),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
+                  (long long)(tcon->bytes_written));
+       seq_printf(m, "\nFlushes: %d",
+-                 atomic_read(&tcon->stats.cifs_stats.num_flushes));
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
+       seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
+-                 atomic_read(&tcon->stats.cifs_stats.num_locks),
+-                 atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
+-                 atomic_read(&tcon->stats.cifs_stats.num_symlinks));
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
+       seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
+-                 atomic_read(&tcon->stats.cifs_stats.num_opens),
+-                 atomic_read(&tcon->stats.cifs_stats.num_closes),
+-                 atomic_read(&tcon->stats.cifs_stats.num_deletes));
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
+       seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
+-                 atomic_read(&tcon->stats.cifs_stats.num_posixopens),
+-                 atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
+       seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
+-                 atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
+-                 atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
+       seq_printf(m, "\nRenames: %d T2 Renames %d",
+-                 atomic_read(&tcon->stats.cifs_stats.num_renames),
+-                 atomic_read(&tcon->stats.cifs_stats.num_t2renames));
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
+       seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
+-                 atomic_read(&tcon->stats.cifs_stats.num_ffirst),
+-                 atomic_read(&tcon->stats.cifs_stats.num_fnext),
+-                 atomic_read(&tcon->stats.cifs_stats.num_fclose));
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
+ #endif
+ }
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index f2e76f3..c44fac7 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -274,8 +274,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
+ #ifdef CONFIG_CIFS_STATS
+       int i;
+       for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
+-              atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
+-              atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
++              atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
++              atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
+       }
+ #endif
+ }
+@@ -284,66 +284,66 @@ static void
+ smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
+ {
+ #ifdef CONFIG_CIFS_STATS
+-      atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
+-      atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
++      atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
++      atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
+       seq_printf(m, "\nNegotiates: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_NEGOTIATE_HE]),
+-                 atomic_read(&failed[SMB2_NEGOTIATE_HE]));
++                 atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
++                 atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
+       seq_printf(m, "\nSessionSetups: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
+-                 atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
++                 atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
++                 atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
+ #define SMB2LOGOFF            0x0002 /* trivial request/resp */
+       seq_printf(m, "\nLogoffs: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_LOGOFF_HE]),
+-                 atomic_read(&failed[SMB2_LOGOFF_HE]));
++                 atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
++                 atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
+       seq_printf(m, "\nTreeConnects: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
+-                 atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
++                 atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
++                 atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
+       seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
+-                 atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
++                 atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
++                 atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
+       seq_printf(m, "\nCreates: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_CREATE_HE]),
+-                 atomic_read(&failed[SMB2_CREATE_HE]));
++                 atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
++                 atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
+       seq_printf(m, "\nCloses: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_CLOSE_HE]),
+-                 atomic_read(&failed[SMB2_CLOSE_HE]));
++                 atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
++                 atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
+       seq_printf(m, "\nFlushes: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_FLUSH_HE]),
+-                 atomic_read(&failed[SMB2_FLUSH_HE]));
++                 atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
++                 atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
+       seq_printf(m, "\nReads: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_READ_HE]),
+-                 atomic_read(&failed[SMB2_READ_HE]));
++                 atomic_read_unchecked(&sent[SMB2_READ_HE]),
++                 atomic_read_unchecked(&failed[SMB2_READ_HE]));
+       seq_printf(m, "\nWrites: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_WRITE_HE]),
+-                 atomic_read(&failed[SMB2_WRITE_HE]));
++                 atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
++                 atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
+       seq_printf(m, "\nLocks: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_LOCK_HE]),
+-                 atomic_read(&failed[SMB2_LOCK_HE]));
++                 atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
++                 atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
+       seq_printf(m, "\nIOCTLs: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_IOCTL_HE]),
+-                 atomic_read(&failed[SMB2_IOCTL_HE]));
++                 atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
++                 atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
+       seq_printf(m, "\nCancels: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_CANCEL_HE]),
+-                 atomic_read(&failed[SMB2_CANCEL_HE]));
++                 atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
++                 atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
+       seq_printf(m, "\nEchos: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_ECHO_HE]),
+-                 atomic_read(&failed[SMB2_ECHO_HE]));
++                 atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
++                 atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
+       seq_printf(m, "\nQueryDirectories: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
+-                 atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
++                 atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
++                 atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
+       seq_printf(m, "\nChangeNotifies: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
+-                 atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
++                 atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
++                 atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
+       seq_printf(m, "\nQueryInfos: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_QUERY_INFO_HE]),
+-                 atomic_read(&failed[SMB2_QUERY_INFO_HE]));
++                 atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
++                 atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
+       seq_printf(m, "\nSetInfos: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_SET_INFO_HE]),
+-                 atomic_read(&failed[SMB2_SET_INFO_HE]));
++                 atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
++                 atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
+       seq_printf(m, "\nOplockBreaks: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
+-                 atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
++                 atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
++                 atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
+ #endif
+ }
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 2b95ce2..d079d75 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1760,8 +1760,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
+       default:
+               cifs_dbg(VFS, "info level %u isn't supported\n",
+                        srch_inf->info_level);
+-              rc = -EINVAL;
+-              goto qdir_exit;
++              return -EINVAL;
+       }
+       req->FileIndex = cpu_to_le32(index);
+diff --git a/fs/coda/cache.c b/fs/coda/cache.c
+index 1da168c..8bc7ff6 100644
+--- a/fs/coda/cache.c
++++ b/fs/coda/cache.c
+@@ -24,7 +24,7 @@
+ #include "coda_linux.h"
+ #include "coda_cache.h"
+-static atomic_t permission_epoch = ATOMIC_INIT(0);
++static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
+ /* replace or extend an acl cache hit */
+ void coda_cache_enter(struct inode *inode, int mask)
+@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
+       struct coda_inode_info *cii = ITOC(inode);
+       spin_lock(&cii->c_lock);
+-      cii->c_cached_epoch = atomic_read(&permission_epoch);
++      cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
+       if (!uid_eq(cii->c_uid, current_fsuid())) {
+               cii->c_uid = current_fsuid();
+                 cii->c_cached_perm = mask;
+@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
+ {
+       struct coda_inode_info *cii = ITOC(inode);
+       spin_lock(&cii->c_lock);
+-      cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
++      cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
+       spin_unlock(&cii->c_lock);
+ }
+ /* remove all acl caches */
+ void coda_cache_clear_all(struct super_block *sb)
+ {
+-      atomic_inc(&permission_epoch);
++      atomic_inc_unchecked(&permission_epoch);
+ }
+@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
+       spin_lock(&cii->c_lock);
+       hit = (mask & cii->c_cached_perm) == mask &&
+           uid_eq(cii->c_uid, current_fsuid()) &&
+-          cii->c_cached_epoch == atomic_read(&permission_epoch);
++          cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
+       spin_unlock(&cii->c_lock);
+       return hit;
+diff --git a/fs/compat.c b/fs/compat.c
+index fc3b55d..7b568ae 100644
+--- a/fs/compat.c
++++ b/fs/compat.c
+@@ -54,7 +54,7 @@
+ #include <asm/ioctls.h>
+ #include "internal.h"
+-int compat_log = 1;
++int compat_log = 0;
+ int compat_printk(const char *fmt, ...)
+ {
+@@ -488,7 +488,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
+       set_fs(KERNEL_DS);
+       /* The __user pointer cast is valid because of the set_fs() */
+-      ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
++      ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
+       set_fs(oldfs);
+       /* truncating is ok because it's a user address */
+       if (!ret)
+@@ -546,7 +546,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
+               goto out;
+       ret = -EINVAL;
+-      if (nr_segs > UIO_MAXIOV || nr_segs < 0)
++      if (nr_segs > UIO_MAXIOV)
+               goto out;
+       if (nr_segs > fast_segs) {
+               ret = -ENOMEM;
+@@ -833,6 +833,7 @@ struct compat_old_linux_dirent {
+ struct compat_readdir_callback {
+       struct compat_old_linux_dirent __user *dirent;
++      struct file * file;
+       int result;
+ };
+@@ -850,6 +851,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
+               buf->result = -EOVERFLOW;
+               return -EOVERFLOW;
+       }
++
++      if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++              return 0;
++
+       buf->result++;
+       dirent = buf->dirent;
+       if (!access_ok(VERIFY_WRITE, dirent,
+@@ -880,6 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
+       buf.result = 0;
+       buf.dirent = dirent;
++      buf.file = f.file;
+       error = vfs_readdir(f.file, compat_fillonedir, &buf);
+       if (buf.result)
+@@ -899,6 +905,7 @@ struct compat_linux_dirent {
+ struct compat_getdents_callback {
+       struct compat_linux_dirent __user *current_dir;
+       struct compat_linux_dirent __user *previous;
++      struct file * file;
+       int count;
+       int error;
+ };
+@@ -920,6 +927,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
+               buf->error = -EOVERFLOW;
+               return -EOVERFLOW;
+       }
++
++      if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++              return 0;
++
+       dirent = buf->previous;
+       if (dirent) {
+               if (__put_user(offset, &dirent->d_off))
+@@ -965,6 +976,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
+       buf.previous = NULL;
+       buf.count = count;
+       buf.error = 0;
++      buf.file = f.file;
+       error = vfs_readdir(f.file, compat_filldir, &buf);
+       if (error >= 0)
+@@ -985,6 +997,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
+ struct compat_getdents_callback64 {
+       struct linux_dirent64 __user *current_dir;
+       struct linux_dirent64 __user *previous;
++      struct file * file;
+       int count;
+       int error;
+ };
+@@ -1001,6 +1014,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
+       buf->error = -EINVAL;   /* only used if we fail.. */
+       if (reclen > buf->count)
+               return -EINVAL;
++
++      if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++              return 0;
++
+       dirent = buf->previous;
+       if (dirent) {
+@@ -1050,13 +1067,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
+       buf.previous = NULL;
+       buf.count = count;
+       buf.error = 0;
++      buf.file = f.file;
+       error = vfs_readdir(f.file, compat_filldir64, &buf);
+       if (error >= 0)
+               error = buf.error;
+       lastdirent = buf.previous;
+       if (lastdirent) {
+-              typeof(lastdirent->d_off) d_off = f.file->f_pos;
++              typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
+               if (__put_user_unaligned(d_off, &lastdirent->d_off))
+                       error = -EFAULT;
+               else
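
A note on the fs/compat.c readdir hunks above: each compat getdents callback structure gains a struct file pointer so the added gr_acl_handle_filldir() hook can consult the opened directory and, by returning 0 from the filldir callback, silently omit entries the RBAC policy hides; the typeof change at the end presumably keeps static checkers from seeing a dereference of a __user pointer inside typeof. Below is a userspace analogy of the skip-on-deny pattern, with hypothetical names, not kernel code.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef bool (*visible_fn)(const char *name);

/* A filldir-style callback that reports success but emits nothing for
 * entries the policy hides, matching the effect of returning 0 above. */
static int fill_one(const char *name, visible_fn allowed)
{
    if (!allowed(name))
        return 0;
    printf("%s\n", name);
    return 0;
}

static bool not_hidden(const char *name)
{
    return strncmp(name, "secret", 6) != 0;
}

int main(void)
{
    const char *entries[] = { "a.txt", "secret.key", "b.txt" };

    for (int i = 0; i < 3; i++)
        fill_one(entries[i], not_hidden);
    return 0;
}
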
+diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
+index a81147e..20bf2b5 100644
+--- a/fs/compat_binfmt_elf.c
++++ b/fs/compat_binfmt_elf.c
+@@ -30,11 +30,13 @@
+ #undef        elf_phdr
+ #undef        elf_shdr
+ #undef        elf_note
++#undef        elf_dyn
+ #undef        elf_addr_t
+ #define elfhdr                elf32_hdr
+ #define elf_phdr      elf32_phdr
+ #define elf_shdr      elf32_shdr
+ #define elf_note      elf32_note
++#define elf_dyn               Elf32_Dyn
+ #define elf_addr_t    Elf32_Addr
+ /*
+diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
+index 996cdc5..15e2f33 100644
+--- a/fs/compat_ioctl.c
++++ b/fs/compat_ioctl.c
+@@ -622,7 +622,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
+                       return -EFAULT;
+                 if (__get_user(udata, &ss32->iomem_base))
+                       return -EFAULT;
+-                ss.iomem_base = compat_ptr(udata);
++                ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
+                 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
+                   __get_user(ss.port_high, &ss32->port_high))
+                       return -EFAULT;
+@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
+       for (i = 0; i < nmsgs; i++) {
+               if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
+                       return -EFAULT;
+-              if (get_user(datap, &umsgs[i].buf) ||
+-                  put_user(compat_ptr(datap), &tmsgs[i].buf))
++              if (get_user(datap, (u8 __user * __user *)&umsgs[i].buf) ||
++                  put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
+                       return -EFAULT;
+       }
+       return sys_ioctl(fd, cmd, (unsigned long)tdata);
+@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
+           copy_in_user(&p->l_len,     &p32->l_len,    sizeof(s64)) ||
+           copy_in_user(&p->l_sysid,   &p32->l_sysid,  sizeof(s32)) ||
+           copy_in_user(&p->l_pid,     &p32->l_pid,    sizeof(u32)) ||
+-          copy_in_user(&p->l_pad,     &p32->l_pad,    4*sizeof(u32)))
++          copy_in_user(p->l_pad,      &p32->l_pad,    4*sizeof(u32)))
+               return -EFAULT;
+       return ioctl_preallocate(file, p);
+@@ -1619,8 +1619,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
+ static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
+ {
+       unsigned int a, b;
+-      a = *(unsigned int *)p;
+-      b = *(unsigned int *)q;
++      a = *(const unsigned int *)p;
++      b = *(const unsigned int *)q;
+       if (a > b)
+               return 1;
+       if (a < b)
+diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
+index 7aabc6a..34c1197 100644
+--- a/fs/configfs/dir.c
++++ b/fs/configfs/dir.c
+@@ -1565,7 +1565,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
+                       }
+                       for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
+                               struct configfs_dirent *next;
+-                              const char * name;
++                              const unsigned char * name;
++                              char d_name[sizeof(next->s_dentry->d_iname)];
+                               int len;
+                               struct inode *inode = NULL;
+@@ -1575,7 +1576,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
+                                       continue;
+                               name = configfs_get_name(next);
+-                              len = strlen(name);
++                              if (next->s_dentry && name == next->s_dentry->d_iname) {
++                                      len =  next->s_dentry->d_name.len;
++                                      memcpy(d_name, name, len);
++                                      name = d_name;
++                              } else
++                                      len = strlen(name);
+                               /*
+                                * We'll have a dentry and an inode for
+diff --git a/fs/coredump.c b/fs/coredump.c
+index dafafba..10b3b27 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -52,7 +52,7 @@ struct core_name {
+       char *corename;
+       int used, size;
+ };
+-static atomic_t call_count = ATOMIC_INIT(1);
++static atomic_unchecked_t call_count = ATOMIC_INIT(1);
+ /* The maximal length of core_pattern is also specified in sysctl.c */
+@@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
+ {
+       char *old_corename = cn->corename;
+-      cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
++      cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
+       cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
+       if (!cn->corename) {
+@@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
+       int pid_in_pattern = 0;
+       int err = 0;
+-      cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
++      cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
+       cn->corename = kmalloc(cn->size, GFP_KERNEL);
+       cn->used = 0;
+@@ -435,8 +435,8 @@ static void wait_for_dump_helpers(struct file *file)
+       struct pipe_inode_info *pipe = file->private_data;
+       pipe_lock(pipe);
+-      pipe->readers++;
+-      pipe->writers--;
++      atomic_inc(&pipe->readers);
++      atomic_dec(&pipe->writers);
+       wake_up_interruptible_sync(&pipe->wait);
+       kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+       pipe_unlock(pipe);
+@@ -445,11 +445,11 @@ static void wait_for_dump_helpers(struct file *file)
+        * We actually want wait_event_freezable() but then we need
+        * to clear TIF_SIGPENDING and improve dump_interrupted().
+        */
+-      wait_event_interruptible(pipe->wait, pipe->readers == 1);
++      wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
+       pipe_lock(pipe);
+-      pipe->readers--;
+-      pipe->writers++;
++      atomic_dec(&pipe->readers);
++      atomic_inc(&pipe->writers);
+       pipe_unlock(pipe);
+ }
+@@ -496,7 +496,8 @@ void do_coredump(siginfo_t *siginfo)
+       struct files_struct *displaced;
+       bool need_nonrelative = false;
+       bool core_dumped = false;
+-      static atomic_t core_dump_count = ATOMIC_INIT(0);
++      static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
++      long signr = siginfo->si_signo;
+       struct coredump_params cprm = {
+               .siginfo = siginfo,
+               .regs = signal_pt_regs(),
+@@ -509,7 +510,10 @@ void do_coredump(siginfo_t *siginfo)
+               .mm_flags = mm->flags,
+       };
+-      audit_core_dumps(siginfo->si_signo);
++      audit_core_dumps(signr);
++
++      if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
++              gr_handle_brute_attach(cprm.mm_flags);
+       binfmt = mm->binfmt;
+       if (!binfmt || !binfmt->core_dump)
+@@ -533,7 +537,7 @@ void do_coredump(siginfo_t *siginfo)
+               need_nonrelative = true;
+       }
+-      retval = coredump_wait(siginfo->si_signo, &core_state);
++      retval = coredump_wait(signr, &core_state);
+       if (retval < 0)
+               goto fail_creds;
+@@ -576,7 +580,7 @@ void do_coredump(siginfo_t *siginfo)
+               }
+               cprm.limit = RLIM_INFINITY;
+-              dump_count = atomic_inc_return(&core_dump_count);
++              dump_count = atomic_inc_return_unchecked(&core_dump_count);
+               if (core_pipe_limit && (core_pipe_limit < dump_count)) {
+                       printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
+                              task_tgid_vnr(current), current->comm);
+@@ -608,6 +612,8 @@ void do_coredump(siginfo_t *siginfo)
+       } else {
+               struct inode *inode;
++              gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
++
+               if (cprm.limit < binfmt->min_coredump)
+                       goto fail_unlock;
+@@ -666,7 +672,7 @@ close_fail:
+               filp_close(cprm.file, NULL);
+ fail_dropcount:
+       if (ispipe)
+-              atomic_dec(&core_dump_count);
++              atomic_dec_unchecked(&core_dump_count);
+ fail_unlock:
+       kfree(cn.corename);
+ fail_corename:
+@@ -687,7 +693,7 @@ int dump_write(struct file *file, const void *addr, int nr)
+ {
+       return !dump_interrupted() &&
+               access_ok(VERIFY_READ, addr, nr) &&
+-              file->f_op->write(file, addr, nr, &file->f_pos) == nr;
++              file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
+ }
+ EXPORT_SYMBOL(dump_write);
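
The fs/coredump.c hunk above swaps two counters that only feed buffer sizing and pipe-dump accounting (call_count, core_dump_count) from atomic_t to atomic_unchecked_t, which appears to be the PaX convention for opting a counter out of PAX_REFCOUNT overflow trapping. A minimal userspace sketch of that rationale follows; plain unsigned integers stand in for the kernel atomic types and the variable names are invented for illustration:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        unsigned int stats = UINT_MAX;  /* a statistics-style counter near its maximum */
        unsigned int refs  = 1;         /* a reference count guarding an object's lifetime */

        stats += 1;                     /* wraps to 0: the reported number is wrong, nothing breaks */
        refs  -= 2;                     /* wraps to UINT_MAX: in real code the object would already
                                           have been freed while still referenced */

        printf("stats=%u refs=%u\n", stats, refs);
        return 0;
}

Wraparound of the first kind of counter only misreports a value; wraparound of a true reference count is a lifetime bug, which is presumably what the checked atomic_t is kept for.
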
+diff --git a/fs/dcache.c b/fs/dcache.c
+index f09b908..04b9690 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -3086,7 +3086,8 @@ void __init vfs_caches_init(unsigned long mempages)
+       mempages -= reserve;
+       names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
+-                      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
++                      SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
++                      SLAB_NO_SANITIZE, NULL);
+       dcache_init();
+       inode_init();
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index c7c83ff..bda9461 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
+  */
+ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
+ {
++#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
++      return __create_file(name, S_IFDIR | S_IRWXU,
++#else
+       return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
++#endif
+                                  parent, NULL, NULL);
+ }
+ EXPORT_SYMBOL_GPL(debugfs_create_dir);
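
Under CONFIG_GRKERNSEC_SYSFS_RESTRICT the hunk above drops the group/other permission bits from new debugfs directories. A small standalone sketch of the mode arithmetic; S_IRUGO and S_IXUGO are kernel-only macros, so they are rebuilt here from the POSIX per-class bits, and S_IFDIR is omitted because only the permission bits differ:

#include <stdio.h>
#include <sys/stat.h>

#define K_S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)   /* 0444, mirrors the kernel's S_IRUGO */
#define K_S_IXUGO (S_IXUSR | S_IXGRP | S_IXOTH)   /* 0111, mirrors the kernel's S_IXUGO */

int main(void)
{
        printf("default debugfs dir mode:      %04o\n",
               (unsigned)(S_IRWXU | K_S_IRUGO | K_S_IXUGO));   /* 0755 */
        printf("GRKERNSEC_SYSFS_RESTRICT mode: %04o\n",
               (unsigned)S_IRWXU);                             /* 0700 */
        return 0;
}
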
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index 5eab400..810a3f5 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
+       old_fs = get_fs();
+       set_fs(get_ds());
+       rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
+-                                                 (char __user *)lower_buf,
++                                                 (char __force_user *)lower_buf,
+                                                  PATH_MAX);
+       set_fs(old_fs);
+       if (rc < 0)
+@@ -706,7 +706,7 @@ out:
+ static void
+ ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
+ {
+-      char *buf = nd_get_link(nd);
++      const char *buf = nd_get_link(nd);
+       if (!IS_ERR(buf)) {
+               /* Free the char* */
+               kfree(buf);
+diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
+index e4141f2..d8263e8 100644
+--- a/fs/ecryptfs/miscdev.c
++++ b/fs/ecryptfs/miscdev.c
+@@ -304,7 +304,7 @@ check_list:
+               goto out_unlock_msg_ctx;
+       i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
+       if (msg_ctx->msg) {
+-              if (copy_to_user(&buf[i], packet_length, packet_length_size))
++              if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
+                       goto out_unlock_msg_ctx;
+               i += packet_length_size;
+               if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
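
The fs/ecryptfs/miscdev.c hunk above adds a size check before copying the encoded packet length to userspace, so an over-large packet_length_size cannot read past the on-stack buffer. A hedged userspace sketch of the same pattern, with memcpy() standing in for copy_to_user() and the names and buffer sizes invented for the example:

#include <stdio.h>
#include <string.h>

static int copy_packet_length(char *dst, const char *packet_length,
                              size_t buf_size, size_t packet_length_size)
{
        if (packet_length_size > buf_size)
                return -1;                      /* reject instead of over-reading */
        memcpy(dst, packet_length, packet_length_size);
        return 0;
}

int main(void)
{
        char packet_length[8] = "len";          /* stand-in for the encoded length bytes */
        char out[64];

        printf("%d\n", copy_packet_length(out, packet_length, sizeof(packet_length), 4));   /*  0 */
        printf("%d\n", copy_packet_length(out, packet_length, sizeof(packet_length), 200)); /* -1 */
        return 0;
}
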
+diff --git a/fs/exec.c b/fs/exec.c
+index 1f44670..3c84660 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -55,8 +55,20 @@
+ #include <linux/pipe_fs_i.h>
+ #include <linux/oom.h>
+ #include <linux/compat.h>
++#include <linux/random.h>
++#include <linux/seq_file.h>
++#include <linux/coredump.h>
++#include <linux/mman.h>
++
++#ifdef CONFIG_PAX_REFCOUNT
++#include <linux/kallsyms.h>
++#include <linux/kdebug.h>
++#endif
++
++#include <trace/events/fs.h>
+ #include <asm/uaccess.h>
++#include <asm/sections.h>
+ #include <asm/mmu_context.h>
+ #include <asm/tlb.h>
+@@ -66,17 +78,32 @@
+ #include <trace/events/sched.h>
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++void __weak pax_set_initial_flags(struct linux_binprm *bprm)
++{
++      pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
++}
++#endif
++
++#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
++void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
++EXPORT_SYMBOL(pax_set_initial_flags_func);
++#endif
++
+ int suid_dumpable = 0;
+ static LIST_HEAD(formats);
+ static DEFINE_RWLOCK(binfmt_lock);
++extern int gr_process_kernel_exec_ban(void);
++extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
++
+ void __register_binfmt(struct linux_binfmt * fmt, int insert)
+ {
+       BUG_ON(!fmt);
+       write_lock(&binfmt_lock);
+-      insert ? list_add(&fmt->lh, &formats) :
+-               list_add_tail(&fmt->lh, &formats);
++      insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
++               pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
+       write_unlock(&binfmt_lock);
+ }
+@@ -85,7 +112,7 @@ EXPORT_SYMBOL(__register_binfmt);
+ void unregister_binfmt(struct linux_binfmt * fmt)
+ {
+       write_lock(&binfmt_lock);
+-      list_del(&fmt->lh);
++      pax_list_del((struct list_head *)&fmt->lh);
+       write_unlock(&binfmt_lock);
+ }
+@@ -180,18 +207,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+               int write)
+ {
+       struct page *page;
+-      int ret;
+-#ifdef CONFIG_STACK_GROWSUP
+-      if (write) {
+-              ret = expand_downwards(bprm->vma, pos);
+-              if (ret < 0)
+-                      return NULL;
+-      }
+-#endif
+-      ret = get_user_pages(current, bprm->mm, pos,
+-                      1, write, 1, &page, NULL);
+-      if (ret <= 0)
++      if (0 > expand_downwards(bprm->vma, pos))
++              return NULL;
++      if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
+               return NULL;
+       if (write) {
+@@ -207,6 +226,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+               if (size <= ARG_MAX)
+                       return page;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++              // only allow 512KB for argv+env on suid/sgid binaries
++              // to prevent easy ASLR exhaustion
++              if (((!uid_eq(bprm->cred->euid, current_euid())) ||
++                   (!gid_eq(bprm->cred->egid, current_egid()))) &&
++                  (size > (512 * 1024))) {
++                      put_page(page);
++                      return NULL;
++              }
++#endif
++
+               /*
+                * Limit to 1/4-th the stack size for the argv+env strings.
+                * This ensures that:
+@@ -266,6 +296,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
+       vma->vm_end = STACK_TOP_MAX;
+       vma->vm_start = vma->vm_end - PAGE_SIZE;
+       vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
++#endif
++
+       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+       INIT_LIST_HEAD(&vma->anon_vma_chain);
+@@ -276,6 +311,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
+       mm->stack_vm = mm->total_vm = 1;
+       up_write(&mm->mmap_sem);
+       bprm->p = vma->vm_end - sizeof(void *);
++
++#ifdef CONFIG_PAX_RANDUSTACK
++      if (randomize_va_space)
++              bprm->p ^= prandom_u32() & ~PAGE_MASK;
++#endif
++
+       return 0;
+ err:
+       up_write(&mm->mmap_sem);
+@@ -396,7 +437,7 @@ struct user_arg_ptr {
+       } ptr;
+ };
+-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
++const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
+ {
+       const char __user *native;
+@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
+               compat_uptr_t compat;
+               if (get_user(compat, argv.ptr.compat + nr))
+-                      return ERR_PTR(-EFAULT);
++                      return (const char __force_user *)ERR_PTR(-EFAULT);
+               return compat_ptr(compat);
+       }
+ #endif
+       if (get_user(native, argv.ptr.native + nr))
+-              return ERR_PTR(-EFAULT);
++              return (const char __force_user *)ERR_PTR(-EFAULT);
+       return native;
+ }
+@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
+                       if (!p)
+                               break;
+-                      if (IS_ERR(p))
++                      if (IS_ERR((const char __force_kernel *)p))
+                               return -EFAULT;
+                       if (i >= max)
+@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
+               ret = -EFAULT;
+               str = get_user_arg_ptr(argv, argc);
+-              if (IS_ERR(str))
++              if (IS_ERR((const char __force_kernel *)str))
+                       goto out;
+               len = strnlen_user(str, MAX_ARG_STRLEN);
+@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
+       int r;
+       mm_segment_t oldfs = get_fs();
+       struct user_arg_ptr argv = {
+-              .ptr.native = (const char __user *const  __user *)__argv,
++              .ptr.native = (const char __force_user * const __force_user *)__argv,
+       };
+       set_fs(KERNEL_DS);
+@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
+       unsigned long new_end = old_end - shift;
+       struct mmu_gather tlb;
+-      BUG_ON(new_start > new_end);
++      if (new_start >= new_end || new_start < mmap_min_addr)
++              return -ENOMEM;
+       /*
+        * ensure there are no vmas between where we want to go
+@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
+       if (vma != find_vma(mm, new_start))
+               return -EFAULT;
++#ifdef CONFIG_PAX_SEGMEXEC
++      BUG_ON(pax_find_mirror_vma(vma));
++#endif
++
+       /*
+        * cover the whole range: [new_start, old_end)
+        */
+@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
+       stack_top = arch_align_stack(stack_top);
+       stack_top = PAGE_ALIGN(stack_top);
+-      if (unlikely(stack_top < mmap_min_addr) ||
+-          unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
+-              return -ENOMEM;
+-
+       stack_shift = vma->vm_end - stack_top;
+       bprm->p -= stack_shift;
+@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
+       bprm->exec -= stack_shift;
+       down_write(&mm->mmap_sem);
++
++      /* Move stack pages down in memory. */
++      if (stack_shift) {
++              ret = shift_arg_pages(vma, stack_shift);
++              if (ret)
++                      goto out_unlock;
++      }
++
+       vm_flags = VM_STACK_FLAGS;
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++      if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++              vm_flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++              if (mm->pax_flags & MF_PAX_MPROTECT)
++                      vm_flags &= ~VM_MAYEXEC;
++#endif
++
++      }
++#endif
++
+       /*
+        * Adjust stack execute permissions; explicitly enable for
+        * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
+@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
+               goto out_unlock;
+       BUG_ON(prev != vma);
+-      /* Move stack pages down in memory. */
+-      if (stack_shift) {
+-              ret = shift_arg_pages(vma, stack_shift);
+-              if (ret)
+-                      goto out_unlock;
+-      }
+-
+       /* mprotect_fixup is overkill to remove the temporary stack flags */
+       vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
+@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
+ #endif
+       current->mm->start_stack = bprm->p;
+       ret = expand_stack(vma, stack_base);
++
++#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
++      if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
++              unsigned long size;
++              vm_flags_t vm_flags;
++
++              size = STACK_TOP - vma->vm_end;
++              vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
++
++              ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
++
++#ifdef CONFIG_X86
++              if (!ret) {
++                      size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
++                      ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
++              }
++#endif
++
++      }
++#endif
++
+       if (ret)
+               ret = -EFAULT;
+@@ -772,6 +848,8 @@ struct file *open_exec(const char *name)
+       fsnotify_open(file);
++      trace_open_exec(name);
++
+       err = deny_write_access(file);
+       if (err)
+               goto exit;
+@@ -795,7 +873,7 @@ int kernel_read(struct file *file, loff_t offset,
+       old_fs = get_fs();
+       set_fs(get_ds());
+       /* The cast to a user pointer is valid due to the set_fs() */
+-      result = vfs_read(file, (void __user *)addr, count, &pos);
++      result = vfs_read(file, (void __force_user *)addr, count, &pos);
+       set_fs(old_fs);
+       return result;
+ }
+@@ -1251,7 +1329,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
+       }
+       rcu_read_unlock();
+-      if (p->fs->users > n_fs) {
++      if (atomic_read(&p->fs->users) > n_fs) {
+               bprm->unsafe |= LSM_UNSAFE_SHARE;
+       } else {
+               res = -EAGAIN;
+@@ -1451,6 +1529,31 @@ int search_binary_handler(struct linux_binprm *bprm)
+ EXPORT_SYMBOL(search_binary_handler);
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++static DEFINE_PER_CPU(u64, exec_counter);
++static int __init init_exec_counters(void)
++{
++      unsigned int cpu;
++
++      for_each_possible_cpu(cpu) {
++              per_cpu(exec_counter, cpu) = (u64)cpu;
++      }
++
++      return 0;
++}
++early_initcall(init_exec_counters);
++static inline void increment_exec_counter(void)
++{
++      BUILD_BUG_ON(NR_CPUS > (1 << 16));
++      current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
++}
++#else
++static inline void increment_exec_counter(void) {}
++#endif
++
++extern void gr_handle_exec_args(struct linux_binprm *bprm,
++                              struct user_arg_ptr argv);
++
+ /*
+  * sys_execve() executes a new program.
+  */
+@@ -1458,6 +1561,11 @@ static int do_execve_common(const char *filename,
+                               struct user_arg_ptr argv,
+                               struct user_arg_ptr envp)
+ {
++#ifdef CONFIG_GRKERNSEC
++      struct file *old_exec_file;
++      struct acl_subject_label *old_acl;
++      struct rlimit old_rlim[RLIM_NLIMITS];
++#endif
+       struct linux_binprm *bprm;
+       struct file *file;
+       struct files_struct *displaced;
+@@ -1465,6 +1573,8 @@ static int do_execve_common(const char *filename,
+       int retval;
+       const struct cred *cred = current_cred();
++      gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&cred->user->processes), 1);
++
+       /*
+        * We move the actual failure in case of RLIMIT_NPROC excess from
+        * set*uid() to execve() because too many poorly written programs
+@@ -1505,12 +1615,22 @@ static int do_execve_common(const char *filename,
+       if (IS_ERR(file))
+               goto out_unmark;
++      if (gr_ptrace_readexec(file, bprm->unsafe)) {
++              retval = -EPERM;
++              goto out_file;
++      }
++
+       sched_exec();
+       bprm->file = file;
+       bprm->filename = filename;
+       bprm->interp = filename;
++      if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
++              retval = -EACCES;
++              goto out_file;
++      }
++
+       retval = bprm_mm_init(bprm);
+       if (retval)
+               goto out_file;
+@@ -1527,24 +1647,70 @@ static int do_execve_common(const char *filename,
+       if (retval < 0)
+               goto out;
++#ifdef CONFIG_GRKERNSEC
++      old_acl = current->acl;
++      memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
++      old_exec_file = current->exec_file;
++      get_file(file);
++      current->exec_file = file;
++#endif
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      /* limit suid stack to 8MB
++       * we saved the old limits above and will restore them if this exec fails
++       */
++      if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
++          (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
++              current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
++#endif
++
++      if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
++              retval = -EPERM;
++              goto out_fail;
++      }
++
++      if (!gr_tpe_allow(file)) {
++              retval = -EACCES;
++              goto out_fail;
++      }
++
++      if (gr_check_crash_exec(file)) {
++              retval = -EACCES;
++              goto out_fail;
++      }
++
++      retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
++                                      bprm->unsafe);
++      if (retval < 0)
++              goto out_fail;
++
+       retval = copy_strings_kernel(1, &bprm->filename, bprm);
+       if (retval < 0)
+-              goto out;
++              goto out_fail;
+       bprm->exec = bprm->p;
+       retval = copy_strings(bprm->envc, envp, bprm);
+       if (retval < 0)
+-              goto out;
++              goto out_fail;
+       retval = copy_strings(bprm->argc, argv, bprm);
+       if (retval < 0)
+-              goto out;
++              goto out_fail;
++
++      gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
++
++      gr_handle_exec_args(bprm, argv);
+       retval = search_binary_handler(bprm);
+       if (retval < 0)
+-              goto out;
++              goto out_fail;
++#ifdef CONFIG_GRKERNSEC
++      if (old_exec_file)
++              fput(old_exec_file);
++#endif
+       /* execve succeeded */
++
++      increment_exec_counter();
+       current->fs->in_exec = 0;
+       current->in_execve = 0;
+       acct_update_integrals(current);
+@@ -1553,6 +1719,14 @@ static int do_execve_common(const char *filename,
+               put_files_struct(displaced);
+       return retval;
++out_fail:
++#ifdef CONFIG_GRKERNSEC
++      current->acl = old_acl;
++      memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
++      fput(current->exec_file);
++      current->exec_file = old_exec_file;
++#endif
++
+ out:
+       if (bprm->mm) {
+               acct_arg_size(bprm, 0);
+@@ -1701,3 +1875,287 @@ asmlinkage long compat_sys_execve(const char __user * filename,
+       return error;
+ }
+ #endif
++
++int pax_check_flags(unsigned long *flags)
++{
++      int retval = 0;
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
++      if (*flags & MF_PAX_SEGMEXEC)
++      {
++              *flags &= ~MF_PAX_SEGMEXEC;
++      retval = -EINVAL;
++      }
++#endif
++
++      if ((*flags & MF_PAX_PAGEEXEC)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++          &&  (*flags & MF_PAX_SEGMEXEC)
++#endif
++
++         )
++      {
++              *flags &= ~MF_PAX_PAGEEXEC;
++              retval = -EINVAL;
++      }
++
++      if ((*flags & MF_PAX_MPROTECT)
++
++#ifdef CONFIG_PAX_MPROTECT
++          && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
++#endif
++
++         )
++      {
++              *flags &= ~MF_PAX_MPROTECT;
++      retval = -EINVAL;
++      }
++
++      if ((*flags & MF_PAX_EMUTRAMP)
++
++#ifdef CONFIG_PAX_EMUTRAMP
++          && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
++#endif
++
++         )
++      {
++              *flags &= ~MF_PAX_EMUTRAMP;
++              retval = -EINVAL;
++      }
++
++      return retval;
++}
++
++EXPORT_SYMBOL(pax_check_flags);
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++char *pax_get_path(const struct path *path, char *buf, int buflen)
++{
++      char *pathname = d_path(path, buf, buflen);
++
++      if (IS_ERR(pathname))
++              goto toolong;
++
++      pathname = mangle_path(buf, pathname, "\t\n\\");
++      if (!pathname)
++              goto toolong;
++
++      *pathname = 0;
++      return buf;
++
++toolong:
++      return "<path too long>";
++}
++EXPORT_SYMBOL(pax_get_path);
++
++void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
++{
++      struct task_struct *tsk = current;
++      struct mm_struct *mm = current->mm;
++      char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
++      char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
++      char *path_exec = NULL;
++      char *path_fault = NULL;
++      unsigned long start = 0UL, end = 0UL, offset = 0UL;
++      siginfo_t info = { };
++
++      if (buffer_exec && buffer_fault) {
++              struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
++
++              down_read(&mm->mmap_sem);
++              vma = mm->mmap;
++              while (vma && (!vma_exec || !vma_fault)) {
++                      if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
++                              vma_exec = vma;
++                      if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
++                              vma_fault = vma;
++                      vma = vma->vm_next;
++              }
++              if (vma_exec)
++                      path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
++              if (vma_fault) {
++                      start = vma_fault->vm_start;
++                      end = vma_fault->vm_end;
++                      offset = vma_fault->vm_pgoff << PAGE_SHIFT;
++                      if (vma_fault->vm_file)
++                              path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
++                      else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
++                              path_fault = "<heap>";
++                      else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
++                              path_fault = "<stack>";
++                      else
++                              path_fault = "<anonymous mapping>";
++              }
++              up_read(&mm->mmap_sem);
++      }
++      if (tsk->signal->curr_ip)
++              printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
++      else
++              printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
++      printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
++                      from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
++      free_page((unsigned long)buffer_exec);
++      free_page((unsigned long)buffer_fault);
++      pax_report_insns(regs, pc, sp);
++      info.si_signo = SIGKILL;
++      info.si_errno = 0;
++      info.si_code = SI_KERNEL;
++      info.si_pid = 0;
++      info.si_uid = 0;
++      do_coredump(&info);
++}
++#endif
++
++#ifdef CONFIG_PAX_REFCOUNT
++void pax_report_refcount_overflow(struct pt_regs *regs)
++{
++      if (current->signal->curr_ip)
++              printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
++                              &current->signal->curr_ip, current->comm, task_pid_nr(current),
++                              from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
++      else
++              printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
++                              from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
++      print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
++      preempt_disable();
++      show_regs(regs);
++      preempt_enable();
++      force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
++}
++#endif
++
++#ifdef CONFIG_PAX_USERCOPY
++/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
++static noinline int check_stack_object(const void *obj, unsigned long len)
++{
++      const void * const stack = task_stack_page(current);
++      const void * const stackend = stack + THREAD_SIZE;
++
++#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
++      const void *frame = NULL;
++      const void *oldframe;
++#endif
++
++      if (obj + len < obj)
++              return -1;
++
++      if (obj + len <= stack || stackend <= obj)
++              return 0;
++
++      if (obj < stack || stackend < obj + len)
++              return -1;
++
++#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
++      oldframe = __builtin_frame_address(1);
++      if (oldframe)
++              frame = __builtin_frame_address(2);
++      /*
++        low ----------------------------------------------> high
++        [saved bp][saved ip][args][local vars][saved bp][saved ip]
++                            ^----------------^
++                        allow copies only within here
++      */
++      while (stack <= frame && frame < stackend) {
++              /* if obj + len extends past the last frame, this
++                 check won't pass and the next frame will be 0,
++                 causing us to bail out and correctly report
++                 the copy as invalid
++              */
++              if (obj + len <= frame)
++                      return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
++              oldframe = frame;
++              frame = *(const void * const *)frame;
++      }
++      return -1;
++#else
++      return 1;
++#endif
++}
++
++static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
++{
++      if (current->signal->curr_ip)
++              printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
++                      &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
++      else
++              printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
++                      to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
++      dump_stack();
++      gr_handle_kernel_exploit();
++      do_group_exit(SIGKILL);
++}
++#endif
++
++#ifdef CONFIG_PAX_USERCOPY
++static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
++{
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++      unsigned long textlow = ktla_ktva((unsigned long)_stext);
++#ifdef CONFIG_MODULES
++      unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
++#else
++      unsigned long texthigh = ktla_ktva((unsigned long)_etext);
++#endif
++
++#else
++      unsigned long textlow = (unsigned long)_stext;
++      unsigned long texthigh = (unsigned long)_etext;
++#endif
++
++      if (high <= textlow || low > texthigh)
++              return false;
++      else
++              return true;
++}
++#endif
++
++void __check_object_size(const void *ptr, unsigned long n, bool to_user)
++{
++
++#ifdef CONFIG_PAX_USERCOPY
++      const char *type;
++
++      if (!n)
++              return;
++
++      type = check_heap_object(ptr, n);
++      if (!type) {
++              int ret = check_stack_object(ptr, n);
++              if (ret == 1 || ret == 2)
++                      return;
++              if (ret == 0) {
++                      if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
++                              type = "<kernel text>";
++                      else
++                              return;
++              } else
++                      type = "<process stack>";
++      }
++
++      pax_report_usercopy(ptr, n, to_user, type);
++#endif
++
++}
++EXPORT_SYMBOL(__check_object_size);
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++void pax_track_stack(void)
++{
++      unsigned long sp = (unsigned long)&sp;
++      if (sp < current_thread_info()->lowest_stack &&
++          sp > (unsigned long)task_stack_page(current))
++              current_thread_info()->lowest_stack = sp;
++}
++EXPORT_SYMBOL(pax_track_stack);
++#endif
++
++#ifdef CONFIG_PAX_SIZE_OVERFLOW
++void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
++{
++      printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
++      dump_stack();
++      do_group_exit(SIGKILL);
++}
++EXPORT_SYMBOL(report_size_overflow);
++#endif
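
Among the fs/exec.c additions above, check_stack_object() classifies a candidate copy range against the current task's stack before doing the frame-pointer walk. The following standalone sketch reproduces only those coarse interval checks; the helper and variable names are made up, and the frame walk and THREAD_SIZE handling are omitted:

#include <stdio.h>

/* -1: partially on the stack (error), 0: not on the stack, 1: fully contained */
static int classify(const char *obj, unsigned long len,
                    const char *stack, const char *stackend)
{
        if (obj + len < obj)                    /* length wraps the pointer */
                return -1;
        if (obj + len <= stack || stackend <= obj)
                return 0;                       /* entirely outside the stack */
        if (obj < stack || stackend < obj + len)
                return -1;                      /* straddles a stack boundary */
        return 1;                               /* fully inside */
}

int main(void)
{
        static char buf[512];
        const char *stack = buf + 128, *stackend = buf + 384;

        printf("%d\n", classify(stack + 16, 32, stack, stackend));  /*  1 */
        printf("%d\n", classify(stack - 8,  32, stack, stackend));  /* -1 */
        printf("%d\n", classify(buf,        32, stack, stackend));  /*  0 */
        return 0;
}

As the hunk shows, __check_object_size() treats a result of 1 or 2 as a permitted copy and hands anything partial to pax_report_usercopy().
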
+diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
+index 9f9992b..8b59411 100644
+--- a/fs/ext2/balloc.c
++++ b/fs/ext2/balloc.c
+@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
+       free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
+       root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
+-      if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
++      if (free_blocks < root_blocks + 1 &&
+               !uid_eq(sbi->s_resuid, current_fsuid()) &&
+               (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
+-               !in_group_p (sbi->s_resgid))) {
++               !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
+               return 0;
+       }
+       return 1;
+diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
+index 22548f5..41521d8 100644
+--- a/fs/ext3/balloc.c
++++ b/fs/ext3/balloc.c
+@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
+       free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
+       root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
+-      if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
++      if (free_blocks < root_blocks + 1 &&
+               !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
+               (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
+-               !in_group_p (sbi->s_resgid))) {
++               !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
+               return 0;
+       }
+       return 1;
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 3742e4c..69a797f 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -528,8 +528,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
+       /* Hm, nope.  Are (enough) root reserved clusters available? */
+       if (uid_eq(sbi->s_resuid, current_fsuid()) ||
+           (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
+-          capable(CAP_SYS_RESOURCE) ||
+-          (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
++          (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
++          capable_nolog(CAP_SYS_RESOURCE)) {
+               if (free_clusters >= (nclusters + dirty_clusters +
+                                     resv_clusters))
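
The ext2/ext3/ext4 balloc hunks above reorder the reserved-blocks condition so the capability test comes last and goes through capable_nolog() rather than capable(). With short-circuit evaluation, the capability machinery (which can raise a log or audit event) is then only consulted once every cheaper condition has already passed. A small sketch of that ordering effect, with stub functions invented for the example:

#include <stdbool.h>
#include <stdio.h>

static int cap_calls;

static bool capable_stub(void)          /* stand-in for capable(CAP_SYS_RESOURCE) */
{
        cap_calls++;                    /* the real call can raise a log/audit event */
        return false;                   /* unprivileged caller */
}

static bool is_reserved_owner(void)     /* stand-in for the s_resuid/s_resgid tests */
{
        return true;
}

int main(void)
{
        bool low_on_blocks = true;      /* free_blocks < root_blocks + 1 */

        cap_calls = 0;
        if (low_on_blocks && !capable_stub() && !is_reserved_owner())
                ;                       /* old ordering */
        printf("capability checked early: %d call(s)\n", cap_calls);    /* 1 */

        cap_calls = 0;
        if (low_on_blocks && !is_reserved_owner() && !capable_stub())
                ;                       /* new ordering short-circuits first */
        printf("capability checked last:  %d call(s)\n", cap_calls);    /* 0 */
        return 0;
}
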
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 5aae3d1..b5da7f8 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1252,19 +1252,19 @@ struct ext4_sb_info {
+       unsigned long s_mb_last_start;
+       /* stats for buddy allocator */
+-      atomic_t s_bal_reqs;    /* number of reqs with len > 1 */
+-      atomic_t s_bal_success; /* we found long enough chunks */
+-      atomic_t s_bal_allocated;       /* in blocks */
+-      atomic_t s_bal_ex_scanned;      /* total extents scanned */
+-      atomic_t s_bal_goals;   /* goal hits */
+-      atomic_t s_bal_breaks;  /* too long searches */
+-      atomic_t s_bal_2orders; /* 2^order hits */
++      atomic_unchecked_t s_bal_reqs;  /* number of reqs with len > 1 */
++      atomic_unchecked_t s_bal_success;       /* we found long enough chunks */
++      atomic_unchecked_t s_bal_allocated;     /* in blocks */
++      atomic_unchecked_t s_bal_ex_scanned;    /* total extents scanned */
++      atomic_unchecked_t s_bal_goals; /* goal hits */
++      atomic_unchecked_t s_bal_breaks;        /* too long searches */
++      atomic_unchecked_t s_bal_2orders;       /* 2^order hits */
+       spinlock_t s_bal_lock;
+       unsigned long s_mb_buddies_generated;
+       unsigned long long s_mb_generation_time;
+-      atomic_t s_mb_lost_chunks;
+-      atomic_t s_mb_preallocated;
+-      atomic_t s_mb_discarded;
++      atomic_unchecked_t s_mb_lost_chunks;
++      atomic_unchecked_t s_mb_preallocated;
++      atomic_unchecked_t s_mb_discarded;
+       atomic_t s_lock_busy;
+       /* locality groups */
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 59c6750..a549154 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1865,7 +1865,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
+               BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
+               if (EXT4_SB(sb)->s_mb_stats)
+-                      atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
++                      atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
+               break;
+       }
+@@ -2170,7 +2170,7 @@ repeat:
+                       ac->ac_status = AC_STATUS_CONTINUE;
+                       ac->ac_flags |= EXT4_MB_HINT_FIRST;
+                       cr = 3;
+-                      atomic_inc(&sbi->s_mb_lost_chunks);
++                      atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
+                       goto repeat;
+               }
+       }
+@@ -2678,25 +2678,25 @@ int ext4_mb_release(struct super_block *sb)
+       if (sbi->s_mb_stats) {
+               ext4_msg(sb, KERN_INFO,
+                      "mballoc: %u blocks %u reqs (%u success)",
+-                              atomic_read(&sbi->s_bal_allocated),
+-                              atomic_read(&sbi->s_bal_reqs),
+-                              atomic_read(&sbi->s_bal_success));
++                              atomic_read_unchecked(&sbi->s_bal_allocated),
++                              atomic_read_unchecked(&sbi->s_bal_reqs),
++                              atomic_read_unchecked(&sbi->s_bal_success));
+               ext4_msg(sb, KERN_INFO,
+                     "mballoc: %u extents scanned, %u goal hits, "
+                               "%u 2^N hits, %u breaks, %u lost",
+-                              atomic_read(&sbi->s_bal_ex_scanned),
+-                              atomic_read(&sbi->s_bal_goals),
+-                              atomic_read(&sbi->s_bal_2orders),
+-                              atomic_read(&sbi->s_bal_breaks),
+-                              atomic_read(&sbi->s_mb_lost_chunks));
++                              atomic_read_unchecked(&sbi->s_bal_ex_scanned),
++                              atomic_read_unchecked(&sbi->s_bal_goals),
++                              atomic_read_unchecked(&sbi->s_bal_2orders),
++                              atomic_read_unchecked(&sbi->s_bal_breaks),
++                              atomic_read_unchecked(&sbi->s_mb_lost_chunks));
+               ext4_msg(sb, KERN_INFO,
+                      "mballoc: %lu generated and it took %Lu",
+                               sbi->s_mb_buddies_generated,
+                               sbi->s_mb_generation_time);
+               ext4_msg(sb, KERN_INFO,
+                      "mballoc: %u preallocated, %u discarded",
+-                              atomic_read(&sbi->s_mb_preallocated),
+-                              atomic_read(&sbi->s_mb_discarded));
++                              atomic_read_unchecked(&sbi->s_mb_preallocated),
++                              atomic_read_unchecked(&sbi->s_mb_discarded));
+       }
+       free_percpu(sbi->s_locality_groups);
+@@ -3150,16 +3150,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
+       struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+       if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
+-              atomic_inc(&sbi->s_bal_reqs);
+-              atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
++              atomic_inc_unchecked(&sbi->s_bal_reqs);
++              atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
+               if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
+-                      atomic_inc(&sbi->s_bal_success);
+-              atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
++                      atomic_inc_unchecked(&sbi->s_bal_success);
++              atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
+               if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
+                               ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
+-                      atomic_inc(&sbi->s_bal_goals);
++                      atomic_inc_unchecked(&sbi->s_bal_goals);
+               if (ac->ac_found > sbi->s_mb_max_to_scan)
+-                      atomic_inc(&sbi->s_bal_breaks);
++                      atomic_inc_unchecked(&sbi->s_bal_breaks);
+       }
+       if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
+@@ -3559,7 +3559,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+       trace_ext4_mb_new_inode_pa(ac, pa);
+       ext4_mb_use_inode_pa(ac, pa);
+-      atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
++      atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
+       ei = EXT4_I(ac->ac_inode);
+       grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
+@@ -3619,7 +3619,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
+       trace_ext4_mb_new_group_pa(ac, pa);
+       ext4_mb_use_group_pa(ac, pa);
+-      atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
++      atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
+       grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
+       lg = ac->ac_lg;
+@@ -3708,7 +3708,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
+                * from the bitmap and continue.
+                */
+       }
+-      atomic_add(free, &sbi->s_mb_discarded);
++      atomic_add_unchecked(free, &sbi->s_mb_discarded);
+       return err;
+ }
+@@ -3726,7 +3726,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
+       ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
+       BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
+       mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
+-      atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
++      atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
+       trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
+       return 0;
+diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
+index 214461e..3614c89 100644
+--- a/fs/ext4/mmp.c
++++ b/fs/ext4/mmp.c
+@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
+ void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
+                   const char *function, unsigned int line, const char *msg)
+ {
+-      __ext4_warning(sb, function, line, msg);
++      __ext4_warning(sb, function, line, "%s", msg);
+       __ext4_warning(sb, function, line,
+                      "MMP failure info: last update time: %llu, last update "
+                      "node: %s, last update device: %s\n",
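
The fs/ext4/mmp.c hunk above stops passing a caller-supplied message directly as the format argument of the printf-like __ext4_warning() and routes it through a constant "%s" instead. The same rule in a standalone userspace sketch; the message text here is made up:

#include <stdio.h>

int main(void)
{
        const char *msg = "last write was 100%sure bogus";     /* untrusted text containing '%' */

        printf("%s\n", msg);            /* safe: msg is printed as data */
        /* printf(msg); */              /* unsafe: the "%s" inside msg would be parsed as a
                                           conversion and read a non-existent argument */
        return 0;
}
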
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 49d3c01..9579efd 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -79,12 +79,20 @@ static int verify_group_input(struct super_block *sb,
+       ext4_fsblk_t end = start + input->blocks_count;
+       ext4_group_t group = input->group;
+       ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
+-      unsigned overhead = ext4_group_overhead_blocks(sb, group);
+-      ext4_fsblk_t metaend = start + overhead;
++      unsigned overhead;
++      ext4_fsblk_t metaend;
+       struct buffer_head *bh = NULL;
+       ext4_grpblk_t free_blocks_count, offset;
+       int err = -EINVAL;
++      if (group != sbi->s_groups_count) {
++              ext4_warning(sb, "Cannot add at group %u (only %u groups)",
++                           input->group, sbi->s_groups_count);
++              return -EINVAL;
++      }
++
++      overhead = ext4_group_overhead_blocks(sb, group);
++      metaend = start + overhead;
+       input->free_blocks_count = free_blocks_count =
+               input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
+@@ -96,10 +104,7 @@ static int verify_group_input(struct super_block *sb,
+                      free_blocks_count, input->reserved_blocks);
+       ext4_get_group_no_and_offset(sb, start, NULL, &offset);
+-      if (group != sbi->s_groups_count)
+-              ext4_warning(sb, "Cannot add at group %u (only %u groups)",
+-                           input->group, sbi->s_groups_count);
+-      else if (offset != 0)
++      if (offset != 0)
+                       ext4_warning(sb, "Last group not full");
+       else if (input->reserved_blocks > input->blocks_count / 5)
+               ext4_warning(sb, "Reserved blocks too high (%u)",
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 3f7c39e..227f24f 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1236,7 +1236,7 @@ static ext4_fsblk_t get_sb_block(void **data)
+ }
+ #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
+-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
++static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
+       "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
+ #ifdef CONFIG_QUOTA
+@@ -2372,7 +2372,7 @@ struct ext4_attr {
+       ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
+                        const char *, size_t);
+       int offset;
+-};
++} __do_const;
+ static int parse_strtoull(const char *buf,
+               unsigned long long max, unsigned long long *value)
+diff --git a/fs/fcntl.c b/fs/fcntl.c
+index 6599222..e7bf0de 100644
+--- a/fs/fcntl.c
++++ b/fs/fcntl.c
+@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
+       if (err)
+               return err;
++      if (gr_handle_chroot_fowner(pid, type))
++              return -ENOENT;
++      if (gr_check_protected_task_fowner(pid, type))
++              return -EACCES;
++
+       f_modown(filp, pid, type, force);
+       return 0;
+ }
+diff --git a/fs/fhandle.c b/fs/fhandle.c
+index 999ff5c..41f4109 100644
+--- a/fs/fhandle.c
++++ b/fs/fhandle.c
+@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
+       } else
+               retval = 0;
+       /* copy the mount id */
+-      if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
+-                       sizeof(*mnt_id)) ||
++      if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
+           copy_to_user(ufh, handle,
+                        sizeof(struct file_handle) + handle_bytes))
+               retval = -EFAULT;
+diff --git a/fs/file.c b/fs/file.c
+index 4a78f98..9447397 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -16,6 +16,7 @@
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+ #include <linux/file.h>
++#include <linux/security.h>
+ #include <linux/fdtable.h>
+ #include <linux/bitops.h>
+ #include <linux/interrupt.h>
+@@ -828,6 +829,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
+       if (!file)
+               return __close_fd(files, fd);
++      gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
+       if (fd >= rlimit(RLIMIT_NOFILE))
+               return -EBADF;
+@@ -854,6 +856,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
+       if (unlikely(oldfd == newfd))
+               return -EINVAL;
++      gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
+       if (newfd >= rlimit(RLIMIT_NOFILE))
+               return -EBADF;
+@@ -909,6 +912,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
+ int f_dupfd(unsigned int from, struct file *file, unsigned flags)
+ {
+       int err;
++      gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
+       if (from >= rlimit(RLIMIT_NOFILE))
+               return -EINVAL;
+       err = alloc_fd(from, flags);
+diff --git a/fs/filesystems.c b/fs/filesystems.c
+index 92567d9..fcd8cbf 100644
+--- a/fs/filesystems.c
++++ b/fs/filesystems.c
+@@ -273,7 +273,11 @@ struct file_system_type *get_fs_type(const char *name)
+       int len = dot ? dot - name : strlen(name);
+       fs = __get_fs_type(name, len);
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++      if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
++#else
+       if (!fs && (request_module("fs-%.*s", len, name) == 0))
++#endif
+               fs = __get_fs_type(name, len);
+       if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
+diff --git a/fs/fs_struct.c b/fs/fs_struct.c
+index d8ac61d..79a36f0 100644
+--- a/fs/fs_struct.c
++++ b/fs/fs_struct.c
+@@ -4,6 +4,7 @@
+ #include <linux/path.h>
+ #include <linux/slab.h>
+ #include <linux/fs_struct.h>
++#include <linux/grsecurity.h>
+ #include "internal.h"
+ /*
+@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
+       write_seqcount_begin(&fs->seq);
+       old_root = fs->root;
+       fs->root = *path;
++      gr_set_chroot_entries(current, path);
+       write_seqcount_end(&fs->seq);
+       spin_unlock(&fs->lock);
+       if (old_root.dentry)
+@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
+                       int hits = 0;
+                       spin_lock(&fs->lock);
+                       write_seqcount_begin(&fs->seq);
++                      /* this root replacement is only done by pivot_root,
++                         leave grsec's chroot tagging alone for this task
++                         so that a pivoted root isn't treated as a chroot
++                      */
+                       hits += replace_path(&fs->root, old_root, new_root);
+                       hits += replace_path(&fs->pwd, old_root, new_root);
+                       write_seqcount_end(&fs->seq);
+@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
+               task_lock(tsk);
+               spin_lock(&fs->lock);
+               tsk->fs = NULL;
+-              kill = !--fs->users;
++              gr_clear_chroot_entries(tsk);
++              kill = !atomic_dec_return(&fs->users);
+               spin_unlock(&fs->lock);
+               task_unlock(tsk);
+               if (kill)
+@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
+       struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
+       /* We don't need to lock fs - think why ;-) */
+       if (fs) {
+-              fs->users = 1;
++              atomic_set(&fs->users, 1);
+               fs->in_exec = 0;
+               spin_lock_init(&fs->lock);
+               seqcount_init(&fs->seq);
+@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
+               spin_lock(&old->lock);
+               fs->root = old->root;
+               path_get(&fs->root);
++              /* instead of calling gr_set_chroot_entries here,
++                 we call it from every caller of this function
++              */
+               fs->pwd = old->pwd;
+               path_get(&fs->pwd);
+               spin_unlock(&old->lock);
+@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
+       task_lock(current);
+       spin_lock(&fs->lock);
+-      kill = !--fs->users;
++      kill = !atomic_dec_return(&fs->users);
+       current->fs = new_fs;
++      gr_set_chroot_entries(current, &new_fs->root);
+       spin_unlock(&fs->lock);
+       task_unlock(current);
+@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
+ int current_umask(void)
+ {
+-      return current->fs->umask;
++      return current->fs->umask | gr_acl_umask();
+ }
+ EXPORT_SYMBOL(current_umask);
+ /* to be mentioned only in INIT_TASK */
+ struct fs_struct init_fs = {
+-      .users          = 1,
++      .users          = ATOMIC_INIT(1),
+       .lock           = __SPIN_LOCK_UNLOCKED(init_fs.lock),
+       .seq            = SEQCNT_ZERO,
+       .umask          = 0022,
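
In the fs/fs_struct.c hunk above, fs->users becomes an atomic counter and 'kill = !--fs->users' becomes 'kill = !atomic_dec_return(&fs->users)', so concurrent droppers cannot both observe a stale non-zero count. A hedged C11 sketch of the same drop-to-zero test, using atomic_fetch_sub() in place of the kernel's atomic_dec_return() and invented type and function names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fs_users {
        atomic_int users;       /* stands in for the patched fs_struct->users */
};

static bool drop_ref(struct fs_users *fs)
{
        /* atomic_fetch_sub returns the old value; old == 1 means this caller took
         * the count to zero and must free the object, which is what
         * 'kill = !atomic_dec_return(&fs->users)' expresses in the hunk above. */
        return atomic_fetch_sub(&fs->users, 1) == 1;
}

int main(void)
{
        struct fs_users fs;

        atomic_init(&fs.users, 2);
        printf("first drop frees:  %s\n", drop_ref(&fs) ? "yes" : "no");  /* no  */
        printf("second drop frees: %s\n", drop_ref(&fs) ? "yes" : "no");  /* yes */
        return 0;
}
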
+diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
+index e2cba1f..17a25bb 100644
+--- a/fs/fscache/cookie.c
++++ b/fs/fscache/cookie.c
+@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
+              parent ? (char *) parent->def->name : "<no-parent>",
+              def->name, netfs_data);
+-      fscache_stat(&fscache_n_acquires);
++      fscache_stat_unchecked(&fscache_n_acquires);
+       /* if there's no parent cookie, then we don't create one here either */
+       if (!parent) {
+-              fscache_stat(&fscache_n_acquires_null);
++              fscache_stat_unchecked(&fscache_n_acquires_null);
+               _leave(" [no parent]");
+               return NULL;
+       }
+@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
+       /* allocate and initialise a cookie */
+       cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
+       if (!cookie) {
+-              fscache_stat(&fscache_n_acquires_oom);
++              fscache_stat_unchecked(&fscache_n_acquires_oom);
+               _leave(" [ENOMEM]");
+               return NULL;
+       }
+@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
+       switch (cookie->def->type) {
+       case FSCACHE_COOKIE_TYPE_INDEX:
+-              fscache_stat(&fscache_n_cookie_index);
++              fscache_stat_unchecked(&fscache_n_cookie_index);
+               break;
+       case FSCACHE_COOKIE_TYPE_DATAFILE:
+-              fscache_stat(&fscache_n_cookie_data);
++              fscache_stat_unchecked(&fscache_n_cookie_data);
+               break;
+       default:
+-              fscache_stat(&fscache_n_cookie_special);
++              fscache_stat_unchecked(&fscache_n_cookie_special);
+               break;
+       }
+@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
+               if (fscache_acquire_non_index_cookie(cookie) < 0) {
+                       atomic_dec(&parent->n_children);
+                       __fscache_cookie_put(cookie);
+-                      fscache_stat(&fscache_n_acquires_nobufs);
++                      fscache_stat_unchecked(&fscache_n_acquires_nobufs);
+                       _leave(" = NULL");
+                       return NULL;
+               }
+       }
+-      fscache_stat(&fscache_n_acquires_ok);
++      fscache_stat_unchecked(&fscache_n_acquires_ok);
+       _leave(" = %p", cookie);
+       return cookie;
+ }
+@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
+       cache = fscache_select_cache_for_object(cookie->parent);
+       if (!cache) {
+               up_read(&fscache_addremove_sem);
+-              fscache_stat(&fscache_n_acquires_no_cache);
++              fscache_stat_unchecked(&fscache_n_acquires_no_cache);
+               _leave(" = -ENOMEDIUM [no cache]");
+               return -ENOMEDIUM;
+       }
+@@ -255,12 +255,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
+       object = cache->ops->alloc_object(cache, cookie);
+       fscache_stat_d(&fscache_n_cop_alloc_object);
+       if (IS_ERR(object)) {
+-              fscache_stat(&fscache_n_object_no_alloc);
++              fscache_stat_unchecked(&fscache_n_object_no_alloc);
+               ret = PTR_ERR(object);
+               goto error;
+       }
+-      fscache_stat(&fscache_n_object_alloc);
++      fscache_stat_unchecked(&fscache_n_object_alloc);
+       object->debug_id = atomic_inc_return(&fscache_object_debug_id);
+@@ -376,7 +376,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
+       _enter("{%s}", cookie->def->name);
+-      fscache_stat(&fscache_n_invalidates);
++      fscache_stat_unchecked(&fscache_n_invalidates);
+       /* Only permit invalidation of data files.  Invalidating an index will
+        * require the caller to release all its attachments to the tree rooted
+@@ -434,10 +434,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
+ {
+       struct fscache_object *object;
+-      fscache_stat(&fscache_n_updates);
++      fscache_stat_unchecked(&fscache_n_updates);
+       if (!cookie) {
+-              fscache_stat(&fscache_n_updates_null);
++              fscache_stat_unchecked(&fscache_n_updates_null);
+               _leave(" [no cookie]");
+               return;
+       }
+@@ -471,12 +471,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
+       struct fscache_object *object;
+       unsigned long event;
+-      fscache_stat(&fscache_n_relinquishes);
++      fscache_stat_unchecked(&fscache_n_relinquishes);
+       if (retire)
+-              fscache_stat(&fscache_n_relinquishes_retire);
++              fscache_stat_unchecked(&fscache_n_relinquishes_retire);
+       if (!cookie) {
+-              fscache_stat(&fscache_n_relinquishes_null);
++              fscache_stat_unchecked(&fscache_n_relinquishes_null);
+               _leave(" [no cookie]");
+               return;
+       }
+@@ -492,7 +492,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
+       /* wait for the cookie to finish being instantiated (or to fail) */
+       if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
+-              fscache_stat(&fscache_n_relinquishes_waitcrt);
++              fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
+               wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
+                           fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+       }
+diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
+index ee38fef..0a326d4 100644
+--- a/fs/fscache/internal.h
++++ b/fs/fscache/internal.h
+@@ -148,101 +148,101 @@ extern void fscache_proc_cleanup(void);
+  * stats.c
+  */
+ #ifdef CONFIG_FSCACHE_STATS
+-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
+-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
++extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
++extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
+-extern atomic_t fscache_n_op_pend;
+-extern atomic_t fscache_n_op_run;
+-extern atomic_t fscache_n_op_enqueue;
+-extern atomic_t fscache_n_op_deferred_release;
+-extern atomic_t fscache_n_op_release;
+-extern atomic_t fscache_n_op_gc;
+-extern atomic_t fscache_n_op_cancelled;
+-extern atomic_t fscache_n_op_rejected;
++extern atomic_unchecked_t fscache_n_op_pend;
++extern atomic_unchecked_t fscache_n_op_run;
++extern atomic_unchecked_t fscache_n_op_enqueue;
++extern atomic_unchecked_t fscache_n_op_deferred_release;
++extern atomic_unchecked_t fscache_n_op_release;
++extern atomic_unchecked_t fscache_n_op_gc;
++extern atomic_unchecked_t fscache_n_op_cancelled;
++extern atomic_unchecked_t fscache_n_op_rejected;
+-extern atomic_t fscache_n_attr_changed;
+-extern atomic_t fscache_n_attr_changed_ok;
+-extern atomic_t fscache_n_attr_changed_nobufs;
+-extern atomic_t fscache_n_attr_changed_nomem;
+-extern atomic_t fscache_n_attr_changed_calls;
++extern atomic_unchecked_t fscache_n_attr_changed;
++extern atomic_unchecked_t fscache_n_attr_changed_ok;
++extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
++extern atomic_unchecked_t fscache_n_attr_changed_nomem;
++extern atomic_unchecked_t fscache_n_attr_changed_calls;
+-extern atomic_t fscache_n_allocs;
+-extern atomic_t fscache_n_allocs_ok;
+-extern atomic_t fscache_n_allocs_wait;
+-extern atomic_t fscache_n_allocs_nobufs;
+-extern atomic_t fscache_n_allocs_intr;
+-extern atomic_t fscache_n_allocs_object_dead;
+-extern atomic_t fscache_n_alloc_ops;
+-extern atomic_t fscache_n_alloc_op_waits;
++extern atomic_unchecked_t fscache_n_allocs;
++extern atomic_unchecked_t fscache_n_allocs_ok;
++extern atomic_unchecked_t fscache_n_allocs_wait;
++extern atomic_unchecked_t fscache_n_allocs_nobufs;
++extern atomic_unchecked_t fscache_n_allocs_intr;
++extern atomic_unchecked_t fscache_n_allocs_object_dead;
++extern atomic_unchecked_t fscache_n_alloc_ops;
++extern atomic_unchecked_t fscache_n_alloc_op_waits;
+-extern atomic_t fscache_n_retrievals;
+-extern atomic_t fscache_n_retrievals_ok;
+-extern atomic_t fscache_n_retrievals_wait;
+-extern atomic_t fscache_n_retrievals_nodata;
+-extern atomic_t fscache_n_retrievals_nobufs;
+-extern atomic_t fscache_n_retrievals_intr;
+-extern atomic_t fscache_n_retrievals_nomem;
+-extern atomic_t fscache_n_retrievals_object_dead;
+-extern atomic_t fscache_n_retrieval_ops;
+-extern atomic_t fscache_n_retrieval_op_waits;
++extern atomic_unchecked_t fscache_n_retrievals;
++extern atomic_unchecked_t fscache_n_retrievals_ok;
++extern atomic_unchecked_t fscache_n_retrievals_wait;
++extern atomic_unchecked_t fscache_n_retrievals_nodata;
++extern atomic_unchecked_t fscache_n_retrievals_nobufs;
++extern atomic_unchecked_t fscache_n_retrievals_intr;
++extern atomic_unchecked_t fscache_n_retrievals_nomem;
++extern atomic_unchecked_t fscache_n_retrievals_object_dead;
++extern atomic_unchecked_t fscache_n_retrieval_ops;
++extern atomic_unchecked_t fscache_n_retrieval_op_waits;
+-extern atomic_t fscache_n_stores;
+-extern atomic_t fscache_n_stores_ok;
+-extern atomic_t fscache_n_stores_again;
+-extern atomic_t fscache_n_stores_nobufs;
+-extern atomic_t fscache_n_stores_oom;
+-extern atomic_t fscache_n_store_ops;
+-extern atomic_t fscache_n_store_calls;
+-extern atomic_t fscache_n_store_pages;
+-extern atomic_t fscache_n_store_radix_deletes;
+-extern atomic_t fscache_n_store_pages_over_limit;
++extern atomic_unchecked_t fscache_n_stores;
++extern atomic_unchecked_t fscache_n_stores_ok;
++extern atomic_unchecked_t fscache_n_stores_again;
++extern atomic_unchecked_t fscache_n_stores_nobufs;
++extern atomic_unchecked_t fscache_n_stores_oom;
++extern atomic_unchecked_t fscache_n_store_ops;
++extern atomic_unchecked_t fscache_n_store_calls;
++extern atomic_unchecked_t fscache_n_store_pages;
++extern atomic_unchecked_t fscache_n_store_radix_deletes;
++extern atomic_unchecked_t fscache_n_store_pages_over_limit;
+-extern atomic_t fscache_n_store_vmscan_not_storing;
+-extern atomic_t fscache_n_store_vmscan_gone;
+-extern atomic_t fscache_n_store_vmscan_busy;
+-extern atomic_t fscache_n_store_vmscan_cancelled;
+-extern atomic_t fscache_n_store_vmscan_wait;
++extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
++extern atomic_unchecked_t fscache_n_store_vmscan_gone;
++extern atomic_unchecked_t fscache_n_store_vmscan_busy;
++extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
++extern atomic_unchecked_t fscache_n_store_vmscan_wait;
+-extern atomic_t fscache_n_marks;
+-extern atomic_t fscache_n_uncaches;
++extern atomic_unchecked_t fscache_n_marks;
++extern atomic_unchecked_t fscache_n_uncaches;
+-extern atomic_t fscache_n_acquires;
+-extern atomic_t fscache_n_acquires_null;
+-extern atomic_t fscache_n_acquires_no_cache;
+-extern atomic_t fscache_n_acquires_ok;
+-extern atomic_t fscache_n_acquires_nobufs;
+-extern atomic_t fscache_n_acquires_oom;
++extern atomic_unchecked_t fscache_n_acquires;
++extern atomic_unchecked_t fscache_n_acquires_null;
++extern atomic_unchecked_t fscache_n_acquires_no_cache;
++extern atomic_unchecked_t fscache_n_acquires_ok;
++extern atomic_unchecked_t fscache_n_acquires_nobufs;
++extern atomic_unchecked_t fscache_n_acquires_oom;
+-extern atomic_t fscache_n_invalidates;
+-extern atomic_t fscache_n_invalidates_run;
++extern atomic_unchecked_t fscache_n_invalidates;
++extern atomic_unchecked_t fscache_n_invalidates_run;
+-extern atomic_t fscache_n_updates;
+-extern atomic_t fscache_n_updates_null;
+-extern atomic_t fscache_n_updates_run;
++extern atomic_unchecked_t fscache_n_updates;
++extern atomic_unchecked_t fscache_n_updates_null;
++extern atomic_unchecked_t fscache_n_updates_run;
+-extern atomic_t fscache_n_relinquishes;
+-extern atomic_t fscache_n_relinquishes_null;
+-extern atomic_t fscache_n_relinquishes_waitcrt;
+-extern atomic_t fscache_n_relinquishes_retire;
++extern atomic_unchecked_t fscache_n_relinquishes;
++extern atomic_unchecked_t fscache_n_relinquishes_null;
++extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
++extern atomic_unchecked_t fscache_n_relinquishes_retire;
+-extern atomic_t fscache_n_cookie_index;
+-extern atomic_t fscache_n_cookie_data;
+-extern atomic_t fscache_n_cookie_special;
++extern atomic_unchecked_t fscache_n_cookie_index;
++extern atomic_unchecked_t fscache_n_cookie_data;
++extern atomic_unchecked_t fscache_n_cookie_special;
+-extern atomic_t fscache_n_object_alloc;
+-extern atomic_t fscache_n_object_no_alloc;
+-extern atomic_t fscache_n_object_lookups;
+-extern atomic_t fscache_n_object_lookups_negative;
+-extern atomic_t fscache_n_object_lookups_positive;
+-extern atomic_t fscache_n_object_lookups_timed_out;
+-extern atomic_t fscache_n_object_created;
+-extern atomic_t fscache_n_object_avail;
+-extern atomic_t fscache_n_object_dead;
++extern atomic_unchecked_t fscache_n_object_alloc;
++extern atomic_unchecked_t fscache_n_object_no_alloc;
++extern atomic_unchecked_t fscache_n_object_lookups;
++extern atomic_unchecked_t fscache_n_object_lookups_negative;
++extern atomic_unchecked_t fscache_n_object_lookups_positive;
++extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
++extern atomic_unchecked_t fscache_n_object_created;
++extern atomic_unchecked_t fscache_n_object_avail;
++extern atomic_unchecked_t fscache_n_object_dead;
+-extern atomic_t fscache_n_checkaux_none;
+-extern atomic_t fscache_n_checkaux_okay;
+-extern atomic_t fscache_n_checkaux_update;
+-extern atomic_t fscache_n_checkaux_obsolete;
++extern atomic_unchecked_t fscache_n_checkaux_none;
++extern atomic_unchecked_t fscache_n_checkaux_okay;
++extern atomic_unchecked_t fscache_n_checkaux_update;
++extern atomic_unchecked_t fscache_n_checkaux_obsolete;
+ extern atomic_t fscache_n_cop_alloc_object;
+ extern atomic_t fscache_n_cop_lookup_object;
+@@ -267,6 +267,11 @@ static inline void fscache_stat(atomic_t *stat)
+       atomic_inc(stat);
+ }
++static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
++{
++      atomic_inc_unchecked(stat);
++}
++
+ static inline void fscache_stat_d(atomic_t *stat)
+ {
+       atomic_dec(stat);
+@@ -279,6 +284,7 @@ extern const struct file_operations fscache_stats_fops;
+ #define __fscache_stat(stat) (NULL)
+ #define fscache_stat(stat) do {} while (0)
++#define fscache_stat_unchecked(stat) do {} while (0)
+ #define fscache_stat_d(stat) do {} while (0)
+ #endif
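
The internal.h hunk above captures the pattern used throughout this fscache conversion: every pure statistics counter moves from atomic_t to atomic_unchecked_t, and a parallel fscache_stat_unchecked() helper (or a no-op macro when CONFIG_FSCACHE_STATS is off) increments it, while reference-count-like fields stay on plain atomic_t. The sketch below is a minimal illustration of how the unchecked primitives are assumed to degrade to the stock atomics when the hardening is not compiled in; the CONFIG_PAX_REFCOUNT guard, the typedef and the fallback macros are assumptions, not taken from this hunk.

/*
 * Sketch only (assumption): fallback definitions with the refcount
 * hardening disabled, so fscache_stat_unchecked() behaves exactly like
 * the original fscache_stat() did before this change.
 */
#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)                atomic_read(v)
#define atomic_inc_unchecked(v)                 atomic_inc(v)
#define atomic_inc_return_unchecked(v)          atomic_inc_return(v)
#define atomic_add_return_unchecked(i, v)       atomic_add_return((i), (v))
#endif

static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
{
        atomic_inc_unchecked(stat);     /* counter wraparound is harmless here */
}
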
+diff --git a/fs/fscache/object.c b/fs/fscache/object.c
+index 50d41c1..10ee117 100644
+--- a/fs/fscache/object.c
++++ b/fs/fscache/object.c
+@@ -143,7 +143,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
+               /* Invalidate an object on disk */
+       case FSCACHE_OBJECT_INVALIDATING:
+               clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
+-              fscache_stat(&fscache_n_invalidates_run);
++              fscache_stat_unchecked(&fscache_n_invalidates_run);
+               fscache_stat(&fscache_n_cop_invalidate_object);
+               fscache_invalidate_object(object);
+               fscache_stat_d(&fscache_n_cop_invalidate_object);
+@@ -153,7 +153,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
+               /* update the object metadata on disk */
+       case FSCACHE_OBJECT_UPDATING:
+               clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
+-              fscache_stat(&fscache_n_updates_run);
++              fscache_stat_unchecked(&fscache_n_updates_run);
+               fscache_stat(&fscache_n_cop_update_object);
+               object->cache->ops->update_object(object);
+               fscache_stat_d(&fscache_n_cop_update_object);
+@@ -242,7 +242,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
+               spin_lock(&object->lock);
+               object->state = FSCACHE_OBJECT_DEAD;
+               spin_unlock(&object->lock);
+-              fscache_stat(&fscache_n_object_dead);
++              fscache_stat_unchecked(&fscache_n_object_dead);
+               goto terminal_transit;
+               /* handle the parent cache of this object being withdrawn from
+@@ -257,7 +257,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
+               spin_lock(&object->lock);
+               object->state = FSCACHE_OBJECT_DEAD;
+               spin_unlock(&object->lock);
+-              fscache_stat(&fscache_n_object_dead);
++              fscache_stat_unchecked(&fscache_n_object_dead);
+               goto terminal_transit;
+               /* complain about the object being woken up once it is
+@@ -495,7 +495,7 @@ static void fscache_lookup_object(struct fscache_object *object)
+              parent->cookie->def->name, cookie->def->name,
+              object->cache->tag->name);
+-      fscache_stat(&fscache_n_object_lookups);
++      fscache_stat_unchecked(&fscache_n_object_lookups);
+       fscache_stat(&fscache_n_cop_lookup_object);
+       ret = object->cache->ops->lookup_object(object);
+       fscache_stat_d(&fscache_n_cop_lookup_object);
+@@ -506,7 +506,7 @@ static void fscache_lookup_object(struct fscache_object *object)
+       if (ret == -ETIMEDOUT) {
+               /* probably stuck behind another object, so move this one to
+                * the back of the queue */
+-              fscache_stat(&fscache_n_object_lookups_timed_out);
++              fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
+               set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+       }
+@@ -529,7 +529,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
+       spin_lock(&object->lock);
+       if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
+-              fscache_stat(&fscache_n_object_lookups_negative);
++              fscache_stat_unchecked(&fscache_n_object_lookups_negative);
+               /* transit here to allow write requests to begin stacking up
+                * and read requests to begin returning ENODATA */
+@@ -575,7 +575,7 @@ void fscache_obtained_object(struct fscache_object *object)
+        * result, in which case there may be data available */
+       spin_lock(&object->lock);
+       if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
+-              fscache_stat(&fscache_n_object_lookups_positive);
++              fscache_stat_unchecked(&fscache_n_object_lookups_positive);
+               clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+@@ -589,7 +589,7 @@ void fscache_obtained_object(struct fscache_object *object)
+               set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+       } else {
+               ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
+-              fscache_stat(&fscache_n_object_created);
++              fscache_stat_unchecked(&fscache_n_object_created);
+               object->state = FSCACHE_OBJECT_AVAILABLE;
+               spin_unlock(&object->lock);
+@@ -634,7 +634,7 @@ static void fscache_object_available(struct fscache_object *object)
+       fscache_enqueue_dependents(object);
+       fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
+-      fscache_stat(&fscache_n_object_avail);
++      fscache_stat_unchecked(&fscache_n_object_avail);
+       _leave("");
+ }
+@@ -894,7 +894,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
+       enum fscache_checkaux result;
+       if (!object->cookie->def->check_aux) {
+-              fscache_stat(&fscache_n_checkaux_none);
++              fscache_stat_unchecked(&fscache_n_checkaux_none);
+               return FSCACHE_CHECKAUX_OKAY;
+       }
+@@ -903,17 +903,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
+       switch (result) {
+               /* entry okay as is */
+       case FSCACHE_CHECKAUX_OKAY:
+-              fscache_stat(&fscache_n_checkaux_okay);
++              fscache_stat_unchecked(&fscache_n_checkaux_okay);
+               break;
+               /* entry requires update */
+       case FSCACHE_CHECKAUX_NEEDS_UPDATE:
+-              fscache_stat(&fscache_n_checkaux_update);
++              fscache_stat_unchecked(&fscache_n_checkaux_update);
+               break;
+               /* entry requires deletion */
+       case FSCACHE_CHECKAUX_OBSOLETE:
+-              fscache_stat(&fscache_n_checkaux_obsolete);
++              fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
+               break;
+       default:
+diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
+index 762a9ec..2023284 100644
+--- a/fs/fscache/operation.c
++++ b/fs/fscache/operation.c
+@@ -17,7 +17,7 @@
+ #include <linux/slab.h>
+ #include "internal.h"
+-atomic_t fscache_op_debug_id;
++atomic_unchecked_t fscache_op_debug_id;
+ EXPORT_SYMBOL(fscache_op_debug_id);
+ /**
+@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
+       ASSERTCMP(atomic_read(&op->usage), >, 0);
+       ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
+-      fscache_stat(&fscache_n_op_enqueue);
++      fscache_stat_unchecked(&fscache_n_op_enqueue);
+       switch (op->flags & FSCACHE_OP_TYPE) {
+       case FSCACHE_OP_ASYNC:
+               _debug("queue async");
+@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
+               wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
+       if (op->processor)
+               fscache_enqueue_operation(op);
+-      fscache_stat(&fscache_n_op_run);
++      fscache_stat_unchecked(&fscache_n_op_run);
+ }
+ /*
+@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
+               if (object->n_in_progress > 0) {
+                       atomic_inc(&op->usage);
+                       list_add_tail(&op->pend_link, &object->pending_ops);
+-                      fscache_stat(&fscache_n_op_pend);
++                      fscache_stat_unchecked(&fscache_n_op_pend);
+               } else if (!list_empty(&object->pending_ops)) {
+                       atomic_inc(&op->usage);
+                       list_add_tail(&op->pend_link, &object->pending_ops);
+-                      fscache_stat(&fscache_n_op_pend);
++                      fscache_stat_unchecked(&fscache_n_op_pend);
+                       fscache_start_operations(object);
+               } else {
+                       ASSERTCMP(object->n_in_progress, ==, 0);
+@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
+               object->n_exclusive++;  /* reads and writes must wait */
+               atomic_inc(&op->usage);
+               list_add_tail(&op->pend_link, &object->pending_ops);
+-              fscache_stat(&fscache_n_op_pend);
++              fscache_stat_unchecked(&fscache_n_op_pend);
+               ret = 0;
+       } else {
+               /* If we're in any other state, there must have been an I/O
+@@ -215,11 +215,11 @@ int fscache_submit_op(struct fscache_object *object,
+               if (object->n_exclusive > 0) {
+                       atomic_inc(&op->usage);
+                       list_add_tail(&op->pend_link, &object->pending_ops);
+-                      fscache_stat(&fscache_n_op_pend);
++                      fscache_stat_unchecked(&fscache_n_op_pend);
+               } else if (!list_empty(&object->pending_ops)) {
+                       atomic_inc(&op->usage);
+                       list_add_tail(&op->pend_link, &object->pending_ops);
+-                      fscache_stat(&fscache_n_op_pend);
++                      fscache_stat_unchecked(&fscache_n_op_pend);
+                       fscache_start_operations(object);
+               } else {
+                       ASSERTCMP(object->n_exclusive, ==, 0);
+@@ -231,12 +231,12 @@ int fscache_submit_op(struct fscache_object *object,
+               object->n_ops++;
+               atomic_inc(&op->usage);
+               list_add_tail(&op->pend_link, &object->pending_ops);
+-              fscache_stat(&fscache_n_op_pend);
++              fscache_stat_unchecked(&fscache_n_op_pend);
+               ret = 0;
+       } else if (object->state == FSCACHE_OBJECT_DYING ||
+                  object->state == FSCACHE_OBJECT_LC_DYING ||
+                  object->state == FSCACHE_OBJECT_WITHDRAWING) {
+-              fscache_stat(&fscache_n_op_rejected);
++              fscache_stat_unchecked(&fscache_n_op_rejected);
+               op->state = FSCACHE_OP_ST_CANCELLED;
+               ret = -ENOBUFS;
+       } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
+@@ -315,7 +315,7 @@ int fscache_cancel_op(struct fscache_operation *op,
+       ret = -EBUSY;
+       if (op->state == FSCACHE_OP_ST_PENDING) {
+               ASSERT(!list_empty(&op->pend_link));
+-              fscache_stat(&fscache_n_op_cancelled);
++              fscache_stat_unchecked(&fscache_n_op_cancelled);
+               list_del_init(&op->pend_link);
+               if (do_cancel)
+                       do_cancel(op);
+@@ -347,7 +347,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
+       while (!list_empty(&object->pending_ops)) {
+               op = list_entry(object->pending_ops.next,
+                               struct fscache_operation, pend_link);
+-              fscache_stat(&fscache_n_op_cancelled);
++              fscache_stat_unchecked(&fscache_n_op_cancelled);
+               list_del_init(&op->pend_link);
+               ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
+@@ -419,7 +419,7 @@ void fscache_put_operation(struct fscache_operation *op)
+                   op->state, ==, FSCACHE_OP_ST_CANCELLED);
+       op->state = FSCACHE_OP_ST_DEAD;
+-      fscache_stat(&fscache_n_op_release);
++      fscache_stat_unchecked(&fscache_n_op_release);
+       if (op->release) {
+               op->release(op);
+@@ -442,7 +442,7 @@ void fscache_put_operation(struct fscache_operation *op)
+        * lock, and defer it otherwise */
+       if (!spin_trylock(&object->lock)) {
+               _debug("defer put");
+-              fscache_stat(&fscache_n_op_deferred_release);
++              fscache_stat_unchecked(&fscache_n_op_deferred_release);
+               cache = object->cache;
+               spin_lock(&cache->op_gc_list_lock);
+@@ -495,7 +495,7 @@ void fscache_operation_gc(struct work_struct *work)
+               _debug("GC DEFERRED REL OBJ%x OP%x",
+                      object->debug_id, op->debug_id);
+-              fscache_stat(&fscache_n_op_gc);
++              fscache_stat_unchecked(&fscache_n_op_gc);
+               ASSERTCMP(atomic_read(&op->usage), ==, 0);
+               ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
+diff --git a/fs/fscache/page.c b/fs/fscache/page.c
+index ff000e5..c44ec6d 100644
+--- a/fs/fscache/page.c
++++ b/fs/fscache/page.c
+@@ -61,7 +61,7 @@ try_again:
+       val = radix_tree_lookup(&cookie->stores, page->index);
+       if (!val) {
+               rcu_read_unlock();
+-              fscache_stat(&fscache_n_store_vmscan_not_storing);
++              fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
+               __fscache_uncache_page(cookie, page);
+               return true;
+       }
+@@ -91,11 +91,11 @@ try_again:
+       spin_unlock(&cookie->stores_lock);
+       if (xpage) {
+-              fscache_stat(&fscache_n_store_vmscan_cancelled);
+-              fscache_stat(&fscache_n_store_radix_deletes);
++              fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
++              fscache_stat_unchecked(&fscache_n_store_radix_deletes);
+               ASSERTCMP(xpage, ==, page);
+       } else {
+-              fscache_stat(&fscache_n_store_vmscan_gone);
++              fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
+       }
+       wake_up_bit(&cookie->flags, 0);
+@@ -110,11 +110,11 @@ page_busy:
+        * sleeping on memory allocation, so we may need to impose a timeout
+        * too. */
+       if (!(gfp & __GFP_WAIT)) {
+-              fscache_stat(&fscache_n_store_vmscan_busy);
++              fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
+               return false;
+       }
+-      fscache_stat(&fscache_n_store_vmscan_wait);
++      fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
+       __fscache_wait_on_page_write(cookie, page);
+       gfp &= ~__GFP_WAIT;
+       goto try_again;
+@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
+                                    FSCACHE_COOKIE_STORING_TAG);
+               if (!radix_tree_tag_get(&cookie->stores, page->index,
+                                       FSCACHE_COOKIE_PENDING_TAG)) {
+-                      fscache_stat(&fscache_n_store_radix_deletes);
++                      fscache_stat_unchecked(&fscache_n_store_radix_deletes);
+                       xpage = radix_tree_delete(&cookie->stores, page->index);
+               }
+               spin_unlock(&cookie->stores_lock);
+@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
+       _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
+-      fscache_stat(&fscache_n_attr_changed_calls);
++      fscache_stat_unchecked(&fscache_n_attr_changed_calls);
+       if (fscache_object_is_active(object)) {
+               fscache_stat(&fscache_n_cop_attr_changed);
+@@ -187,11 +187,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
+       ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+-      fscache_stat(&fscache_n_attr_changed);
++      fscache_stat_unchecked(&fscache_n_attr_changed);
+       op = kzalloc(sizeof(*op), GFP_KERNEL);
+       if (!op) {
+-              fscache_stat(&fscache_n_attr_changed_nomem);
++              fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
+               _leave(" = -ENOMEM");
+               return -ENOMEM;
+       }
+@@ -209,7 +209,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
+       if (fscache_submit_exclusive_op(object, op) < 0)
+               goto nobufs;
+       spin_unlock(&cookie->lock);
+-      fscache_stat(&fscache_n_attr_changed_ok);
++      fscache_stat_unchecked(&fscache_n_attr_changed_ok);
+       fscache_put_operation(op);
+       _leave(" = 0");
+       return 0;
+@@ -217,7 +217,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
+ nobufs:
+       spin_unlock(&cookie->lock);
+       kfree(op);
+-      fscache_stat(&fscache_n_attr_changed_nobufs);
++      fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
+       _leave(" = %d", -ENOBUFS);
+       return -ENOBUFS;
+ }
+@@ -255,7 +255,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
+       /* allocate a retrieval operation and attempt to submit it */
+       op = kzalloc(sizeof(*op), GFP_NOIO);
+       if (!op) {
+-              fscache_stat(&fscache_n_retrievals_nomem);
++              fscache_stat_unchecked(&fscache_n_retrievals_nomem);
+               return NULL;
+       }
+@@ -283,13 +283,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
+               return 0;
+       }
+-      fscache_stat(&fscache_n_retrievals_wait);
++      fscache_stat_unchecked(&fscache_n_retrievals_wait);
+       jif = jiffies;
+       if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
+                       fscache_wait_bit_interruptible,
+                       TASK_INTERRUPTIBLE) != 0) {
+-              fscache_stat(&fscache_n_retrievals_intr);
++              fscache_stat_unchecked(&fscache_n_retrievals_intr);
+               _leave(" = -ERESTARTSYS");
+               return -ERESTARTSYS;
+       }
+@@ -318,8 +318,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
+  */
+ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
+                                                struct fscache_retrieval *op,
+-                                               atomic_t *stat_op_waits,
+-                                               atomic_t *stat_object_dead)
++                                               atomic_unchecked_t *stat_op_waits,
++                                               atomic_unchecked_t *stat_object_dead)
+ {
+       int ret;
+@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
+               goto check_if_dead;
+       _debug(">>> WT");
+-      fscache_stat(stat_op_waits);
++      fscache_stat_unchecked(stat_op_waits);
+       if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
+                       fscache_wait_bit_interruptible,
+                       TASK_INTERRUPTIBLE) != 0) {
+@@ -344,14 +344,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
+ check_if_dead:
+       if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
+-              fscache_stat(stat_object_dead);
++              fscache_stat_unchecked(stat_object_dead);
+               _leave(" = -ENOBUFS [cancelled]");
+               return -ENOBUFS;
+       }
+       if (unlikely(fscache_object_is_dead(object))) {
+               pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
+               fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
+-              fscache_stat(stat_object_dead);
++              fscache_stat_unchecked(stat_object_dead);
+               return -ENOBUFS;
+       }
+       return 0;
+@@ -378,7 +378,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
+       _enter("%p,%p,,,", cookie, page);
+-      fscache_stat(&fscache_n_retrievals);
++      fscache_stat_unchecked(&fscache_n_retrievals);
+       if (hlist_empty(&cookie->backing_objects))
+               goto nobufs;
+@@ -417,7 +417,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
+               goto nobufs_unlock_dec;
+       spin_unlock(&cookie->lock);
+-      fscache_stat(&fscache_n_retrieval_ops);
++      fscache_stat_unchecked(&fscache_n_retrieval_ops);
+       /* pin the netfs read context in case we need to do the actual netfs
+        * read because we've encountered a cache read failure */
+@@ -447,15 +447,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
+ error:
+       if (ret == -ENOMEM)
+-              fscache_stat(&fscache_n_retrievals_nomem);
++              fscache_stat_unchecked(&fscache_n_retrievals_nomem);
+       else if (ret == -ERESTARTSYS)
+-              fscache_stat(&fscache_n_retrievals_intr);
++              fscache_stat_unchecked(&fscache_n_retrievals_intr);
+       else if (ret == -ENODATA)
+-              fscache_stat(&fscache_n_retrievals_nodata);
++              fscache_stat_unchecked(&fscache_n_retrievals_nodata);
+       else if (ret < 0)
+-              fscache_stat(&fscache_n_retrievals_nobufs);
++              fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+       else
+-              fscache_stat(&fscache_n_retrievals_ok);
++              fscache_stat_unchecked(&fscache_n_retrievals_ok);
+       fscache_put_retrieval(op);
+       _leave(" = %d", ret);
+@@ -467,7 +467,7 @@ nobufs_unlock:
+       spin_unlock(&cookie->lock);
+       kfree(op);
+ nobufs:
+-      fscache_stat(&fscache_n_retrievals_nobufs);
++      fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+       _leave(" = -ENOBUFS");
+       return -ENOBUFS;
+ }
+@@ -505,7 +505,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
+       _enter("%p,,%d,,,", cookie, *nr_pages);
+-      fscache_stat(&fscache_n_retrievals);
++      fscache_stat_unchecked(&fscache_n_retrievals);
+       if (hlist_empty(&cookie->backing_objects))
+               goto nobufs;
+@@ -541,7 +541,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
+               goto nobufs_unlock_dec;
+       spin_unlock(&cookie->lock);
+-      fscache_stat(&fscache_n_retrieval_ops);
++      fscache_stat_unchecked(&fscache_n_retrieval_ops);
+       /* pin the netfs read context in case we need to do the actual netfs
+        * read because we've encountered a cache read failure */
+@@ -571,15 +571,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
+ error:
+       if (ret == -ENOMEM)
+-              fscache_stat(&fscache_n_retrievals_nomem);
++              fscache_stat_unchecked(&fscache_n_retrievals_nomem);
+       else if (ret == -ERESTARTSYS)
+-              fscache_stat(&fscache_n_retrievals_intr);
++              fscache_stat_unchecked(&fscache_n_retrievals_intr);
+       else if (ret == -ENODATA)
+-              fscache_stat(&fscache_n_retrievals_nodata);
++              fscache_stat_unchecked(&fscache_n_retrievals_nodata);
+       else if (ret < 0)
+-              fscache_stat(&fscache_n_retrievals_nobufs);
++              fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+       else
+-              fscache_stat(&fscache_n_retrievals_ok);
++              fscache_stat_unchecked(&fscache_n_retrievals_ok);
+       fscache_put_retrieval(op);
+       _leave(" = %d", ret);
+@@ -591,7 +591,7 @@ nobufs_unlock:
+       spin_unlock(&cookie->lock);
+       kfree(op);
+ nobufs:
+-      fscache_stat(&fscache_n_retrievals_nobufs);
++      fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+       _leave(" = -ENOBUFS");
+       return -ENOBUFS;
+ }
+@@ -615,7 +615,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
+       _enter("%p,%p,,,", cookie, page);
+-      fscache_stat(&fscache_n_allocs);
++      fscache_stat_unchecked(&fscache_n_allocs);
+       if (hlist_empty(&cookie->backing_objects))
+               goto nobufs;
+@@ -647,7 +647,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
+               goto nobufs_unlock;
+       spin_unlock(&cookie->lock);
+-      fscache_stat(&fscache_n_alloc_ops);
++      fscache_stat_unchecked(&fscache_n_alloc_ops);
+       ret = fscache_wait_for_retrieval_activation(
+               object, op,
+@@ -663,11 +663,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
+ error:
+       if (ret == -ERESTARTSYS)
+-              fscache_stat(&fscache_n_allocs_intr);
++              fscache_stat_unchecked(&fscache_n_allocs_intr);
+       else if (ret < 0)
+-              fscache_stat(&fscache_n_allocs_nobufs);
++              fscache_stat_unchecked(&fscache_n_allocs_nobufs);
+       else
+-              fscache_stat(&fscache_n_allocs_ok);
++              fscache_stat_unchecked(&fscache_n_allocs_ok);
+       fscache_put_retrieval(op);
+       _leave(" = %d", ret);
+@@ -677,7 +677,7 @@ nobufs_unlock:
+       spin_unlock(&cookie->lock);
+       kfree(op);
+ nobufs:
+-      fscache_stat(&fscache_n_allocs_nobufs);
++      fscache_stat_unchecked(&fscache_n_allocs_nobufs);
+       _leave(" = -ENOBUFS");
+       return -ENOBUFS;
+ }
+@@ -736,7 +736,7 @@ static void fscache_write_op(struct fscache_operation *_op)
+       spin_lock(&cookie->stores_lock);
+-      fscache_stat(&fscache_n_store_calls);
++      fscache_stat_unchecked(&fscache_n_store_calls);
+       /* find a page to store */
+       page = NULL;
+@@ -747,7 +747,7 @@ static void fscache_write_op(struct fscache_operation *_op)
+       page = results[0];
+       _debug("gang %d [%lx]", n, page->index);
+       if (page->index > op->store_limit) {
+-              fscache_stat(&fscache_n_store_pages_over_limit);
++              fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
+               goto superseded;
+       }
+@@ -759,7 +759,7 @@ static void fscache_write_op(struct fscache_operation *_op)
+       spin_unlock(&cookie->stores_lock);
+       spin_unlock(&object->lock);
+-      fscache_stat(&fscache_n_store_pages);
++      fscache_stat_unchecked(&fscache_n_store_pages);
+       fscache_stat(&fscache_n_cop_write_page);
+       ret = object->cache->ops->write_page(op, page);
+       fscache_stat_d(&fscache_n_cop_write_page);
+@@ -860,7 +860,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
+       ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+       ASSERT(PageFsCache(page));
+-      fscache_stat(&fscache_n_stores);
++      fscache_stat_unchecked(&fscache_n_stores);
+       if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
+               _leave(" = -ENOBUFS [invalidating]");
+@@ -916,7 +916,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
+       spin_unlock(&cookie->stores_lock);
+       spin_unlock(&object->lock);
+-      op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
++      op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
+       op->store_limit = object->store_limit;
+       if (fscache_submit_op(object, &op->op) < 0)
+@@ -924,8 +924,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
+       spin_unlock(&cookie->lock);
+       radix_tree_preload_end();
+-      fscache_stat(&fscache_n_store_ops);
+-      fscache_stat(&fscache_n_stores_ok);
++      fscache_stat_unchecked(&fscache_n_store_ops);
++      fscache_stat_unchecked(&fscache_n_stores_ok);
+       /* the work queue now carries its own ref on the object */
+       fscache_put_operation(&op->op);
+@@ -933,14 +933,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
+       return 0;
+ already_queued:
+-      fscache_stat(&fscache_n_stores_again);
++      fscache_stat_unchecked(&fscache_n_stores_again);
+ already_pending:
+       spin_unlock(&cookie->stores_lock);
+       spin_unlock(&object->lock);
+       spin_unlock(&cookie->lock);
+       radix_tree_preload_end();
+       kfree(op);
+-      fscache_stat(&fscache_n_stores_ok);
++      fscache_stat_unchecked(&fscache_n_stores_ok);
+       _leave(" = 0");
+       return 0;
+@@ -959,14 +959,14 @@ nobufs:
+       spin_unlock(&cookie->lock);
+       radix_tree_preload_end();
+       kfree(op);
+-      fscache_stat(&fscache_n_stores_nobufs);
++      fscache_stat_unchecked(&fscache_n_stores_nobufs);
+       _leave(" = -ENOBUFS");
+       return -ENOBUFS;
+ nomem_free:
+       kfree(op);
+ nomem:
+-      fscache_stat(&fscache_n_stores_oom);
++      fscache_stat_unchecked(&fscache_n_stores_oom);
+       _leave(" = -ENOMEM");
+       return -ENOMEM;
+ }
+@@ -984,7 +984,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
+       ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+       ASSERTCMP(page, !=, NULL);
+-      fscache_stat(&fscache_n_uncaches);
++      fscache_stat_unchecked(&fscache_n_uncaches);
+       /* cache withdrawal may beat us to it */
+       if (!PageFsCache(page))
+@@ -1035,7 +1035,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
+       struct fscache_cookie *cookie = op->op.object->cookie;
+ #ifdef CONFIG_FSCACHE_STATS
+-      atomic_inc(&fscache_n_marks);
++      atomic_inc_unchecked(&fscache_n_marks);
+ #endif
+       _debug("- mark %p{%lx}", page, page->index);
+diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
+index 40d13c7..ddf52b9 100644
+--- a/fs/fscache/stats.c
++++ b/fs/fscache/stats.c
+@@ -18,99 +18,99 @@
+ /*
+  * operation counters
+  */
+-atomic_t fscache_n_op_pend;
+-atomic_t fscache_n_op_run;
+-atomic_t fscache_n_op_enqueue;
+-atomic_t fscache_n_op_requeue;
+-atomic_t fscache_n_op_deferred_release;
+-atomic_t fscache_n_op_release;
+-atomic_t fscache_n_op_gc;
+-atomic_t fscache_n_op_cancelled;
+-atomic_t fscache_n_op_rejected;
++atomic_unchecked_t fscache_n_op_pend;
++atomic_unchecked_t fscache_n_op_run;
++atomic_unchecked_t fscache_n_op_enqueue;
++atomic_unchecked_t fscache_n_op_requeue;
++atomic_unchecked_t fscache_n_op_deferred_release;
++atomic_unchecked_t fscache_n_op_release;
++atomic_unchecked_t fscache_n_op_gc;
++atomic_unchecked_t fscache_n_op_cancelled;
++atomic_unchecked_t fscache_n_op_rejected;
+-atomic_t fscache_n_attr_changed;
+-atomic_t fscache_n_attr_changed_ok;
+-atomic_t fscache_n_attr_changed_nobufs;
+-atomic_t fscache_n_attr_changed_nomem;
+-atomic_t fscache_n_attr_changed_calls;
++atomic_unchecked_t fscache_n_attr_changed;
++atomic_unchecked_t fscache_n_attr_changed_ok;
++atomic_unchecked_t fscache_n_attr_changed_nobufs;
++atomic_unchecked_t fscache_n_attr_changed_nomem;
++atomic_unchecked_t fscache_n_attr_changed_calls;
+-atomic_t fscache_n_allocs;
+-atomic_t fscache_n_allocs_ok;
+-atomic_t fscache_n_allocs_wait;
+-atomic_t fscache_n_allocs_nobufs;
+-atomic_t fscache_n_allocs_intr;
+-atomic_t fscache_n_allocs_object_dead;
+-atomic_t fscache_n_alloc_ops;
+-atomic_t fscache_n_alloc_op_waits;
++atomic_unchecked_t fscache_n_allocs;
++atomic_unchecked_t fscache_n_allocs_ok;
++atomic_unchecked_t fscache_n_allocs_wait;
++atomic_unchecked_t fscache_n_allocs_nobufs;
++atomic_unchecked_t fscache_n_allocs_intr;
++atomic_unchecked_t fscache_n_allocs_object_dead;
++atomic_unchecked_t fscache_n_alloc_ops;
++atomic_unchecked_t fscache_n_alloc_op_waits;
+-atomic_t fscache_n_retrievals;
+-atomic_t fscache_n_retrievals_ok;
+-atomic_t fscache_n_retrievals_wait;
+-atomic_t fscache_n_retrievals_nodata;
+-atomic_t fscache_n_retrievals_nobufs;
+-atomic_t fscache_n_retrievals_intr;
+-atomic_t fscache_n_retrievals_nomem;
+-atomic_t fscache_n_retrievals_object_dead;
+-atomic_t fscache_n_retrieval_ops;
+-atomic_t fscache_n_retrieval_op_waits;
++atomic_unchecked_t fscache_n_retrievals;
++atomic_unchecked_t fscache_n_retrievals_ok;
++atomic_unchecked_t fscache_n_retrievals_wait;
++atomic_unchecked_t fscache_n_retrievals_nodata;
++atomic_unchecked_t fscache_n_retrievals_nobufs;
++atomic_unchecked_t fscache_n_retrievals_intr;
++atomic_unchecked_t fscache_n_retrievals_nomem;
++atomic_unchecked_t fscache_n_retrievals_object_dead;
++atomic_unchecked_t fscache_n_retrieval_ops;
++atomic_unchecked_t fscache_n_retrieval_op_waits;
+-atomic_t fscache_n_stores;
+-atomic_t fscache_n_stores_ok;
+-atomic_t fscache_n_stores_again;
+-atomic_t fscache_n_stores_nobufs;
+-atomic_t fscache_n_stores_oom;
+-atomic_t fscache_n_store_ops;
+-atomic_t fscache_n_store_calls;
+-atomic_t fscache_n_store_pages;
+-atomic_t fscache_n_store_radix_deletes;
+-atomic_t fscache_n_store_pages_over_limit;
++atomic_unchecked_t fscache_n_stores;
++atomic_unchecked_t fscache_n_stores_ok;
++atomic_unchecked_t fscache_n_stores_again;
++atomic_unchecked_t fscache_n_stores_nobufs;
++atomic_unchecked_t fscache_n_stores_oom;
++atomic_unchecked_t fscache_n_store_ops;
++atomic_unchecked_t fscache_n_store_calls;
++atomic_unchecked_t fscache_n_store_pages;
++atomic_unchecked_t fscache_n_store_radix_deletes;
++atomic_unchecked_t fscache_n_store_pages_over_limit;
+-atomic_t fscache_n_store_vmscan_not_storing;
+-atomic_t fscache_n_store_vmscan_gone;
+-atomic_t fscache_n_store_vmscan_busy;
+-atomic_t fscache_n_store_vmscan_cancelled;
+-atomic_t fscache_n_store_vmscan_wait;
++atomic_unchecked_t fscache_n_store_vmscan_not_storing;
++atomic_unchecked_t fscache_n_store_vmscan_gone;
++atomic_unchecked_t fscache_n_store_vmscan_busy;
++atomic_unchecked_t fscache_n_store_vmscan_cancelled;
++atomic_unchecked_t fscache_n_store_vmscan_wait;
+-atomic_t fscache_n_marks;
+-atomic_t fscache_n_uncaches;
++atomic_unchecked_t fscache_n_marks;
++atomic_unchecked_t fscache_n_uncaches;
+-atomic_t fscache_n_acquires;
+-atomic_t fscache_n_acquires_null;
+-atomic_t fscache_n_acquires_no_cache;
+-atomic_t fscache_n_acquires_ok;
+-atomic_t fscache_n_acquires_nobufs;
+-atomic_t fscache_n_acquires_oom;
++atomic_unchecked_t fscache_n_acquires;
++atomic_unchecked_t fscache_n_acquires_null;
++atomic_unchecked_t fscache_n_acquires_no_cache;
++atomic_unchecked_t fscache_n_acquires_ok;
++atomic_unchecked_t fscache_n_acquires_nobufs;
++atomic_unchecked_t fscache_n_acquires_oom;
+-atomic_t fscache_n_invalidates;
+-atomic_t fscache_n_invalidates_run;
++atomic_unchecked_t fscache_n_invalidates;
++atomic_unchecked_t fscache_n_invalidates_run;
+-atomic_t fscache_n_updates;
+-atomic_t fscache_n_updates_null;
+-atomic_t fscache_n_updates_run;
++atomic_unchecked_t fscache_n_updates;
++atomic_unchecked_t fscache_n_updates_null;
++atomic_unchecked_t fscache_n_updates_run;
+-atomic_t fscache_n_relinquishes;
+-atomic_t fscache_n_relinquishes_null;
+-atomic_t fscache_n_relinquishes_waitcrt;
+-atomic_t fscache_n_relinquishes_retire;
++atomic_unchecked_t fscache_n_relinquishes;
++atomic_unchecked_t fscache_n_relinquishes_null;
++atomic_unchecked_t fscache_n_relinquishes_waitcrt;
++atomic_unchecked_t fscache_n_relinquishes_retire;
+-atomic_t fscache_n_cookie_index;
+-atomic_t fscache_n_cookie_data;
+-atomic_t fscache_n_cookie_special;
++atomic_unchecked_t fscache_n_cookie_index;
++atomic_unchecked_t fscache_n_cookie_data;
++atomic_unchecked_t fscache_n_cookie_special;
+-atomic_t fscache_n_object_alloc;
+-atomic_t fscache_n_object_no_alloc;
+-atomic_t fscache_n_object_lookups;
+-atomic_t fscache_n_object_lookups_negative;
+-atomic_t fscache_n_object_lookups_positive;
+-atomic_t fscache_n_object_lookups_timed_out;
+-atomic_t fscache_n_object_created;
+-atomic_t fscache_n_object_avail;
+-atomic_t fscache_n_object_dead;
++atomic_unchecked_t fscache_n_object_alloc;
++atomic_unchecked_t fscache_n_object_no_alloc;
++atomic_unchecked_t fscache_n_object_lookups;
++atomic_unchecked_t fscache_n_object_lookups_negative;
++atomic_unchecked_t fscache_n_object_lookups_positive;
++atomic_unchecked_t fscache_n_object_lookups_timed_out;
++atomic_unchecked_t fscache_n_object_created;
++atomic_unchecked_t fscache_n_object_avail;
++atomic_unchecked_t fscache_n_object_dead;
+-atomic_t fscache_n_checkaux_none;
+-atomic_t fscache_n_checkaux_okay;
+-atomic_t fscache_n_checkaux_update;
+-atomic_t fscache_n_checkaux_obsolete;
++atomic_unchecked_t fscache_n_checkaux_none;
++atomic_unchecked_t fscache_n_checkaux_okay;
++atomic_unchecked_t fscache_n_checkaux_update;
++atomic_unchecked_t fscache_n_checkaux_obsolete;
+ atomic_t fscache_n_cop_alloc_object;
+ atomic_t fscache_n_cop_lookup_object;
+@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
+       seq_puts(m, "FS-Cache statistics\n");
+       seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
+-                 atomic_read(&fscache_n_cookie_index),
+-                 atomic_read(&fscache_n_cookie_data),
+-                 atomic_read(&fscache_n_cookie_special));
++                 atomic_read_unchecked(&fscache_n_cookie_index),
++                 atomic_read_unchecked(&fscache_n_cookie_data),
++                 atomic_read_unchecked(&fscache_n_cookie_special));
+       seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
+-                 atomic_read(&fscache_n_object_alloc),
+-                 atomic_read(&fscache_n_object_no_alloc),
+-                 atomic_read(&fscache_n_object_avail),
+-                 atomic_read(&fscache_n_object_dead));
++                 atomic_read_unchecked(&fscache_n_object_alloc),
++                 atomic_read_unchecked(&fscache_n_object_no_alloc),
++                 atomic_read_unchecked(&fscache_n_object_avail),
++                 atomic_read_unchecked(&fscache_n_object_dead));
+       seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
+-                 atomic_read(&fscache_n_checkaux_none),
+-                 atomic_read(&fscache_n_checkaux_okay),
+-                 atomic_read(&fscache_n_checkaux_update),
+-                 atomic_read(&fscache_n_checkaux_obsolete));
++                 atomic_read_unchecked(&fscache_n_checkaux_none),
++                 atomic_read_unchecked(&fscache_n_checkaux_okay),
++                 atomic_read_unchecked(&fscache_n_checkaux_update),
++                 atomic_read_unchecked(&fscache_n_checkaux_obsolete));
+       seq_printf(m, "Pages  : mrk=%u unc=%u\n",
+-                 atomic_read(&fscache_n_marks),
+-                 atomic_read(&fscache_n_uncaches));
++                 atomic_read_unchecked(&fscache_n_marks),
++                 atomic_read_unchecked(&fscache_n_uncaches));
+       seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
+                  " oom=%u\n",
+-                 atomic_read(&fscache_n_acquires),
+-                 atomic_read(&fscache_n_acquires_null),
+-                 atomic_read(&fscache_n_acquires_no_cache),
+-                 atomic_read(&fscache_n_acquires_ok),
+-                 atomic_read(&fscache_n_acquires_nobufs),
+-                 atomic_read(&fscache_n_acquires_oom));
++                 atomic_read_unchecked(&fscache_n_acquires),
++                 atomic_read_unchecked(&fscache_n_acquires_null),
++                 atomic_read_unchecked(&fscache_n_acquires_no_cache),
++                 atomic_read_unchecked(&fscache_n_acquires_ok),
++                 atomic_read_unchecked(&fscache_n_acquires_nobufs),
++                 atomic_read_unchecked(&fscache_n_acquires_oom));
+       seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
+-                 atomic_read(&fscache_n_object_lookups),
+-                 atomic_read(&fscache_n_object_lookups_negative),
+-                 atomic_read(&fscache_n_object_lookups_positive),
+-                 atomic_read(&fscache_n_object_created),
+-                 atomic_read(&fscache_n_object_lookups_timed_out));
++                 atomic_read_unchecked(&fscache_n_object_lookups),
++                 atomic_read_unchecked(&fscache_n_object_lookups_negative),
++                 atomic_read_unchecked(&fscache_n_object_lookups_positive),
++                 atomic_read_unchecked(&fscache_n_object_created),
++                 atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
+       seq_printf(m, "Invals : n=%u run=%u\n",
+-                 atomic_read(&fscache_n_invalidates),
+-                 atomic_read(&fscache_n_invalidates_run));
++                 atomic_read_unchecked(&fscache_n_invalidates),
++                 atomic_read_unchecked(&fscache_n_invalidates_run));
+       seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
+-                 atomic_read(&fscache_n_updates),
+-                 atomic_read(&fscache_n_updates_null),
+-                 atomic_read(&fscache_n_updates_run));
++                 atomic_read_unchecked(&fscache_n_updates),
++                 atomic_read_unchecked(&fscache_n_updates_null),
++                 atomic_read_unchecked(&fscache_n_updates_run));
+       seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
+-                 atomic_read(&fscache_n_relinquishes),
+-                 atomic_read(&fscache_n_relinquishes_null),
+-                 atomic_read(&fscache_n_relinquishes_waitcrt),
+-                 atomic_read(&fscache_n_relinquishes_retire));
++                 atomic_read_unchecked(&fscache_n_relinquishes),
++                 atomic_read_unchecked(&fscache_n_relinquishes_null),
++                 atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
++                 atomic_read_unchecked(&fscache_n_relinquishes_retire));
+       seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
+-                 atomic_read(&fscache_n_attr_changed),
+-                 atomic_read(&fscache_n_attr_changed_ok),
+-                 atomic_read(&fscache_n_attr_changed_nobufs),
+-                 atomic_read(&fscache_n_attr_changed_nomem),
+-                 atomic_read(&fscache_n_attr_changed_calls));
++                 atomic_read_unchecked(&fscache_n_attr_changed),
++                 atomic_read_unchecked(&fscache_n_attr_changed_ok),
++                 atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
++                 atomic_read_unchecked(&fscache_n_attr_changed_nomem),
++                 atomic_read_unchecked(&fscache_n_attr_changed_calls));
+       seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
+-                 atomic_read(&fscache_n_allocs),
+-                 atomic_read(&fscache_n_allocs_ok),
+-                 atomic_read(&fscache_n_allocs_wait),
+-                 atomic_read(&fscache_n_allocs_nobufs),
+-                 atomic_read(&fscache_n_allocs_intr));
++                 atomic_read_unchecked(&fscache_n_allocs),
++                 atomic_read_unchecked(&fscache_n_allocs_ok),
++                 atomic_read_unchecked(&fscache_n_allocs_wait),
++                 atomic_read_unchecked(&fscache_n_allocs_nobufs),
++                 atomic_read_unchecked(&fscache_n_allocs_intr));
+       seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
+-                 atomic_read(&fscache_n_alloc_ops),
+-                 atomic_read(&fscache_n_alloc_op_waits),
+-                 atomic_read(&fscache_n_allocs_object_dead));
++                 atomic_read_unchecked(&fscache_n_alloc_ops),
++                 atomic_read_unchecked(&fscache_n_alloc_op_waits),
++                 atomic_read_unchecked(&fscache_n_allocs_object_dead));
+       seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
+                  " int=%u oom=%u\n",
+-                 atomic_read(&fscache_n_retrievals),
+-                 atomic_read(&fscache_n_retrievals_ok),
+-                 atomic_read(&fscache_n_retrievals_wait),
+-                 atomic_read(&fscache_n_retrievals_nodata),
+-                 atomic_read(&fscache_n_retrievals_nobufs),
+-                 atomic_read(&fscache_n_retrievals_intr),
+-                 atomic_read(&fscache_n_retrievals_nomem));
++                 atomic_read_unchecked(&fscache_n_retrievals),
++                 atomic_read_unchecked(&fscache_n_retrievals_ok),
++                 atomic_read_unchecked(&fscache_n_retrievals_wait),
++                 atomic_read_unchecked(&fscache_n_retrievals_nodata),
++                 atomic_read_unchecked(&fscache_n_retrievals_nobufs),
++                 atomic_read_unchecked(&fscache_n_retrievals_intr),
++                 atomic_read_unchecked(&fscache_n_retrievals_nomem));
+       seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
+-                 atomic_read(&fscache_n_retrieval_ops),
+-                 atomic_read(&fscache_n_retrieval_op_waits),
+-                 atomic_read(&fscache_n_retrievals_object_dead));
++                 atomic_read_unchecked(&fscache_n_retrieval_ops),
++                 atomic_read_unchecked(&fscache_n_retrieval_op_waits),
++                 atomic_read_unchecked(&fscache_n_retrievals_object_dead));
+       seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
+-                 atomic_read(&fscache_n_stores),
+-                 atomic_read(&fscache_n_stores_ok),
+-                 atomic_read(&fscache_n_stores_again),
+-                 atomic_read(&fscache_n_stores_nobufs),
+-                 atomic_read(&fscache_n_stores_oom));
++                 atomic_read_unchecked(&fscache_n_stores),
++                 atomic_read_unchecked(&fscache_n_stores_ok),
++                 atomic_read_unchecked(&fscache_n_stores_again),
++                 atomic_read_unchecked(&fscache_n_stores_nobufs),
++                 atomic_read_unchecked(&fscache_n_stores_oom));
+       seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
+-                 atomic_read(&fscache_n_store_ops),
+-                 atomic_read(&fscache_n_store_calls),
+-                 atomic_read(&fscache_n_store_pages),
+-                 atomic_read(&fscache_n_store_radix_deletes),
+-                 atomic_read(&fscache_n_store_pages_over_limit));
++                 atomic_read_unchecked(&fscache_n_store_ops),
++                 atomic_read_unchecked(&fscache_n_store_calls),
++                 atomic_read_unchecked(&fscache_n_store_pages),
++                 atomic_read_unchecked(&fscache_n_store_radix_deletes),
++                 atomic_read_unchecked(&fscache_n_store_pages_over_limit));
+       seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
+-                 atomic_read(&fscache_n_store_vmscan_not_storing),
+-                 atomic_read(&fscache_n_store_vmscan_gone),
+-                 atomic_read(&fscache_n_store_vmscan_busy),
+-                 atomic_read(&fscache_n_store_vmscan_cancelled),
+-                 atomic_read(&fscache_n_store_vmscan_wait));
++                 atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
++                 atomic_read_unchecked(&fscache_n_store_vmscan_gone),
++                 atomic_read_unchecked(&fscache_n_store_vmscan_busy),
++                 atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
++                 atomic_read_unchecked(&fscache_n_store_vmscan_wait));
+       seq_printf(m, "Ops    : pend=%u run=%u enq=%u can=%u rej=%u\n",
+-                 atomic_read(&fscache_n_op_pend),
+-                 atomic_read(&fscache_n_op_run),
+-                 atomic_read(&fscache_n_op_enqueue),
+-                 atomic_read(&fscache_n_op_cancelled),
+-                 atomic_read(&fscache_n_op_rejected));
++                 atomic_read_unchecked(&fscache_n_op_pend),
++                 atomic_read_unchecked(&fscache_n_op_run),
++                 atomic_read_unchecked(&fscache_n_op_enqueue),
++                 atomic_read_unchecked(&fscache_n_op_cancelled),
++                 atomic_read_unchecked(&fscache_n_op_rejected));
+       seq_printf(m, "Ops    : dfr=%u rel=%u gc=%u\n",
+-                 atomic_read(&fscache_n_op_deferred_release),
+-                 atomic_read(&fscache_n_op_release),
+-                 atomic_read(&fscache_n_op_gc));
++                 atomic_read_unchecked(&fscache_n_op_deferred_release),
++                 atomic_read_unchecked(&fscache_n_op_release),
++                 atomic_read_unchecked(&fscache_n_op_gc));
+       seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
+                  atomic_read(&fscache_n_cop_alloc_object),
+diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
+index aef34b1..59bfd7b 100644
+--- a/fs/fuse/cuse.c
++++ b/fs/fuse/cuse.c
+@@ -600,10 +600,12 @@ static int __init cuse_init(void)
+               INIT_LIST_HEAD(&cuse_conntbl[i]);
+       /* inherit and extend fuse_dev_operations */
+-      cuse_channel_fops               = fuse_dev_operations;
+-      cuse_channel_fops.owner         = THIS_MODULE;
+-      cuse_channel_fops.open          = cuse_channel_open;
+-      cuse_channel_fops.release       = cuse_channel_release;
++      pax_open_kernel();
++      memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
++      *(void **)&cuse_channel_fops.owner      = THIS_MODULE;
++      *(void **)&cuse_channel_fops.open       = cuse_channel_open;
++      *(void **)&cuse_channel_fops.release    = cuse_channel_release;
++      pax_close_kernel();
+       cuse_class = class_create(THIS_MODULE, "cuse");
+       if (IS_ERR(cuse_class))
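
The cuse.c change above shows a second recurring pattern in this patch: cuse_channel_fops is no longer assignable field by field, so the init path copies fuse_dev_operations wholesale and patches the three members through explicit casts inside a pax_open_kernel()/pax_close_kernel() window, which this hunk suggests temporarily lifts write protection on otherwise read-only kernel data. A hedged sketch of that shape follows; the names are illustrative and not from the patch, only the pax_open_kernel()/pax_close_kernel() pair and the cast idiom come from the hunk itself.

/*
 * Sketch (assumption): one-time fix-up of a file_operations struct that
 * is treated as read-only after init. The write window is kept as small
 * as possible and const-qualified members are written via casts.
 */
static struct file_operations my_fops_sketch;   /* assumed read-only at runtime */

static void __init fixup_fops_sketch(const struct file_operations *base,
                                     struct module *owner)
{
        pax_open_kernel();
        memcpy((void *)&my_fops_sketch, base, sizeof(*base));
        *(void **)&my_fops_sketch.owner = owner;
        pax_close_kernel();
}
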
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 1d55f94..088da65 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1339,7 +1339,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
+       ret = 0;
+       pipe_lock(pipe);
+-      if (!pipe->readers) {
++      if (!atomic_read(&pipe->readers)) {
+               send_sig(SIGPIPE, current, 0);
+               if (!ret)
+                       ret = -EPIPE;
+@@ -1364,7 +1364,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
+               page_nr++;
+               ret += buf->len;
+-              if (pipe->files)
++              if (atomic_read(&pipe->files))
+                       do_wakeup = 1;
+       }
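
The fuse_dev_splice_read() changes read pipe->readers and pipe->files through atomic_read(), which only makes sense if the corresponding pipe_inode_info fields are converted to atomic_t elsewhere in this patch; that companion change is not visible in this hunk, so the fragment below is only an assumption about its shape.

/*
 * Sketch (assumption): the matching field conversion presumed to exist
 * elsewhere in this patch, which is why the splice path above now uses
 * atomic_read() instead of touching plain integer counters.
 */
struct pipe_inode_info_fields_sketch {
        atomic_t readers;       /* assumed: was a plain unsigned int */
        atomic_t files;         /* assumed: was a plain unsigned int */
};
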
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 5b12746..b481b03 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1437,7 +1437,7 @@ static char *read_link(struct dentry *dentry)
+       return link;
+ }
+-static void free_link(char *link)
++static void free_link(const char *link)
+ {
+       if (!IS_ERR(link))
+               free_page((unsigned long) link);
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 62b484e..0f9a140 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -1441,7 +1441,7 @@ out:
+ static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+ {
+-      char *s = nd_get_link(nd);
++      const char *s = nd_get_link(nd);
+       if (!IS_ERR(s))
+               kfree(s);
+ }
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index a3f868a..bb308ae 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       struct hstate *h = hstate_file(file);
++      unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
+       struct vm_unmapped_area_info info;
+       if (len & ~huge_page_mask(h))
+@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+               return addr;
+       }
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+               addr = ALIGN(addr, huge_page_size(h));
+               vma = find_vma(mm, addr);
+-              if (TASK_SIZE - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+       info.flags = 0;
+       info.length = len;
+       info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (mm->pax_flags & MF_PAX_RANDMMAP)
++              info.low_limit += mm->delta_mmap;
++#endif
++
+       info.high_limit = TASK_SIZE;
+       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+       info.align_offset = 0;
+@@ -898,7 +908,7 @@ static struct file_system_type hugetlbfs_fs_type = {
+ };
+ MODULE_ALIAS_FS("hugetlbfs");
+-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
++struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
+ static int can_do_hugetlb_shm(void)
+ {
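Editor's note: the hugetlb_get_unmapped_area() hunk above adds a random offset and, under CONFIG_PAX_RANDMMAP, shifts info.low_limit by mm->delta_mmap, so hugetlb mappings are searched for from a randomized base. The following is only a userspace sketch of the general "add a page-aligned random delta to a base address" idea; it is not kernel code, and every name in it is invented.

/*
 * Hypothetical userspace sketch: derive a page-aligned random delta and
 * shift a search base by it, loosely mirroring how the PAX_RANDMMAP hunk
 * above biases info.low_limit. Not kernel code; names are made up.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT 12UL
#define RAND_BITS  16UL   /* assumed amount of entropy, for illustration only */

static uintptr_t randomize_base(uintptr_t base)
{
        /* random page count in [0, 2^RAND_BITS), converted to bytes */
        uintptr_t delta = ((uintptr_t)rand() & ((1UL << RAND_BITS) - 1)) << PAGE_SHIFT;
        return base + delta;
}

int main(void)
{
        srand((unsigned)time(NULL));
        uintptr_t low_limit = 0x10000000UL;  /* stand-in for TASK_UNMAPPED_BASE */
        printf("search starts at %#lx\n", (unsigned long)randomize_base(low_limit));
        return 0;
}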
+diff --git a/fs/inode.c b/fs/inode.c
+index 00d5fc3..98ce7d7 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -878,8 +878,8 @@ unsigned int get_next_ino(void)
+ #ifdef CONFIG_SMP
+       if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
+-              static atomic_t shared_last_ino;
+-              int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
++              static atomic_unchecked_t shared_last_ino;
++              int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
+               res = next - LAST_INO_BATCH;
+       }
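Editor's note: the fs/inode.c hunk only swaps the shared counter to the non-overflow-checked atomic type, but the surrounding code shows the useful pattern: each CPU reserves a batch of LAST_INO_BATCH inode numbers at once so the shared atomic is touched rarely. Below is a loose userspace analogue of that batching (thread-local where the kernel is per-CPU); it is illustrative only and all names are invented.

/*
 * Illustrative sketch (not kernel code): hand out IDs from a thread-local
 * batch and only touch the shared atomic when the batch runs out, which is
 * roughly the scheme get_next_ino() uses above. Compile as C11.
 */
#include <stdatomic.h>
#include <stdio.h>

#define BATCH 1024U

static atomic_uint shared_last_id;          /* shared across threads */
static _Thread_local unsigned int next_id;  /* per-thread cursor */

static unsigned int get_next_id(void)
{
        unsigned int res = next_id;

        if ((res & (BATCH - 1)) == 0) {
                /* refill: reserve a whole batch with a single atomic op */
                unsigned int end = atomic_fetch_add(&shared_last_id, BATCH) + BATCH;
                res = end - BATCH;
        }
        next_id = res + 1;
        return res;
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                printf("id %u\n", get_next_id());
        return 0;
}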
+diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
+index 4a6cf28..d3a29d3 100644
+--- a/fs/jffs2/erase.c
++++ b/fs/jffs2/erase.c
+@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
+               struct jffs2_unknown_node marker = {
+                       .magic =        cpu_to_je16(JFFS2_MAGIC_BITMASK),
+                       .nodetype =     cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
+-                      .totlen =       cpu_to_je32(c->cleanmarker_size)
++                      .totlen =       cpu_to_je32(c->cleanmarker_size),
++                      .hdr_crc =      cpu_to_je32(0)
+               };
+               jffs2_prealloc_raw_node_refs(c, jeb, 1);
+diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
+index a6597d6..41b30ec 100644
+--- a/fs/jffs2/wbuf.c
++++ b/fs/jffs2/wbuf.c
+@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
+ {
+       .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
+       .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
+-      .totlen = constant_cpu_to_je32(8)
++      .totlen = constant_cpu_to_je32(8),
++      .hdr_crc = constant_cpu_to_je32(0)
+ };
+ /*
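Editor's note: the two jffs2 hunks above only add an explicit ".hdr_crc = cpu_to_je32(0)". With designated initializers, members that are not named are zero-initialized anyway, so this appears to be about making the cleanmarker layout explicit rather than changing the resulting bytes. A tiny demonstration with an invented struct:

/*
 * Members a designated initializer does not name are implicitly zeroed,
 * so spelling out ".hdr_crc = 0" documents intent without changing the
 * initialized values. The struct here is hypothetical, not the jffs2 one.
 */
#include <stdio.h>
#include <stdint.h>

struct marker {
        uint16_t magic;
        uint16_t nodetype;
        uint32_t totlen;
        uint32_t hdr_crc;
};

int main(void)
{
        struct marker implicit     = { .magic = 0x1985, .nodetype = 0x2003, .totlen = 12 };
        struct marker explicit_crc = { .magic = 0x1985, .nodetype = 0x2003, .totlen = 12,
                                       .hdr_crc = 0 };

        printf("implicit hdr_crc = %u\n", implicit.hdr_crc);      /* prints 0 */
        printf("explicit hdr_crc = %u\n", explicit_crc.hdr_crc);  /* prints 0 */
        return 0;
}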
+diff --git a/fs/jfs/super.c b/fs/jfs/super.c
+index 788e0a9..8433098 100644
+--- a/fs/jfs/super.c
++++ b/fs/jfs/super.c
+@@ -878,7 +878,7 @@ static int __init init_jfs_fs(void)
+       jfs_inode_cachep =
+           kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
+-                          SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
++                          SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
+                           init_once);
+       if (jfs_inode_cachep == NULL)
+               return -ENOMEM;
+diff --git a/fs/libfs.c b/fs/libfs.c
+index 916da8c..1588998 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
+                       for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
+                               struct dentry *next;
++                              char d_name[sizeof(next->d_iname)];
++                              const unsigned char *name;
++
+                               next = list_entry(p, struct dentry, d_u.d_child);
+                               spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
+                               if (!simple_positive(next)) {
+@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
+                               spin_unlock(&next->d_lock);
+                               spin_unlock(&dentry->d_lock);
+-                              if (filldir(dirent, next->d_name.name, 
++                              name = next->d_name.name;
++                              if (name == next->d_iname) {
++                                      memcpy(d_name, name, next->d_name.len);
++                                      name = d_name;
++                              }
++                              if (filldir(dirent, name, 
+                                           next->d_name.len, filp->f_pos, 
+                                           next->d_inode->i_ino, 
+                                           dt_type(next->d_inode)) < 0)
+diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
+index acd3947..1f896e2 100644
+--- a/fs/lockd/clntproc.c
++++ b/fs/lockd/clntproc.c
+@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
+ /*
+  * Cookie counter for NLM requests
+  */
+-static atomic_t       nlm_cookie = ATOMIC_INIT(0x1234);
++static atomic_unchecked_t     nlm_cookie = ATOMIC_INIT(0x1234);
+ void nlmclnt_next_cookie(struct nlm_cookie *c)
+ {
+-      u32     cookie = atomic_inc_return(&nlm_cookie);
++      u32     cookie = atomic_inc_return_unchecked(&nlm_cookie);
+       memcpy(c->data, &cookie, 4);
+       c->len=4;
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index a2aa97d..10d6c41 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -305,7 +305,7 @@ static int lockd_start_svc(struct svc_serv *serv)
+       svc_sock_update_bufs(serv);
+       serv->sv_maxconn = nlm_max_connections;
+-      nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
++      nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, "%s", serv->sv_name);
+       if (IS_ERR(nlmsvc_task)) {
+               error = PTR_ERR(nlmsvc_task);
+               printk(KERN_WARNING
+diff --git a/fs/locks.c b/fs/locks.c
+index cb424a4..850e4dd 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
+               return;
+       if (filp->f_op && filp->f_op->flock) {
+-              struct file_lock fl = {
++              struct file_lock flock = {
+                       .fl_pid = current->tgid,
+                       .fl_file = filp,
+                       .fl_flags = FL_FLOCK,
+                       .fl_type = F_UNLCK,
+                       .fl_end = OFFSET_MAX,
+               };
+-              filp->f_op->flock(filp, F_SETLKW, &fl);
+-              if (fl.fl_ops && fl.fl_ops->fl_release_private)
+-                      fl.fl_ops->fl_release_private(&fl);
++              filp->f_op->flock(filp, F_SETLKW, &flock);
++              if (flock.fl_ops && flock.fl_ops->fl_release_private)
++                      flock.fl_ops->fl_release_private(&flock);
+       }
+       lock_flocks();
+diff --git a/fs/namei.c b/fs/namei.c
+index 9ed9361..2b72db1 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
+       if (ret != -EACCES)
+               return ret;
++#ifdef CONFIG_GRKERNSEC
++      /* we'll block if we have to log due to a denied capability use */
++      if (mask & MAY_NOT_BLOCK)
++              return -ECHILD;
++#endif
++
+       if (S_ISDIR(inode->i_mode)) {
+               /* DACs are overridable for directories */
+-              if (inode_capable(inode, CAP_DAC_OVERRIDE))
+-                      return 0;
+               if (!(mask & MAY_WRITE))
+-                      if (inode_capable(inode, CAP_DAC_READ_SEARCH))
++                      if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
++                          inode_capable(inode, CAP_DAC_READ_SEARCH))
+                               return 0;
++              if (inode_capable(inode, CAP_DAC_OVERRIDE))
++                      return 0;
+               return -EACCES;
+       }
+       /*
++       * Searching includes executable on directories, else just read.
++       */
++      mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
++      if (mask == MAY_READ)
++              if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
++                  inode_capable(inode, CAP_DAC_READ_SEARCH))
++                      return 0;
++
++      /*
+        * Read/write DACs are always overridable.
+        * Executable DACs are overridable when there is
+        * at least one exec bit set.
+@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
+               if (inode_capable(inode, CAP_DAC_OVERRIDE))
+                       return 0;
+-      /*
+-       * Searching includes executable on directories, else just read.
+-       */
+-      mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
+-      if (mask == MAY_READ)
+-              if (inode_capable(inode, CAP_DAC_READ_SEARCH))
+-                      return 0;
+-
+       return -EACCES;
+ }
+@@ -820,7 +828,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
+ {
+       struct dentry *dentry = link->dentry;
+       int error;
+-      char *s;
++      const char *s;
+       BUG_ON(nd->flags & LOOKUP_RCU);
+@@ -841,6 +849,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
+       if (error)
+               goto out_put_nd_path;
++      if (gr_handle_follow_link(dentry->d_parent->d_inode,
++                                dentry->d_inode, dentry, nd->path.mnt)) {
++              error = -EACCES;
++              goto out_put_nd_path;
++      }       
++
+       nd->last_type = LAST_BIND;
+       *p = dentry->d_inode->i_op->follow_link(dentry, nd);
+       error = PTR_ERR(*p);
+@@ -1588,6 +1602,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
+               if (res)
+                       break;
+               res = walk_component(nd, path, LOOKUP_FOLLOW);
++              if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
++                      res = -EACCES;
+               put_link(nd, &link, cookie);
+       } while (res > 0);
+@@ -1686,7 +1702,7 @@ EXPORT_SYMBOL(full_name_hash);
+ static inline unsigned long hash_name(const char *name, unsigned int *hashp)
+ {
+       unsigned long a, b, adata, bdata, mask, hash, len;
+-      const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
++      static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+       hash = a = 0;
+       len = -sizeof(unsigned long);
+@@ -1968,6 +1984,8 @@ static int path_lookupat(int dfd, const char *name,
+                       if (err)
+                               break;
+                       err = lookup_last(nd, &path);
++                      if (!err && gr_handle_symlink_owner(&link, nd->inode))
++                              err = -EACCES;
+                       put_link(nd, &link, cookie);
+               }
+       }
+@@ -1975,6 +1993,13 @@ static int path_lookupat(int dfd, const char *name,
+       if (!err)
+               err = complete_walk(nd);
++      if (!err && !(nd->flags & LOOKUP_PARENT)) {
++              if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
++                      path_put(&nd->path);
++                      err = -ENOENT;
++              }
++      }
++
+       if (!err && nd->flags & LOOKUP_DIRECTORY) {
+               if (!can_lookup(nd->inode)) {
+                       path_put(&nd->path);
+@@ -2002,8 +2027,15 @@ static int filename_lookup(int dfd, struct filename *name,
+               retval = path_lookupat(dfd, name->name,
+                                               flags | LOOKUP_REVAL, nd);
+-      if (likely(!retval))
++      if (likely(!retval)) {
+               audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
++              if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
++                      if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
++                              path_put(&nd->path);
++                              return -ENOENT;
++                      }
++              }
++      }
+       return retval;
+ }
+@@ -2381,6 +2413,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
+       if (flag & O_NOATIME && !inode_owner_or_capable(inode))
+               return -EPERM;
++      if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
++              return -EPERM;
++      if (gr_handle_rawio(inode))
++              return -EPERM;
++      if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
++              return -EACCES;
++
+       return 0;
+ }
+@@ -2602,7 +2641,7 @@ looked_up:
+  * cleared otherwise prior to returning.
+  */
+ static int lookup_open(struct nameidata *nd, struct path *path,
+-                      struct file *file,
++                      struct path *link, struct file *file,
+                       const struct open_flags *op,
+                       bool got_write, int *opened)
+ {
+@@ -2637,6 +2676,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+       /* Negative dentry, just create the file */
+       if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
+               umode_t mode = op->mode;
++
++              if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
++                      error = -EACCES;
++                      goto out_dput;
++              }
++
++              if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
++                      error = -EACCES;
++                      goto out_dput;
++              }
++
+               if (!IS_POSIXACL(dir->d_inode))
+                       mode &= ~current_umask();
+               /*
+@@ -2658,6 +2708,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+                                  nd->flags & LOOKUP_EXCL);
+               if (error)
+                       goto out_dput;
++              else
++                      gr_handle_create(dentry, nd->path.mnt);
+       }
+ out_no_open:
+       path->dentry = dentry;
+@@ -2672,7 +2724,7 @@ out_dput:
+ /*
+  * Handle the last step of open()
+  */
+-static int do_last(struct nameidata *nd, struct path *path,
++static int do_last(struct nameidata *nd, struct path *path, struct path *link,
+                  struct file *file, const struct open_flags *op,
+                  int *opened, struct filename *name)
+ {
+@@ -2701,16 +2753,32 @@ static int do_last(struct nameidata *nd, struct path *path,
+               error = complete_walk(nd);
+               if (error)
+                       return error;
++              if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
++                      error = -ENOENT;
++                      goto out;
++              }
+               audit_inode(name, nd->path.dentry, 0);
+               if (open_flag & O_CREAT) {
+                       error = -EISDIR;
+                       goto out;
+               }
++              if (link && gr_handle_symlink_owner(link, nd->inode)) {
++                      error = -EACCES;
++                      goto out;
++              }
+               goto finish_open;
+       case LAST_BIND:
+               error = complete_walk(nd);
+               if (error)
+                       return error;
++              if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
++                      error = -ENOENT;
++                      goto out;
++              }
++              if (link && gr_handle_symlink_owner(link, nd->inode)) {
++                      error = -EACCES;
++                      goto out;
++              }
+               audit_inode(name, dir, 0);
+               goto finish_open;
+       }
+@@ -2759,7 +2827,7 @@ retry_lookup:
+                */
+       }
+       mutex_lock(&dir->d_inode->i_mutex);
+-      error = lookup_open(nd, path, file, op, got_write, opened);
++      error = lookup_open(nd, path, link, file, op, got_write, opened);
+       mutex_unlock(&dir->d_inode->i_mutex);
+       if (error <= 0) {
+@@ -2783,11 +2851,28 @@ retry_lookup:
+               goto finish_open_created;
+       }
++      if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
++              error = -ENOENT;
++              goto exit_dput;
++      }
++      if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
++              error = -EACCES;
++              goto exit_dput;
++      }
++
+       /*
+        * create/update audit record if it already exists.
+        */
+-      if (path->dentry->d_inode)
++      if (path->dentry->d_inode) {
++              /* only check if O_CREAT is specified, all other checks need to go
++                 into may_open */
++              if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
++                      error = -EACCES;
++                      goto exit_dput;
++              }
++
+               audit_inode(name, path->dentry, 0);
++      }
+       /*
+        * If atomic_open() acquired write access it is dropped now due to
+@@ -2828,6 +2913,11 @@ finish_lookup:
+                       }
+               }
+               BUG_ON(inode != path->dentry->d_inode);
++              /* if we're resolving a symlink to another symlink */
++              if (link && gr_handle_symlink_owner(link, inode)) {
++                      error = -EACCES;
++                      goto out;
++              }
+               return 1;
+       }
+@@ -2837,7 +2927,6 @@ finish_lookup:
+               save_parent.dentry = nd->path.dentry;
+               save_parent.mnt = mntget(path->mnt);
+               nd->path.dentry = path->dentry;
+-
+       }
+       nd->inode = inode;
+       /* Why this, you ask?  _Now_ we might have grown LOOKUP_JUMPED... */
+@@ -2846,6 +2935,16 @@ finish_lookup:
+               path_put(&save_parent);
+               return error;
+       }
++
++      if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
++              error = -ENOENT;
++              goto out;
++      }
++      if (link && gr_handle_symlink_owner(link, nd->inode)) {
++              error = -EACCES;
++              goto out;
++      }
++
+       error = -EISDIR;
+       if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
+               goto out;
+@@ -2944,7 +3043,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+       if (unlikely(error))
+               goto out;
+-      error = do_last(nd, &path, file, op, &opened, pathname);
++      error = do_last(nd, &path, NULL, file, op, &opened, pathname);
+       while (unlikely(error > 0)) { /* trailing symlink */
+               struct path link = path;
+               void *cookie;
+@@ -2962,7 +3061,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+               error = follow_link(&link, nd, &cookie);
+               if (unlikely(error))
+                       break;
+-              error = do_last(nd, &path, file, op, &opened, pathname);
++              error = do_last(nd, &path, &link, file, op, &opened, pathname);
+               put_link(nd, &link, cookie);
+       }
+ out:
+@@ -3062,8 +3161,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
+               goto unlock;
+       error = -EEXIST;
+-      if (dentry->d_inode)
++      if (dentry->d_inode) {
++              if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
++                      error = -ENOENT;
++              }
+               goto fail;
++      }
+       /*
+        * Special case - lookup gave negative, but... we had foo/bar/
+        * From the vfs_mknod() POV we just have a negative dentry -
+@@ -3115,6 +3218,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
+ }
+ EXPORT_SYMBOL(user_path_create);
++static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
++{
++      struct filename *tmp = getname(pathname);
++      struct dentry *res;
++      if (IS_ERR(tmp))
++              return ERR_CAST(tmp);
++      res = kern_path_create(dfd, tmp->name, path, lookup_flags);
++      if (IS_ERR(res))
++              putname(tmp);
++      else
++              *to = tmp;
++      return res;
++}
++
+ int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
+ {
+       int error = may_create(dir, dentry);
+@@ -3177,6 +3294,17 @@ retry:
+       if (!IS_POSIXACL(path.dentry->d_inode))
+               mode &= ~current_umask();
++
++      if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
++              error = -EPERM;
++              goto out;
++      }
++
++      if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
++              error = -EACCES;
++              goto out;
++      }
++
+       error = security_path_mknod(&path, dentry, mode, dev);
+       if (error)
+               goto out;
+@@ -3193,6 +3321,8 @@ retry:
+                       break;
+       }
+ out:
++      if (!error)
++              gr_handle_create(dentry, path.mnt);
+       done_path_create(&path, dentry);
+       if (retry_estale(error, lookup_flags)) {
+               lookup_flags |= LOOKUP_REVAL;
+@@ -3245,9 +3375,16 @@ retry:
+       if (!IS_POSIXACL(path.dentry->d_inode))
+               mode &= ~current_umask();
++      if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
++              error = -EACCES;
++              goto out;
++      }
+       error = security_path_mkdir(&path, dentry, mode);
+       if (!error)
+               error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
++      if (!error)
++              gr_handle_create(dentry, path.mnt);
++out:
+       done_path_create(&path, dentry);
+       if (retry_estale(error, lookup_flags)) {
+               lookup_flags |= LOOKUP_REVAL;
+@@ -3328,6 +3465,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+       struct filename *name;
+       struct dentry *dentry;
+       struct nameidata nd;
++      ino_t saved_ino = 0;
++      dev_t saved_dev = 0;
+       unsigned int lookup_flags = 0;
+ retry:
+       name = user_path_parent(dfd, pathname, &nd, lookup_flags);
+@@ -3360,10 +3499,21 @@ retry:
+               error = -ENOENT;
+               goto exit3;
+       }
++
++      saved_ino = dentry->d_inode->i_ino;
++      saved_dev = gr_get_dev_from_dentry(dentry);
++
++      if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
++              error = -EACCES;
++              goto exit3;
++      }
++
+       error = security_path_rmdir(&nd.path, dentry);
+       if (error)
+               goto exit3;
+       error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
++      if (!error && (saved_dev || saved_ino))
++              gr_handle_delete(saved_ino, saved_dev);
+ exit3:
+       dput(dentry);
+ exit2:
+@@ -3429,6 +3579,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+       struct dentry *dentry;
+       struct nameidata nd;
+       struct inode *inode = NULL;
++      ino_t saved_ino = 0;
++      dev_t saved_dev = 0;
+       unsigned int lookup_flags = 0;
+ retry:
+       name = user_path_parent(dfd, pathname, &nd, lookup_flags);
+@@ -3455,10 +3607,22 @@ retry:
+               if (!inode)
+                       goto slashes;
+               ihold(inode);
++
++              if (inode->i_nlink <= 1) {
++                      saved_ino = inode->i_ino;
++                      saved_dev = gr_get_dev_from_dentry(dentry);
++              }
++              if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
++                      error = -EACCES;
++                      goto exit2;
++              }
++
+               error = security_path_unlink(&nd.path, dentry);
+               if (error)
+                       goto exit2;
+               error = vfs_unlink(nd.path.dentry->d_inode, dentry);
++              if (!error && (saved_ino || saved_dev))
++                      gr_handle_delete(saved_ino, saved_dev);
+ exit2:
+               dput(dentry);
+       }
+@@ -3536,9 +3700,17 @@ retry:
+       if (IS_ERR(dentry))
+               goto out_putname;
++      if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
++              error = -EACCES;
++              goto out;
++      }
++
+       error = security_path_symlink(&path, dentry, from->name);
+       if (!error)
+               error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
++      if (!error)
++              gr_handle_create(dentry, path.mnt);
++out:
+       done_path_create(&path, dentry);
+       if (retry_estale(error, lookup_flags)) {
+               lookup_flags |= LOOKUP_REVAL;
+@@ -3612,6 +3784,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+ {
+       struct dentry *new_dentry;
+       struct path old_path, new_path;
++      struct filename *to = NULL;
+       int how = 0;
+       int error;
+@@ -3635,7 +3808,7 @@ retry:
+       if (error)
+               return error;
+-      new_dentry = user_path_create(newdfd, newname, &new_path,
++      new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
+                                       (how & LOOKUP_REVAL));
+       error = PTR_ERR(new_dentry);
+       if (IS_ERR(new_dentry))
+@@ -3647,11 +3820,28 @@ retry:
+       error = may_linkat(&old_path);
+       if (unlikely(error))
+               goto out_dput;
++
++      if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
++                             old_path.dentry->d_inode,
++                             old_path.dentry->d_inode->i_mode, to)) {
++              error = -EACCES;
++              goto out_dput;
++      }
++
++      if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
++                              old_path.dentry, old_path.mnt, to)) {
++              error = -EACCES;
++              goto out_dput;
++      }
++
+       error = security_path_link(old_path.dentry, &new_path, new_dentry);
+       if (error)
+               goto out_dput;
+       error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
++      if (!error)
++              gr_handle_create(new_dentry, new_path.mnt);
+ out_dput:
++      putname(to);
+       done_path_create(&new_path, new_dentry);
+       if (retry_estale(error, how)) {
+               how |= LOOKUP_REVAL;
+@@ -3897,12 +4087,21 @@ retry:
+       if (new_dentry == trap)
+               goto exit5;
++      error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
++                                   old_dentry, old_dir->d_inode, oldnd.path.mnt,
++                                   to);
++      if (error)
++              goto exit5;
++
+       error = security_path_rename(&oldnd.path, old_dentry,
+                                    &newnd.path, new_dentry);
+       if (error)
+               goto exit5;
+       error = vfs_rename(old_dir->d_inode, old_dentry,
+                                  new_dir->d_inode, new_dentry);
++      if (!error)
++              gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
++                               new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
+ exit5:
+       dput(new_dentry);
+ exit4:
+@@ -3934,6 +4133,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
+ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
+ {
++      char tmpbuf[64];
++      const char *newlink;
+       int len;
+       len = PTR_ERR(link);
+@@ -3943,7 +4144,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
+       len = strlen(link);
+       if (len > (unsigned) buflen)
+               len = buflen;
+-      if (copy_to_user(buffer, link, len))
++
++      if (len < sizeof(tmpbuf)) {
++              memcpy(tmpbuf, link, len);
++              newlink = tmpbuf;
++      } else
++              newlink = link;
++
++      if (copy_to_user(buffer, newlink, len))
+               len = -EFAULT;
+ out:
+       return len;
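Editor's note: the vfs_readlink() hunk above stages short link text in a 64-byte stack buffer before copy_to_user(), presumably so the user copy reads from a small local buffer rather than the original allocation; longer links fall back to the original pointer. The sketch below shows the same bounce-buffer pattern in plain userspace C; copy_out() merely stands in for copy_to_user() and every name is invented.

/*
 * Userspace sketch of the bounce-buffer pattern: if the payload fits,
 * stage it in a small stack buffer and hand the copy routine that buffer
 * instead of the original source. Purely illustrative.
 */
#include <stdio.h>
#include <string.h>

static int copy_out(char *dst, const char *src, size_t len)
{
        memcpy(dst, src, len);  /* in the kernel this would be copy_to_user() */
        return 0;
}

static int read_link(char *user_buf, size_t buflen, const char *link)
{
        char tmpbuf[64];
        const char *src = link;
        size_t len = strlen(link);

        if (len > buflen)
                len = buflen;

        if (len < sizeof(tmpbuf)) {
                memcpy(tmpbuf, link, len);
                src = tmpbuf;   /* copy from the stack copy, not the original */
        }

        return copy_out(user_buf, src, len) ? -1 : (int)len;
}

int main(void)
{
        char out[128];
        int n = read_link(out, sizeof(out), "/tmp/target");

        printf("copied %d bytes: %.*s\n", n, n, out);
        return 0;
}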
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 7b1ca9b..6faeccf 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1265,6 +1265,9 @@ static int do_umount(struct mount *mnt, int flags)
+               if (!(sb->s_flags & MS_RDONLY))
+                       retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
+               up_write(&sb->s_umount);
++
++              gr_log_remount(mnt->mnt_devname, retval);
++
+               return retval;
+       }
+@@ -1283,6 +1286,9 @@ static int do_umount(struct mount *mnt, int flags)
+       }
+       br_write_unlock(&vfsmount_lock);
+       namespace_unlock();
++
++      gr_log_unmount(mnt->mnt_devname, retval);
++
+       return retval;
+ }
+@@ -1302,7 +1308,7 @@ static inline bool may_mount(void)
+  * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
+  */
+-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
++SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
+ {
+       struct path path;
+       struct mount *mnt;
+@@ -1342,7 +1348,7 @@ out:
+ /*
+  *    The 2.0 compatible umount. No flags.
+  */
+-SYSCALL_DEFINE1(oldumount, char __user *, name)
++SYSCALL_DEFINE1(oldumount, const char __user *, name)
+ {
+       return sys_umount(name, 0);
+ }
+@@ -2313,6 +2319,16 @@ long do_mount(const char *dev_name, const char *dir_name,
+                  MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
+                  MS_STRICTATIME);
++      if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
++              retval = -EPERM;
++              goto dput_out;
++      }
++
++      if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
++              retval = -EPERM;
++              goto dput_out;
++      }
++
+       if (flags & MS_REMOUNT)
+               retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
+                                   data_page);
+@@ -2327,6 +2343,9 @@ long do_mount(const char *dev_name, const char *dir_name,
+                                     dev_name, data_page);
+ dput_out:
+       path_put(&path);
++
++      gr_log_mount(dev_name, dir_name, retval);
++
+       return retval;
+ }
+@@ -2500,8 +2519,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
+ }
+ EXPORT_SYMBOL(mount_subtree);
+-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
+-              char __user *, type, unsigned long, flags, void __user *, data)
++SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
++              const char __user *, type, unsigned long, flags, void __user *, data)
+ {
+       int ret;
+       char *kernel_type;
+@@ -2614,6 +2633,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
+       if (error)
+               goto out2;
++      if (gr_handle_chroot_pivot()) {
++              error = -EPERM;
++              goto out2;
++      }
++
+       get_fs_root(current->fs, &root);
+       old_mp = lock_mount(&old);
+       error = PTR_ERR(old_mp);
+@@ -2864,7 +2888,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
+           !nsown_capable(CAP_SYS_ADMIN))
+               return -EPERM;
+-      if (fs->users != 1)
++      if (atomic_read(&fs->users) != 1)
+               return -EINVAL;
+       get_mnt_ns(mnt_ns);
+diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
+index cff089a..4c3d57a 100644
+--- a/fs/nfs/callback.c
++++ b/fs/nfs/callback.c
+@@ -211,7 +211,6 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
+       struct svc_rqst *rqstp;
+       int (*callback_svc)(void *vrqstp);
+       struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
+-      char svc_name[12];
+       int ret;
+       nfs_callback_bc_serv(minorversion, xprt, serv);
+@@ -235,10 +234,9 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
+       svc_sock_update_bufs(serv);
+-      sprintf(svc_name, "nfsv4.%u-svc", minorversion);
+       cb_info->serv = serv;
+       cb_info->rqst = rqstp;
+-      cb_info->task = kthread_run(callback_svc, cb_info->rqst, svc_name);
++      cb_info->task = kthread_run(callback_svc, cb_info->rqst, "nfsv4.%u-svc", minorversion);
+       if (IS_ERR(cb_info->task)) {
+               ret = PTR_ERR(cb_info->task);
+               svc_exit_thread(cb_info->rqst);
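Editor's note: kthread_run() treats its name argument as a printf-style format, so passing a caller-derived string directly lets stray % sequences be parsed as conversions. The hunks here, and the matching ones in fs/lockd/svc.c and fs/nfs/nfs4state.c, route the string through a literal "%s" instead. A minimal userspace analogue with printf():

/*
 * Never feed a string you did not author in as the format argument. With
 * a literal "%s" the percent signs in `name` are plain data; used directly
 * as the format they would be interpreted as conversions.
 */
#include <stdio.h>

int main(void)
{
        const char *name = "nfsv4.1-svc 100%s-done";  /* imagine this came from elsewhere */

        printf("%s\n", name);   /* safe: name is data */
        /* printf(name);           unsafe: name becomes the format (undefined behaviour) */
        return 0;
}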
+diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
+index a35582c..ebbdcd5 100644
+--- a/fs/nfs/callback_xdr.c
++++ b/fs/nfs/callback_xdr.c
+@@ -51,7 +51,7 @@ struct callback_op {
+       callback_decode_arg_t decode_args;
+       callback_encode_res_t encode_res;
+       long res_maxsize;
+-};
++} __do_const;
+ static struct callback_op callback_ops[];
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index c1c7a9d..7afa0b8 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1043,16 +1043,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
+       return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
+ }
+-static atomic_long_t nfs_attr_generation_counter;
++static atomic_long_unchecked_t nfs_attr_generation_counter;
+ static unsigned long nfs_read_attr_generation_counter(void)
+ {
+-      return atomic_long_read(&nfs_attr_generation_counter);
++      return atomic_long_read_unchecked(&nfs_attr_generation_counter);
+ }
+ unsigned long nfs_inc_attr_generation_counter(void)
+ {
+-      return atomic_long_inc_return(&nfs_attr_generation_counter);
++      return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
+ }
+ void nfs_fattr_init(struct nfs_fattr *fattr)
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 2c37442..9b9538b 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1193,7 +1193,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
+       snprintf(buf, sizeof(buf), "%s-manager",
+                       rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+       rcu_read_unlock();
+-      task = kthread_run(nfs4_run_state_manager, clp, buf);
++      task = kthread_run(nfs4_run_state_manager, clp, "%s", buf);
+       if (IS_ERR(task)) {
+               printk(KERN_ERR "%s: kthread_run: %ld\n",
+                       __func__, PTR_ERR(task));
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 27d74a2..c4c2a73 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1126,7 +1126,7 @@ struct nfsd4_operation {
+       nfsd4op_rsize op_rsize_bop;
+       stateid_getter op_get_currentstateid;
+       stateid_setter op_set_currentstateid;
+-};
++} __do_const;
+ static struct nfsd4_operation nfsd4_ops[];
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 582321a..0224663 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -1458,7 +1458,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
+ typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
+-static nfsd4_dec nfsd4_dec_ops[] = {
++static const nfsd4_dec nfsd4_dec_ops[] = {
+       [OP_ACCESS]             = (nfsd4_dec)nfsd4_decode_access,
+       [OP_CLOSE]              = (nfsd4_dec)nfsd4_decode_close,
+       [OP_COMMIT]             = (nfsd4_dec)nfsd4_decode_commit,
+@@ -1498,7 +1498,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
+       [OP_RELEASE_LOCKOWNER]  = (nfsd4_dec)nfsd4_decode_release_lockowner,
+ };
+-static nfsd4_dec nfsd41_dec_ops[] = {
++static const nfsd4_dec nfsd41_dec_ops[] = {
+       [OP_ACCESS]             = (nfsd4_dec)nfsd4_decode_access,
+       [OP_CLOSE]              = (nfsd4_dec)nfsd4_decode_close,
+       [OP_COMMIT]             = (nfsd4_dec)nfsd4_decode_commit,
+@@ -1560,7 +1560,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
+ };
+ struct nfsd4_minorversion_ops {
+-      nfsd4_dec *decoders;
++      const nfsd4_dec *decoders;
+       int nops;
+ };
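Editor's note: the nfs4xdr.c hunks mark the decoder dispatch tables const so they can be placed in read-only memory and the handlers cannot be repointed at run time. The same idiom in plain C, with invented operation names:

/*
 * Minimal sketch of a const function-pointer dispatch table: once the
 * table is const, assigning a new handler is rejected at compile time.
 */
#include <stdio.h>

typedef int (*op_handler)(int arg);

static int op_access(int arg) { return arg + 1; }
static int op_close(int arg)  { return arg + 2; }

static const op_handler ops[] = {
        op_access,
        op_close,
};

int main(void)
{
        for (unsigned int i = 0; i < sizeof(ops) / sizeof(ops[0]); i++)
                printf("op %u -> %d\n", i, ops[i](10));
        /* ops[0] = op_close;   would now be a compile-time error */
        return 0;
}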
+diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
+index e76244e..9fe8f2f1 100644
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -526,14 +526,17 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
+ {
+       struct svc_cacherep *rp = rqstp->rq_cacherep;
+       struct kvec     *resv = &rqstp->rq_res.head[0], *cachv;
+-      int             len;
++      long            len;
+       size_t          bufsize = 0;
+       if (!rp)
+               return;
+-      len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
+-      len >>= 2;
++      if (statp) {
++              len = (char*)statp - (char*)resv->iov_base;
++              len = resv->iov_len - len;
++              len >>= 2;
++      }
+       /* Don't cache excessive amounts of data and XDR failures */
+       if (!statp || len > (256 >> 2)) {
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index baf149a..76b86ad 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -940,7 +940,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+       } else {
+               oldfs = get_fs();
+               set_fs(KERNEL_DS);
+-              host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
++              host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
+               set_fs(oldfs);
+       }
+@@ -1027,7 +1027,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+       /* Write the data. */
+       oldfs = get_fs(); set_fs(KERNEL_DS);
+-      host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
++      host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
+       set_fs(oldfs);
+       if (host_err < 0)
+               goto out_nfserr;
+@@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
+        */
+       oldfs = get_fs(); set_fs(KERNEL_DS);
+-      host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
++      host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
+       set_fs(oldfs);
+       if (host_err < 0)
+diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
+index fea6bd5..8ee9d81 100644
+--- a/fs/nls/nls_base.c
++++ b/fs/nls/nls_base.c
+@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
+ int register_nls(struct nls_table * nls)
+ {
+-      struct nls_table ** tmp = &tables;
++      struct nls_table *tmp = tables;
+       if (nls->next)
+               return -EBUSY;
+       spin_lock(&nls_lock);
+-      while (*tmp) {
+-              if (nls == *tmp) {
++      while (tmp) {
++              if (nls == tmp) {
+                       spin_unlock(&nls_lock);
+                       return -EBUSY;
+               }
+-              tmp = &(*tmp)->next;
++              tmp = tmp->next;
+       }
+-      nls->next = tables;
++      pax_open_kernel();
++      *(struct nls_table **)&nls->next = tables;
++      pax_close_kernel();
+       tables = nls;
+       spin_unlock(&nls_lock);
+       return 0;       
+@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
+ int unregister_nls(struct nls_table * nls)
+ {
+-      struct nls_table ** tmp = &tables;
++      struct nls_table * const * tmp = &tables;
+       spin_lock(&nls_lock);
+       while (*tmp) {
+               if (nls == *tmp) {
+-                      *tmp = nls->next;
++                      pax_open_kernel();
++                      *(struct nls_table **)tmp = nls->next;
++                      pax_close_kernel();
+                       spin_unlock(&nls_lock);
+                       return 0;
+               }
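Editor's note: in the nls_base.c hunks, register_nls() switches to a plain-pointer walk for the duplicate check (nothing is written through the cursor) and performs the head insertion inside pax_open_kernel()/pax_close_kernel() because the `next` field is now treated as read-only; unregister_nls() keeps the pointer-to-pointer walk so it can unlink a node without tracking a previous pointer. Both traversal styles on an ordinary singly linked list, without the pax_* wrappers and with invented names:

/*
 * Sketch of the two list idioms: plain-pointer walk for membership,
 * pointer-to-pointer walk for unlinking. Illustrative only.
 */
#include <stdio.h>
#include <stddef.h>

struct node { const char *name; struct node *next; };

static struct node *head;

static int node_register(struct node *n)
{
        for (struct node *tmp = head; tmp; tmp = tmp->next)
                if (tmp == n)
                        return -1;      /* already on the list */
        n->next = head;                 /* insert at the head */
        head = n;
        return 0;
}

static int node_unregister(struct node *n)
{
        for (struct node **tmp = &head; *tmp; tmp = &(*tmp)->next) {
                if (*tmp == n) {
                        *tmp = n->next; /* unlink via the pointer that points at us */
                        return 0;
                }
        }
        return -1;
}

int main(void)
{
        struct node a = { "cp437", NULL }, b = { "utf8", NULL };

        node_register(&a);
        node_register(&b);
        node_unregister(&a);
        for (struct node *tmp = head; tmp; tmp = tmp->next)
                printf("registered: %s\n", tmp->name);
        return 0;
}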
+diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
+index 7424929..35f6be5 100644
+--- a/fs/nls/nls_euc-jp.c
++++ b/fs/nls/nls_euc-jp.c
+@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
+       p_nls = load_nls("cp932");
+       if (p_nls) {
+-              table.charset2upper = p_nls->charset2upper;
+-              table.charset2lower = p_nls->charset2lower;
++              pax_open_kernel();
++              *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
++              *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
++              pax_close_kernel();
+               return register_nls(&table);
+       }
+diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
+index e7bc1d7..06bd4bb 100644
+--- a/fs/nls/nls_koi8-ru.c
++++ b/fs/nls/nls_koi8-ru.c
+@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
+       p_nls = load_nls("koi8-u");
+       if (p_nls) {
+-              table.charset2upper = p_nls->charset2upper;
+-              table.charset2lower = p_nls->charset2lower;
++              pax_open_kernel();
++              *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
++              *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
++              pax_close_kernel();
+               return register_nls(&table);
+       }
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index 77cc85d..a1e6299 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -253,8 +253,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
+       fd = fanotify_event_metadata.fd;
+       ret = -EFAULT;
+-      if (copy_to_user(buf, &fanotify_event_metadata,
+-                       fanotify_event_metadata.event_len))
++      if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
++          copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
+               goto out_close_fd;
+       ret = prepare_for_access_response(group, event, fd);
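Editor's note: the fanotify hunk above refuses to copy_to_user() more bytes than sizeof(fanotify_event_metadata), so an oversized event_len can no longer pull extra bytes past the structure out to user space. Below is a userspace sketch of validating a self-described length against the real object size before copying; copy_event() and the struct are invented, and memcpy() stands in for copy_to_user().

/*
 * An object carries its own length field; the copy routine rejects any
 * length larger than the object actually is before handing bytes out.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct event_metadata {
        uint32_t event_len;     /* length the producer claims */
        uint32_t mask;
        int32_t  fd;
};

static int copy_event(char *dst, size_t dstlen, const struct event_metadata *ev)
{
        if (ev->event_len > sizeof(*ev))
                return -1;      /* claimed length exceeds the real object: reject */
        if (ev->event_len > dstlen)
                return -1;      /* would overflow the destination: reject */
        memcpy(dst, ev, ev->event_len);
        return (int)ev->event_len;
}

int main(void)
{
        struct event_metadata good = { sizeof(good), 0x1, 5 };
        struct event_metadata bad  = { 4096, 0x1, 5 };  /* lies about its size */
        char buf[64];

        printf("good copy -> %d bytes\n", copy_event(buf, sizeof(buf), &good));
        printf("bad copy  -> %d\n", copy_event(buf, sizeof(buf), &bad));
        return 0;
}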
+diff --git a/fs/notify/notification.c b/fs/notify/notification.c
+index 7b51b05..5ea5ef6 100644
+--- a/fs/notify/notification.c
++++ b/fs/notify/notification.c
+@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
+  * get set to 0 so it will never get 'freed'
+  */
+ static struct fsnotify_event *q_overflow_event;
+-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
++static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
+ /**
+  * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
+@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
+  */
+ u32 fsnotify_get_cookie(void)
+ {
+-      return atomic_inc_return(&fsnotify_sync_cookie);
++      return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
+ }
+ EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
+diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
+index aa411c3..c260a84 100644
+--- a/fs/ntfs/dir.c
++++ b/fs/ntfs/dir.c
+@@ -1329,7 +1329,7 @@ find_next_index_buffer:
+       ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
+                       ~(s64)(ndir->itype.index.block_size - 1)));
+       /* Bounds checks. */
+-      if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
++      if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
+               ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
+                               "inode 0x%lx or driver bug.", vdir->i_ino);
+               goto err_out;
+diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
+index c5670b8..01a3656 100644
+--- a/fs/ntfs/file.c
++++ b/fs/ntfs/file.c
+@@ -2241,6 +2241,6 @@ const struct inode_operations ntfs_file_inode_ops = {
+ #endif /* NTFS_RW */
+ };
+-const struct file_operations ntfs_empty_file_ops = {};
++const struct file_operations ntfs_empty_file_ops __read_only;
+-const struct inode_operations ntfs_empty_inode_ops = {};
++const struct inode_operations ntfs_empty_inode_ops __read_only;
+diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
+index 20dfec7..e238cb7 100644
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -1756,7 +1756,7 @@ try_again:
+               goto out;
+       } else if (ret == 1) {
+               clusters_need = wc->w_clen;
+-              ret = ocfs2_refcount_cow(inode, filp, di_bh,
++              ret = ocfs2_refcount_cow(inode, di_bh,
+                                        wc->w_cpos, wc->w_clen, UINT_MAX);
+               if (ret) {
+                       mlog_errno(ret);
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index ff54014..ff125fd 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -370,7 +370,7 @@ static int ocfs2_cow_file_pos(struct inode *inode,
+       if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
+               goto out;
+-      return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1);
++      return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
+ out:
+       return status;
+@@ -899,7 +899,7 @@ static int ocfs2_zero_extend_get_range(struct inode *inode,
+               zero_clusters = last_cpos - zero_cpos;
+       if (needs_cow) {
+-              rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos,
++              rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
+                                       zero_clusters, UINT_MAX);
+               if (rc) {
+                       mlog_errno(rc);
+@@ -2078,7 +2078,7 @@ static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
+       *meta_level = 1;
+-      ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX);
++      ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
+       if (ret)
+               mlog_errno(ret);
+ out:
+diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
+index aebeacd..0dcdd26 100644
+--- a/fs/ocfs2/localalloc.c
++++ b/fs/ocfs2/localalloc.c
+@@ -1278,7 +1278,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
+               goto bail;
+       }
+-      atomic_inc(&osb->alloc_stats.moves);
++      atomic_inc_unchecked(&osb->alloc_stats.moves);
+ bail:
+       if (handle)
+diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
+index f1fc172..452068b 100644
+--- a/fs/ocfs2/move_extents.c
++++ b/fs/ocfs2/move_extents.c
+@@ -69,7 +69,7 @@ static int __ocfs2_move_extent(handle_t *handle,
+       u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
+       u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);
+-      ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos,
++      ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
+                                              p_cpos, new_p_cpos, len);
+       if (ret) {
+               mlog_errno(ret);
+diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
+index d355e6e..578d905 100644
+--- a/fs/ocfs2/ocfs2.h
++++ b/fs/ocfs2/ocfs2.h
+@@ -235,11 +235,11 @@ enum ocfs2_vol_state
+ struct ocfs2_alloc_stats
+ {
+-      atomic_t moves;
+-      atomic_t local_data;
+-      atomic_t bitmap_data;
+-      atomic_t bg_allocs;
+-      atomic_t bg_extends;
++      atomic_unchecked_t moves;
++      atomic_unchecked_t local_data;
++      atomic_unchecked_t bitmap_data;
++      atomic_unchecked_t bg_allocs;
++      atomic_unchecked_t bg_extends;
+ };
+ enum ocfs2_local_alloc_state
+diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
+index 998b17e..aefe414 100644
+--- a/fs/ocfs2/refcounttree.c
++++ b/fs/ocfs2/refcounttree.c
+@@ -49,7 +49,6 @@
+ struct ocfs2_cow_context {
+       struct inode *inode;
+-      struct file *file;
+       u32 cow_start;
+       u32 cow_len;
+       struct ocfs2_extent_tree data_et;
+@@ -66,7 +65,7 @@ struct ocfs2_cow_context {
+                           u32 *num_clusters,
+                           unsigned int *extent_flags);
+       int (*cow_duplicate_clusters)(handle_t *handle,
+-                                    struct file *file,
++                                    struct inode *inode,
+                                     u32 cpos, u32 old_cluster,
+                                     u32 new_cluster, u32 new_len);
+ };
+@@ -2922,14 +2921,12 @@ static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
+ }
+ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
+-                                   struct file *file,
++                                   struct inode *inode,
+                                    u32 cpos, u32 old_cluster,
+                                    u32 new_cluster, u32 new_len)
+ {
+       int ret = 0, partial;
+-      struct inode *inode = file_inode(file);
+-      struct ocfs2_caching_info *ci = INODE_CACHE(inode);
+-      struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
++      struct super_block *sb = inode->i_sb;
+       u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
+       struct page *page;
+       pgoff_t page_index;
+@@ -2973,13 +2970,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
+               if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
+                       BUG_ON(PageDirty(page));
+-              if (PageReadahead(page)) {
+-                      page_cache_async_readahead(mapping,
+-                                                 &file->f_ra, file,
+-                                                 page, page_index,
+-                                                 readahead_pages);
+-              }
+-
+               if (!PageUptodate(page)) {
+                       ret = block_read_full_page(page, ocfs2_get_block);
+                       if (ret) {
+@@ -2999,7 +2989,8 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
+                       }
+               }
+-              ocfs2_map_and_dirty_page(inode, handle, from, to,
++              ocfs2_map_and_dirty_page(inode,
++                                       handle, from, to,
+                                        page, 0, &new_block);
+               mark_page_accessed(page);
+ unlock:
+@@ -3015,12 +3006,11 @@ unlock:
+ }
+ int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
+-                                  struct file *file,
++                                  struct inode *inode,
+                                   u32 cpos, u32 old_cluster,
+                                   u32 new_cluster, u32 new_len)
+ {
+       int ret = 0;
+-      struct inode *inode = file_inode(file);
+       struct super_block *sb = inode->i_sb;
+       struct ocfs2_caching_info *ci = INODE_CACHE(inode);
+       int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
+@@ -3145,7 +3135,7 @@ static int ocfs2_replace_clusters(handle_t *handle,
+       /*If the old clusters is unwritten, no need to duplicate. */
+       if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
+-              ret = context->cow_duplicate_clusters(handle, context->file,
++              ret = context->cow_duplicate_clusters(handle, context->inode,
+                                                     cpos, old, new, len);
+               if (ret) {
+                       mlog_errno(ret);
+@@ -3423,35 +3413,12 @@ static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
+       return ret;
+ }
+-static void ocfs2_readahead_for_cow(struct inode *inode,
+-                                  struct file *file,
+-                                  u32 start, u32 len)
+-{
+-      struct address_space *mapping;
+-      pgoff_t index;
+-      unsigned long num_pages;
+-      int cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
+-
+-      if (!file)
+-              return;
+-
+-      mapping = file->f_mapping;
+-      num_pages = (len << cs_bits) >> PAGE_CACHE_SHIFT;
+-      if (!num_pages)
+-              num_pages = 1;
+-
+-      index = ((loff_t)start << cs_bits) >> PAGE_CACHE_SHIFT;
+-      page_cache_sync_readahead(mapping, &file->f_ra, file,
+-                                index, num_pages);
+-}
+-
+ /*
+  * Starting at cpos, try to CoW write_len clusters.  Don't CoW
+  * past max_cpos.  This will stop when it runs into a hole or an
+  * unrefcounted extent.
+  */
+ static int ocfs2_refcount_cow_hunk(struct inode *inode,
+-                                 struct file *file,
+                                  struct buffer_head *di_bh,
+                                  u32 cpos, u32 write_len, u32 max_cpos)
+ {
+@@ -3480,8 +3447,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
+       BUG_ON(cow_len == 0);
+-      ocfs2_readahead_for_cow(inode, file, cow_start, cow_len);
+-
+       context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
+       if (!context) {
+               ret = -ENOMEM;
+@@ -3503,7 +3468,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
+       context->ref_root_bh = ref_root_bh;
+       context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
+       context->get_clusters = ocfs2_di_get_clusters;
+-      context->file = file;
+       ocfs2_init_dinode_extent_tree(&context->data_et,
+                                     INODE_CACHE(inode), di_bh);
+@@ -3532,7 +3496,6 @@ out:
+  * clusters between cpos and cpos+write_len are safe to modify.
+  */
+ int ocfs2_refcount_cow(struct inode *inode,
+-                     struct file *file,
+                      struct buffer_head *di_bh,
+                      u32 cpos, u32 write_len, u32 max_cpos)
+ {
+@@ -3552,7 +3515,7 @@ int ocfs2_refcount_cow(struct inode *inode,
+                       num_clusters = write_len;
+               if (ext_flags & OCFS2_EXT_REFCOUNTED) {
+-                      ret = ocfs2_refcount_cow_hunk(inode, file, di_bh, cpos,
++                      ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
+                                                     num_clusters, max_cpos);
+                       if (ret) {
+                               mlog_errno(ret);
+diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h
+index 7754608..6422bbcdb 100644
+--- a/fs/ocfs2/refcounttree.h
++++ b/fs/ocfs2/refcounttree.h
+@@ -53,7 +53,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
+                                         int *credits,
+                                         int *ref_blocks);
+ int ocfs2_refcount_cow(struct inode *inode,
+-                     struct file *filep, struct buffer_head *di_bh,
++                     struct buffer_head *di_bh,
+                      u32 cpos, u32 write_len, u32 max_cpos);
+ typedef int (ocfs2_post_refcount_func)(struct inode *inode,
+@@ -85,11 +85,11 @@ int ocfs2_refcount_cow_xattr(struct inode *inode,
+                            u32 cpos, u32 write_len,
+                            struct ocfs2_post_refcount *post);
+ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
+-                                   struct file *file,
++                                   struct inode *inode,
+                                    u32 cpos, u32 old_cluster,
+                                    u32 new_cluster, u32 new_len);
+ int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
+-                                  struct file *file,
++                                  struct inode *inode,
+                                   u32 cpos, u32 old_cluster,
+                                   u32 new_cluster, u32 new_len);
+ int ocfs2_cow_sync_writeback(struct super_block *sb,
+diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
+index b7e74b5..19c6536 100644
+--- a/fs/ocfs2/suballoc.c
++++ b/fs/ocfs2/suballoc.c
+@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
+                               mlog_errno(status);
+                       goto bail;
+               }
+-              atomic_inc(&osb->alloc_stats.bg_extends);
++              atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
+               /* You should never ask for this much metadata */
+               BUG_ON(bits_wanted >
+@@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
+               mlog_errno(status);
+               goto bail;
+       }
+-      atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
++      atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+       *suballoc_loc = res.sr_bg_blkno;
+       *suballoc_bit_start = res.sr_bit_offset;
+@@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
+       trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
+                                          res->sr_bits);
+-      atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
++      atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+       BUG_ON(res->sr_bits != 1);
+@@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
+               mlog_errno(status);
+               goto bail;
+       }
+-      atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
++      atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+       BUG_ON(res.sr_bits != 1);
+@@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
+                                                     cluster_start,
+                                                     num_clusters);
+               if (!status)
+-                      atomic_inc(&osb->alloc_stats.local_data);
++                      atomic_inc_unchecked(&osb->alloc_stats.local_data);
+       } else {
+               if (min_clusters > (osb->bitmap_cpg - 1)) {
+                       /* The only paths asking for contiguousness
+@@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
+                               ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
+                                                                res.sr_bg_blkno,
+                                                                res.sr_bit_offset);
+-                      atomic_inc(&osb->alloc_stats.bitmap_data);
++                      atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
+                       *num_clusters = res.sr_bits;
+               }
+       }
+diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
+index 01b8516..579c4df 100644
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
+                       "%10s => GlobalAllocs: %d  LocalAllocs: %d  "
+                       "SubAllocs: %d  LAWinMoves: %d  SAExtends: %d\n",
+                       "Stats",
+-                      atomic_read(&osb->alloc_stats.bitmap_data),
+-                      atomic_read(&osb->alloc_stats.local_data),
+-                      atomic_read(&osb->alloc_stats.bg_allocs),
+-                      atomic_read(&osb->alloc_stats.moves),
+-                      atomic_read(&osb->alloc_stats.bg_extends));
++                      atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
++                      atomic_read_unchecked(&osb->alloc_stats.local_data),
++                      atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
++                      atomic_read_unchecked(&osb->alloc_stats.moves),
++                      atomic_read_unchecked(&osb->alloc_stats.bg_extends));
+       out += snprintf(buf + out, len - out,
+                       "%10s => State: %u  Descriptor: %llu  Size: %u bits  "
+@@ -2122,11 +2122,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
+       spin_lock_init(&osb->osb_xattr_lock);
+       ocfs2_init_steal_slots(osb);
+-      atomic_set(&osb->alloc_stats.moves, 0);
+-      atomic_set(&osb->alloc_stats.local_data, 0);
+-      atomic_set(&osb->alloc_stats.bitmap_data, 0);
+-      atomic_set(&osb->alloc_stats.bg_allocs, 0);
+-      atomic_set(&osb->alloc_stats.bg_extends, 0);
++      atomic_set_unchecked(&osb->alloc_stats.moves, 0);
++      atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
++      atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
++      atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
++      atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
+       /* Copy the blockcheck stats from the superblock probe */
+       osb->osb_ecc_stats = *stats;
+diff --git a/fs/open.c b/fs/open.c
+index 8c74100..4239c48 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -32,6 +32,8 @@
+ #include <linux/dnotify.h>
+ #include <linux/compat.h>
++#define CREATE_TRACE_POINTS
++#include <trace/events/fs.h>
+ #include "internal.h"
+ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
+@@ -102,6 +104,8 @@ long vfs_truncate(struct path *path, loff_t length)
+       error = locks_verify_truncate(inode, NULL, length);
+       if (!error)
+               error = security_path_truncate(path);
++      if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
++              error = -EACCES;
+       if (!error)
+               error = do_truncate(path->dentry, length, 0, NULL);
+@@ -186,6 +190,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
+       error = locks_verify_truncate(inode, f.file, length);
+       if (!error)
+               error = security_path_truncate(&f.file->f_path);
++      if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
++              error = -EACCES;
+       if (!error)
+               error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
+       sb_end_write(inode->i_sb);
+@@ -360,6 +366,9 @@ retry:
+       if (__mnt_is_readonly(path.mnt))
+               res = -EROFS;
++      if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
++              res = -EACCES;
++
+ out_path_release:
+       path_put(&path);
+       if (retry_estale(res, lookup_flags)) {
+@@ -391,6 +400,8 @@ retry:
+       if (error)
+               goto dput_and_out;
++      gr_log_chdir(path.dentry, path.mnt);
++
+       set_fs_pwd(current->fs, &path);
+ dput_and_out:
+@@ -420,6 +431,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
+               goto out_putf;
+       error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
++
++      if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
++              error = -EPERM;
++
++      if (!error)
++              gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
++
+       if (!error)
+               set_fs_pwd(current->fs, &f.file->f_path);
+ out_putf:
+@@ -449,7 +467,13 @@ retry:
+       if (error)
+               goto dput_and_out;
++      if (gr_handle_chroot_chroot(path.dentry, path.mnt))
++              goto dput_and_out;
++
+       set_fs_root(current->fs, &path);
++
++      gr_handle_chroot_chdir(&path);
++
+       error = 0;
+ dput_and_out:
+       path_put(&path);
+@@ -471,6 +495,16 @@ static int chmod_common(struct path *path, umode_t mode)
+       if (error)
+               return error;
+       mutex_lock(&inode->i_mutex);
++
++      if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
++              error = -EACCES;
++              goto out_unlock;
++      }
++      if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
++              error = -EACCES;
++              goto out_unlock;
++      }
++
+       error = security_path_chmod(path, mode);
+       if (error)
+               goto out_unlock;
+@@ -531,6 +565,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
+       uid = make_kuid(current_user_ns(), user);
+       gid = make_kgid(current_user_ns(), group);
++      if (!gr_acl_handle_chown(path->dentry, path->mnt))
++              return -EACCES;
++
+       newattrs.ia_valid =  ATTR_CTIME;
+       if (user != (uid_t) -1) {
+               if (!uid_valid(uid))
+@@ -946,6 +983,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
+                       } else {
+                               fsnotify_open(f);
+                               fd_install(fd, f);
++                              trace_do_sys_open(tmp->name, flags, mode);
+                       }
+               }
+               putname(tmp);
+diff --git a/fs/pipe.c b/fs/pipe.c
+index d2c45e1..009fe1c 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
+ static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
+ {
+-      if (pipe->files)
++      if (atomic_read(&pipe->files))
+               mutex_lock_nested(&pipe->mutex, subclass);
+ }
+@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
+ void pipe_unlock(struct pipe_inode_info *pipe)
+ {
+-      if (pipe->files)
++      if (atomic_read(&pipe->files))
+               mutex_unlock(&pipe->mutex);
+ }
+ EXPORT_SYMBOL(pipe_unlock);
+@@ -449,9 +449,9 @@ redo:
+               }
+               if (bufs)       /* More to do? */
+                       continue;
+-              if (!pipe->writers)
++              if (!atomic_read(&pipe->writers))
+                       break;
+-              if (!pipe->waiting_writers) {
++              if (!atomic_read(&pipe->waiting_writers)) {
+                       /* syscall merging: Usually we must not sleep
+                        * if O_NONBLOCK is set, or if we got some data.
+                        * But if a writer sleeps in kernel space, then
+@@ -513,7 +513,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
+       ret = 0;
+       __pipe_lock(pipe);
+-      if (!pipe->readers) {
++      if (!atomic_read(&pipe->readers)) {
+               send_sig(SIGPIPE, current, 0);
+               ret = -EPIPE;
+               goto out;
+@@ -562,7 +562,7 @@ redo1:
+       for (;;) {
+               int bufs;
+-              if (!pipe->readers) {
++              if (!atomic_read(&pipe->readers)) {
+                       send_sig(SIGPIPE, current, 0);
+                       if (!ret)
+                               ret = -EPIPE;
+@@ -653,9 +653,9 @@ redo2:
+                       kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+                       do_wakeup = 0;
+               }
+-              pipe->waiting_writers++;
++              atomic_inc(&pipe->waiting_writers);
+               pipe_wait(pipe);
+-              pipe->waiting_writers--;
++              atomic_dec(&pipe->waiting_writers);
+       }
+ out:
+       __pipe_unlock(pipe);
+@@ -709,7 +709,7 @@ pipe_poll(struct file *filp, poll_table *wait)
+       mask = 0;
+       if (filp->f_mode & FMODE_READ) {
+               mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
+-              if (!pipe->writers && filp->f_version != pipe->w_counter)
++              if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
+                       mask |= POLLHUP;
+       }
+@@ -719,7 +719,7 @@ pipe_poll(struct file *filp, poll_table *wait)
+                * Most Unices do not set POLLERR for FIFOs but on Linux they
+                * behave exactly like pipes for poll().
+                */
+-              if (!pipe->readers)
++              if (!atomic_read(&pipe->readers))
+                       mask |= POLLERR;
+       }
+@@ -734,17 +734,17 @@ pipe_release(struct inode *inode, struct file *file)
+       __pipe_lock(pipe);
+       if (file->f_mode & FMODE_READ)
+-              pipe->readers--;
++              atomic_dec(&pipe->readers);
+       if (file->f_mode & FMODE_WRITE)
+-              pipe->writers--;
++              atomic_dec(&pipe->writers);
+-      if (pipe->readers || pipe->writers) {
++      if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
+               wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
+               kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+               kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
+       }
+       spin_lock(&inode->i_lock);
+-      if (!--pipe->files) {
++      if (atomic_dec_and_test(&pipe->files)) {
+               inode->i_pipe = NULL;
+               kill = 1;
+       }
+@@ -811,7 +811,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
+       kfree(pipe);
+ }
+-static struct vfsmount *pipe_mnt __read_mostly;
++struct vfsmount *pipe_mnt __read_mostly;
+ /*
+  * pipefs_dname() is called from d_path().
+@@ -841,8 +841,9 @@ static struct inode * get_pipe_inode(void)
+               goto fail_iput;
+       inode->i_pipe = pipe;
+-      pipe->files = 2;
+-      pipe->readers = pipe->writers = 1;
++      atomic_set(&pipe->files, 2);
++      atomic_set(&pipe->readers, 1);
++      atomic_set(&pipe->writers, 1);
+       inode->i_fop = &pipefifo_fops;
+       /*
+@@ -1022,17 +1023,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
+       spin_lock(&inode->i_lock);
+       if (inode->i_pipe) {
+               pipe = inode->i_pipe;
+-              pipe->files++;
++              atomic_inc(&pipe->files);
+               spin_unlock(&inode->i_lock);
+       } else {
+               spin_unlock(&inode->i_lock);
+               pipe = alloc_pipe_info();
+               if (!pipe)
+                       return -ENOMEM;
+-              pipe->files = 1;
++              atomic_set(&pipe->files, 1);
+               spin_lock(&inode->i_lock);
+               if (unlikely(inode->i_pipe)) {
+-                      inode->i_pipe->files++;
++                      atomic_inc(&inode->i_pipe->files);
+                       spin_unlock(&inode->i_lock);
+                       free_pipe_info(pipe);
+                       pipe = inode->i_pipe;
+@@ -1057,10 +1058,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
+        *  opened, even when there is no process writing the FIFO.
+        */
+               pipe->r_counter++;
+-              if (pipe->readers++ == 0)
++              if (atomic_inc_return(&pipe->readers) == 1)
+                       wake_up_partner(pipe);
+-              if (!is_pipe && !pipe->writers) {
++              if (!is_pipe && !atomic_read(&pipe->writers)) {
+                       if ((filp->f_flags & O_NONBLOCK)) {
+                               /* suppress POLLHUP until we have
+                                * seen a writer */
+@@ -1079,14 +1080,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
+        *  errno=ENXIO when there is no process reading the FIFO.
+        */
+               ret = -ENXIO;
+-              if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
++              if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
+                       goto err;
+               pipe->w_counter++;
+-              if (!pipe->writers++)
++              if (atomic_inc_return(&pipe->writers) == 1)
+                       wake_up_partner(pipe);
+-              if (!is_pipe && !pipe->readers) {
++              if (!is_pipe && !atomic_read(&pipe->readers)) {
+                       if (wait_for_partner(pipe, &pipe->r_counter))
+                               goto err_wr;
+               }
+@@ -1100,11 +1101,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
+        *  the process can at least talk to itself.
+        */
+-              pipe->readers++;
+-              pipe->writers++;
++              atomic_inc(&pipe->readers);
++              atomic_inc(&pipe->writers);
+               pipe->r_counter++;
+               pipe->w_counter++;
+-              if (pipe->readers == 1 || pipe->writers == 1)
++              if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
+                       wake_up_partner(pipe);
+               break;
+@@ -1118,20 +1119,20 @@ static int fifo_open(struct inode *inode, struct file *filp)
+       return 0;
+ err_rd:
+-      if (!--pipe->readers)
++      if (atomic_dec_and_test(&pipe->readers))
+               wake_up_interruptible(&pipe->wait);
+       ret = -ERESTARTSYS;
+       goto err;
+ err_wr:
+-      if (!--pipe->writers)
++      if (atomic_dec_and_test(&pipe->writers))
+               wake_up_interruptible(&pipe->wait);
+       ret = -ERESTARTSYS;
+       goto err;
+ err:
+       spin_lock(&inode->i_lock);
+-      if (!--pipe->files) {
++      if (atomic_dec_and_test(&pipe->files)) {
+               inode->i_pipe = NULL;
+               kill = 1;
+       }
+diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
+index 15af622..0e9f4467 100644
+--- a/fs/proc/Kconfig
++++ b/fs/proc/Kconfig
+@@ -30,12 +30,12 @@ config PROC_FS
+ config PROC_KCORE
+       bool "/proc/kcore support" if !ARM
+-      depends on PROC_FS && MMU
++      depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
+ config PROC_VMCORE
+       bool "/proc/vmcore support"
+-      depends on PROC_FS && CRASH_DUMP
+-      default y
++      depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
++      default n
+         help
+         Exports the dump image of crashed kernel in ELF format.
+@@ -59,8 +59,8 @@ config PROC_SYSCTL
+         limited in memory.
+ config PROC_PAGE_MONITOR
+-      default y
+-      depends on PROC_FS && MMU
++      default n
++      depends on PROC_FS && MMU && !GRKERNSEC
+       bool "Enable /proc page monitoring" if EXPERT
+       help
+         Various /proc files exist to monitor process memory utilization:
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index cbd0f1b..adec3f0 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -60,6 +60,7 @@
+ #include <linux/tty.h>
+ #include <linux/string.h>
+ #include <linux/mman.h>
++#include <linux/grsecurity.h>
+ #include <linux/proc_fs.h>
+ #include <linux/ioport.h>
+ #include <linux/uaccess.h>
+@@ -363,6 +364,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
+       seq_putc(m, '\n');
+ }
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++static inline void task_pax(struct seq_file *m, struct task_struct *p)
++{
++      if (p->mm)
++              seq_printf(m, "PaX:\t%c%c%c%c%c\n",
++                         p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
++                         p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
++                         p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
++                         p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
++                         p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
++      else
++              seq_printf(m, "PaX:\t-----\n");
++}
++#endif
++
+ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
+                       struct pid *pid, struct task_struct *task)
+ {
+@@ -381,9 +397,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
+       task_cpus_allowed(m, task);
+       cpuset_task_status_allowed(m, task);
+       task_context_switch_counts(m, task);
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++      task_pax(m, task);
++#endif
++
++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
++      task_grsec_rbac(m, task);
++#endif
++
+       return 0;
+ }
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
++                           (_mm->pax_flags & MF_PAX_RANDMMAP || \
++                            _mm->pax_flags & MF_PAX_SEGMEXEC))
++#endif
++
+ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+                       struct pid *pid, struct task_struct *task, int whole)
+ {
+@@ -405,6 +436,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+       char tcomm[sizeof(task->comm)];
+       unsigned long flags;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      if (current->exec_id != m->exec_id) {
++              gr_log_badprocpid("stat");
++              return 0;
++      }
++#endif
++
+       state = *get_task_state(task);
+       vsize = eip = esp = 0;
+       permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
+@@ -476,6 +514,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+               gtime = task_gtime(task);
+       }
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      if (PAX_RAND_FLAGS(mm)) {
++              eip = 0;
++              esp = 0;
++              wchan = 0;
++      }
++#endif
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++      wchan = 0;
++      eip =0;
++      esp =0;
++#endif
++
+       /* scale priority and nice values from timeslices to -20..20 */
+       /* to make it look like a "normal" Unix priority/nice value  */
+       priority = task_prio(task);
+@@ -512,9 +563,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+       seq_put_decimal_ull(m, ' ', vsize);
+       seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
+       seq_put_decimal_ull(m, ' ', rsslim);
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
++      seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
++      seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
++#else
+       seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
+       seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
+       seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
++#endif
+       seq_put_decimal_ull(m, ' ', esp);
+       seq_put_decimal_ull(m, ' ', eip);
+       /* The signal information here is obsolete.
+@@ -536,7 +593,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+       seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
+       seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
+-      if (mm && permitted) {
++      if (mm && permitted
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++              && !PAX_RAND_FLAGS(mm)
++#endif
++         ) {
+               seq_put_decimal_ull(m, ' ', mm->start_data);
+               seq_put_decimal_ull(m, ' ', mm->end_data);
+               seq_put_decimal_ull(m, ' ', mm->start_brk);
+@@ -574,8 +635,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
+                       struct pid *pid, struct task_struct *task)
+ {
+       unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
+-      struct mm_struct *mm = get_task_mm(task);
++      struct mm_struct *mm;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      if (current->exec_id != m->exec_id) {
++              gr_log_badprocpid("statm");
++              return 0;
++      }
++#endif
++      mm = get_task_mm(task);
+       if (mm) {
+               size = task_statm(mm, &shared, &text, &data, &resident);
+               mmput(mm);
+@@ -598,6 +666,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
+       return 0;
+ }
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++int proc_pid_ipaddr(struct task_struct *task, char *buffer)
++{
++      return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
++}
++#endif
++
+ #ifdef CONFIG_CHECKPOINT_RESTORE
+ static struct pid *
+ get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index c3834da..b402b2b 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -113,6 +113,14 @@ struct pid_entry {
+       union proc_op op;
+ };
++struct getdents_callback {
++      struct linux_dirent __user * current_dir;
++      struct linux_dirent __user * previous;
++      struct file * file;
++      int count;
++      int error;
++};
++
+ #define NOD(NAME, MODE, IOP, FOP, OP) {                       \
+       .name = (NAME),                                 \
+       .len  = sizeof(NAME) - 1,                       \
+@@ -210,6 +218,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
+       if (!mm->arg_end)
+               goto out_mm;    /* Shh! No looking before we're done */
++      if (gr_acl_handle_procpidmem(task))
++              goto out_mm;
++
+       len = mm->arg_end - mm->arg_start;
+  
+       if (len > PAGE_SIZE)
+@@ -237,12 +248,28 @@ out:
+       return res;
+ }
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
++                           (_mm->pax_flags & MF_PAX_RANDMMAP || \
++                            _mm->pax_flags & MF_PAX_SEGMEXEC))
++#endif
++
+ static int proc_pid_auxv(struct task_struct *task, char *buffer)
+ {
+       struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
+       int res = PTR_ERR(mm);
+       if (mm && !IS_ERR(mm)) {
+               unsigned int nwords = 0;
++
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++              /* allow if we're currently ptracing this task */
++              if (PAX_RAND_FLAGS(mm) &&
++                  (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
++                      mmput(mm);
++                      return 0;
++              }
++#endif
++
+               do {
+                       nwords += 2;
+               } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
+@@ -256,7 +283,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
+ }
+-#ifdef CONFIG_KALLSYMS
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ /*
+  * Provides a wchan file via kallsyms in a proper one-value-per-file format.
+  * Returns the resolved symbol.  If that fails, simply return the address.
+@@ -295,7 +322,7 @@ static void unlock_trace(struct task_struct *task)
+       mutex_unlock(&task->signal->cred_guard_mutex);
+ }
+-#ifdef CONFIG_STACKTRACE
++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ #define MAX_STACK_TRACE_DEPTH 64
+@@ -518,7 +545,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
+       return count;
+ }
+-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
+ static int proc_pid_syscall(struct task_struct *task, char *buffer)
+ {
+       long nr;
+@@ -547,7 +574,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
+ /************************************************************************/
+ /* permission checks */
+-static int proc_fd_access_allowed(struct inode *inode)
++static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
+ {
+       struct task_struct *task;
+       int allowed = 0;
+@@ -557,7 +584,10 @@ static int proc_fd_access_allowed(struct inode *inode)
+        */
+       task = get_proc_task(inode);
+       if (task) {
+-              allowed = ptrace_may_access(task, PTRACE_MODE_READ);
++              if (log)
++                      allowed = ptrace_may_access(task, PTRACE_MODE_READ);
++              else
++                      allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
+               put_task_struct(task);
+       }
+       return allowed;
+@@ -588,10 +618,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
+                                struct task_struct *task,
+                                int hide_pid_min)
+ {
++      if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
++              return false;
++
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      rcu_read_lock();
++      {
++              const struct cred *tmpcred = current_cred();
++              const struct cred *cred = __task_cred(task);
++
++              if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++                      || in_group_p(grsec_proc_gid)
++#endif
++              ) {
++                      rcu_read_unlock();
++                      return true;
++              }
++      }
++      rcu_read_unlock();
++
++      if (!pid->hide_pid)
++              return false;
++#endif
++
+       if (pid->hide_pid < hide_pid_min)
+               return true;
+       if (in_group_p(pid->pid_gid))
+               return true;
++
+       return ptrace_may_access(task, PTRACE_MODE_READ);
+ }
+@@ -609,7 +664,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
+       put_task_struct(task);
+       if (!has_perms) {
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++              {
++#else
+               if (pid->hide_pid == 2) {
++#endif
+                       /*
+                        * Let's make getdents(), stat(), and open()
+                        * consistent with each other.  If a process
+@@ -707,6 +766,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
+       if (!task)
+               return -ESRCH;
++      if (gr_acl_handle_procpidmem(task)) {
++              put_task_struct(task);
++              return -EPERM;
++      }
++
+       mm = mm_access(task, mode);
+       put_task_struct(task);
+@@ -722,6 +786,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
+       file->private_data = mm;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      file->f_version = current->exec_id;
++#endif
++
+       return 0;
+ }
+@@ -743,6 +811,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
+       ssize_t copied;
+       char *page;
++#ifdef CONFIG_GRKERNSEC
++      if (write)
++              return -EPERM;
++#endif
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      if (file->f_version != current->exec_id) {
++              gr_log_badprocpid("mem");
++              return 0;
++      }
++#endif
++
+       if (!mm)
+               return 0;
+@@ -755,7 +834,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
+               goto free;
+       while (count > 0) {
+-              int this_len = min_t(int, count, PAGE_SIZE);
++              ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
+               if (write && copy_from_user(page, buf, this_len)) {
+                       copied = -EFAULT;
+@@ -847,6 +926,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
+       if (!mm)
+               return 0;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      if (file->f_version != current->exec_id) {
++              gr_log_badprocpid("environ");
++              return 0;
++      }
++#endif
++
+       page = (char *)__get_free_page(GFP_TEMPORARY);
+       if (!page)
+               return -ENOMEM;
+@@ -856,7 +942,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
+               goto free;
+       while (count > 0) {
+               size_t this_len, max_len;
+-              int retval;
++              ssize_t retval;
+               if (src >= (mm->env_end - mm->env_start))
+                       break;
+@@ -1461,7 +1547,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
+       int error = -EACCES;
+       /* Are we allowed to snoop on the tasks file descriptors? */
+-      if (!proc_fd_access_allowed(inode))
++      if (!proc_fd_access_allowed(inode, 0))
+               goto out;
+       error = PROC_I(inode)->op.proc_get_link(dentry, &path);
+@@ -1505,8 +1591,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
+       struct path path;
+       /* Are we allowed to snoop on the tasks file descriptors? */
+-      if (!proc_fd_access_allowed(inode))
+-              goto out;
++      /* logging this is needed for learning on chromium to work properly,
++         but we don't want to flood the logs from 'ps' which does a readlink
++         on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
++         CAP_SYS_PTRACE as it's not necessary for its basic functionality
++       */
++      if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
++              if (!proc_fd_access_allowed(inode,0))
++                      goto out;
++      } else {
++              if (!proc_fd_access_allowed(inode,1))
++                      goto out;
++      }
+       error = PROC_I(inode)->op.proc_get_link(dentry, &path);
+       if (error)
+@@ -1556,7 +1652,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
+               rcu_read_lock();
+               cred = __task_cred(task);
+               inode->i_uid = cred->euid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++              inode->i_gid = grsec_proc_gid;
++#else
+               inode->i_gid = cred->egid;
++#endif
+               rcu_read_unlock();
+       }
+       security_task_to_inode(task, inode);
+@@ -1592,10 +1692,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+                       return -ENOENT;
+               }
+               if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++                  (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++                  (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
++#endif
+                   task_dumpable(task)) {
+                       cred = __task_cred(task);
+                       stat->uid = cred->euid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++                      stat->gid = grsec_proc_gid;
++#else
+                       stat->gid = cred->egid;
++#endif
+               }
+       }
+       rcu_read_unlock();
+@@ -1633,11 +1742,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
+       if (task) {
+               if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++                  (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++                  (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
++#endif
+                   task_dumpable(task)) {
+                       rcu_read_lock();
+                       cred = __task_cred(task);
+                       inode->i_uid = cred->euid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++                      inode->i_gid = grsec_proc_gid;
++#else
+                       inode->i_gid = cred->egid;
++#endif
+                       rcu_read_unlock();
+               } else {
+                       inode->i_uid = GLOBAL_ROOT_UID;
+@@ -2196,6 +2314,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
+       if (!task)
+               goto out_no_task;
++      if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
++              goto out;
++
+       /*
+        * Yes, it does not scale. And it should not. Don't add
+        * new entries into /proc/<tgid>/ without very good reasons.
+@@ -2240,6 +2361,9 @@ static int proc_pident_readdir(struct file *filp,
+       if (!task)
+               goto out_no_task;
++      if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
++              goto out;
++
+       ret = 0;
+       i = filp->f_pos;
+       switch (i) {
+@@ -2653,7 +2777,7 @@ static const struct pid_entry tgid_base_stuff[] = {
+       REG("autogroup",  S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
+ #endif
+       REG("comm",      S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
+-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
+       INF("syscall",    S_IRUGO, proc_pid_syscall),
+ #endif
+       INF("cmdline",    S_IRUGO, proc_pid_cmdline),
+@@ -2678,10 +2802,10 @@ static const struct pid_entry tgid_base_stuff[] = {
+ #ifdef CONFIG_SECURITY
+       DIR("attr",       S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
+ #endif
+-#ifdef CONFIG_KALLSYMS
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+       INF("wchan",      S_IRUGO, proc_pid_wchan),
+ #endif
+-#ifdef CONFIG_STACKTRACE
++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+       ONE("stack",      S_IRUGO, proc_pid_stack),
+ #endif
+ #ifdef CONFIG_SCHEDSTATS
+@@ -2715,6 +2839,9 @@ static const struct pid_entry tgid_base_stuff[] = {
+ #ifdef CONFIG_HARDWALL
+       INF("hardwall",   S_IRUGO, proc_pid_hardwall),
+ #endif
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++      INF("ipaddr",     S_IRUSR, proc_pid_ipaddr),
++#endif
+ #ifdef CONFIG_USER_NS
+       REG("uid_map",    S_IRUGO|S_IWUSR, proc_uid_map_operations),
+       REG("gid_map",    S_IRUGO|S_IWUSR, proc_gid_map_operations),
+@@ -2847,7 +2974,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
+       if (!inode)
+               goto out;
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++      inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      inode->i_gid = grsec_proc_gid;
++      inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
++#else
+       inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
++#endif
+       inode->i_op = &proc_tgid_base_inode_operations;
+       inode->i_fop = &proc_tgid_base_operations;
+       inode->i_flags|=S_IMMUTABLE;
+@@ -2885,7 +3019,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
+       if (!task)
+               goto out;
++      if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
++              goto out_put_task;
++
+       result = proc_pid_instantiate(dir, dentry, task, NULL);
++out_put_task:
+       put_task_struct(task);
+ out:
+       return result;
+@@ -2948,6 +3086,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
+ static int fake_filldir(void *buf, const char *name, int namelen,
+                       loff_t offset, u64 ino, unsigned d_type)
+ {
++      struct getdents_callback * __buf = (struct getdents_callback *) buf;
++      __buf->error = -EINVAL;
+       return 0;
+ }
+@@ -3007,7 +3147,7 @@ static const struct pid_entry tid_base_stuff[] = {
+       REG("sched",     S_IRUGO|S_IWUSR, proc_pid_sched_operations),
+ #endif
+       REG("comm",      S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
+-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
+       INF("syscall",   S_IRUGO, proc_pid_syscall),
+ #endif
+       INF("cmdline",   S_IRUGO, proc_pid_cmdline),
+@@ -3034,10 +3174,10 @@ static const struct pid_entry tid_base_stuff[] = {
+ #ifdef CONFIG_SECURITY
+       DIR("attr",      S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
+ #endif
+-#ifdef CONFIG_KALLSYMS
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+       INF("wchan",     S_IRUGO, proc_pid_wchan),
+ #endif
+-#ifdef CONFIG_STACKTRACE
++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+       ONE("stack",      S_IRUGO, proc_pid_stack),
+ #endif
+ #ifdef CONFIG_SCHEDSTATS
+diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
+index 82676e3..5f8518a 100644
+--- a/fs/proc/cmdline.c
++++ b/fs/proc/cmdline.c
+@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
+ static int __init proc_cmdline_init(void)
+ {
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++      proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
++#else
+       proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
++#endif
+       return 0;
+ }
+ module_init(proc_cmdline_init);
+diff --git a/fs/proc/devices.c b/fs/proc/devices.c
+index b143471..bb105e5 100644
+--- a/fs/proc/devices.c
++++ b/fs/proc/devices.c
+@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
+ static int __init proc_devices_init(void)
+ {
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++      proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
++#else
+       proc_create("devices", 0, NULL, &proc_devinfo_operations);
++#endif
+       return 0;
+ }
+ module_init(proc_devices_init);
+diff --git a/fs/proc/fd.c b/fs/proc/fd.c
+index d7a4a28..0201742 100644
+--- a/fs/proc/fd.c
++++ b/fs/proc/fd.c
+@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
+       if (!task)
+               return -ENOENT;
+-      files = get_files_struct(task);
++      if (!gr_acl_handle_procpidmem(task))
++              files = get_files_struct(task);
+       put_task_struct(task);
+       if (files) {
+@@ -302,11 +303,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
+  */
+ int proc_fd_permission(struct inode *inode, int mask)
+ {
++      struct task_struct *task;
+       int rv = generic_permission(inode, mask);
+-      if (rv == 0)
+-              return 0;
++
+       if (task_pid(current) == proc_pid(inode))
+               rv = 0;
++
++      task = get_proc_task(inode);
++      if (task == NULL)
++              return rv;
++
++      if (gr_acl_handle_procpidmem(task))
++              rv = -EACCES;
++
++      put_task_struct(task);
++
+       return rv;
+ }
+diff --git a/fs/proc/inode.c b/fs/proc/inode.c
+index 073aea6..0630370 100644
+--- a/fs/proc/inode.c
++++ b/fs/proc/inode.c
+@@ -23,11 +23,17 @@
+ #include <linux/slab.h>
+ #include <linux/mount.h>
+ #include <linux/magic.h>
++#include <linux/grsecurity.h>
+ #include <asm/uaccess.h>
+ #include "internal.h"
++#ifdef CONFIG_PROC_SYSCTL
++extern const struct inode_operations proc_sys_inode_operations;
++extern const struct inode_operations proc_sys_dir_operations;
++#endif
++
+ static void proc_evict_inode(struct inode *inode)
+ {
+       struct proc_dir_entry *de;
+@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
+       ns = PROC_I(inode)->ns.ns;
+       if (ns_ops && ns)
+               ns_ops->put(ns);
++
++#ifdef CONFIG_PROC_SYSCTL
++      if (inode->i_op == &proc_sys_inode_operations ||
++          inode->i_op == &proc_sys_dir_operations)
++              gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
++#endif
++
+ }
+ static struct kmem_cache * proc_inode_cachep;
+@@ -385,7 +398,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
+               if (de->mode) {
+                       inode->i_mode = de->mode;
+                       inode->i_uid = de->uid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++                      inode->i_gid = grsec_proc_gid;
++#else
+                       inode->i_gid = de->gid;
++#endif
+               }
+               if (de->size)
+                       inode->i_size = de->size;
+diff --git a/fs/proc/internal.h b/fs/proc/internal.h
+index d600fb0..3b495fe 100644
+--- a/fs/proc/internal.h
++++ b/fs/proc/internal.h
+@@ -155,6 +155,9 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
+                          struct pid *, struct task_struct *);
+ extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
+                         struct pid *, struct task_struct *);
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
++#endif
+ /*
+  * base.c
+diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
+index 0a22194..a9fc8c1 100644
+--- a/fs/proc/kcore.c
++++ b/fs/proc/kcore.c
+@@ -484,9 +484,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
+        * the addresses in the elf_phdr on our list.
+        */
+       start = kc_offset_to_vaddr(*fpos - elf_buflen);
+-      if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
++      tsz = PAGE_SIZE - (start & ~PAGE_MASK);
++      if (tsz > buflen)
+               tsz = buflen;
+-              
++
+       while (buflen) {
+               struct kcore_list *m;
+@@ -515,20 +516,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
+                       kfree(elf_buf);
+               } else {
+                       if (kern_addr_valid(start)) {
+-                              unsigned long n;
++                              char *elf_buf;
++                              mm_segment_t oldfs;
+-                              n = copy_to_user(buffer, (char *)start, tsz);
+-                              /*
+-                               * We cannot distinguish between fault on source
+-                               * and fault on destination. When this happens
+-                               * we clear too and hope it will trigger the
+-                               * EFAULT again.
+-                               */
+-                              if (n) { 
+-                                      if (clear_user(buffer + tsz - n,
+-                                                              n))
++                              elf_buf = kmalloc(tsz, GFP_KERNEL);
++                              if (!elf_buf)
++                                      return -ENOMEM;
++                              oldfs = get_fs();
++                              set_fs(KERNEL_DS);
++                              if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
++                                      set_fs(oldfs);
++                                      if (copy_to_user(buffer, elf_buf, tsz)) {
++                                              kfree(elf_buf);
+                                               return -EFAULT;
++                                      }
+                               }
++                              set_fs(oldfs);
++                              kfree(elf_buf);
+                       } else {
+                               if (clear_user(buffer, tsz))
+                                       return -EFAULT;
+@@ -548,6 +552,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
+ static int open_kcore(struct inode *inode, struct file *filp)
+ {
++#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
++      return -EPERM;
++#endif
+       if (!capable(CAP_SYS_RAWIO))
+               return -EPERM;
+       if (kcore_need_update)
+diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
+index 5aa847a..f77c8d4 100644
+--- a/fs/proc/meminfo.c
++++ b/fs/proc/meminfo.c
+@@ -159,7 +159,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
+               vmi.used >> 10,
+               vmi.largest_chunk >> 10
+ #ifdef CONFIG_MEMORY_FAILURE
+-              ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
++              ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
+ #endif
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+               ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
+diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
+index ccfd99b..1b7e255 100644
+--- a/fs/proc/nommu.c
++++ b/fs/proc/nommu.c
+@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
+               if (len < 1)
+                       len = 1;
+               seq_printf(m, "%*c", len, ' ');
+-              seq_path(m, &file->f_path, "");
++              seq_path(m, &file->f_path, "\n\\");
+       }
+       seq_putc(m, '\n');
+diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
+index 986e832..6e8e859 100644
+--- a/fs/proc/proc_net.c
++++ b/fs/proc/proc_net.c
+@@ -23,6 +23,7 @@
+ #include <linux/nsproxy.h>
+ #include <net/net_namespace.h>
+ #include <linux/seq_file.h>
++#include <linux/grsecurity.h>
+ #include "internal.h"
+@@ -109,6 +110,17 @@ static struct net *get_proc_task_net(struct inode *dir)
+       struct task_struct *task;
+       struct nsproxy *ns;
+       struct net *net = NULL;
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      const struct cred *cred = current_cred();
++#endif
++
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++      if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
++              return net;
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
++              return net;
++#endif
+       rcu_read_lock();
+       task = pid_task(proc_pid(dir), PIDTYPE_PID);
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index ac05f33..1e6dc7e 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -13,11 +13,15 @@
+ #include <linux/module.h>
+ #include "internal.h"
++extern int gr_handle_chroot_sysctl(const int op);
++extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
++                              const int op);
++
+ static const struct dentry_operations proc_sys_dentry_operations;
+ static const struct file_operations proc_sys_file_operations;
+-static const struct inode_operations proc_sys_inode_operations;
++const struct inode_operations proc_sys_inode_operations;
+ static const struct file_operations proc_sys_dir_file_operations;
+-static const struct inode_operations proc_sys_dir_operations;
++const struct inode_operations proc_sys_dir_operations;
+ void proc_sys_poll_notify(struct ctl_table_poll *poll)
+ {
+@@ -467,6 +471,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
+       err = NULL;
+       d_set_d_op(dentry, &proc_sys_dentry_operations);
++
++      gr_handle_proc_create(dentry, inode);
++
+       d_add(dentry, inode);
+ out:
+@@ -482,6 +489,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
+       struct inode *inode = file_inode(filp);
+       struct ctl_table_header *head = grab_header(inode);
+       struct ctl_table *table = PROC_I(inode)->sysctl_entry;
++      int op = write ? MAY_WRITE : MAY_READ;
+       ssize_t error;
+       size_t res;
+@@ -493,7 +501,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
+        * and won't be until we finish.
+        */
+       error = -EPERM;
+-      if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
++      if (sysctl_perm(head, table, op))
+               goto out;
+       /* if that can happen at all, it should be -EINVAL, not -EISDIR */
+@@ -501,6 +509,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
+       if (!table->proc_handler)
+               goto out;
++#ifdef CONFIG_GRKERNSEC
++      error = -EPERM;
++      if (gr_handle_chroot_sysctl(op))
++              goto out;
++      dget(filp->f_path.dentry);
++      if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
++              dput(filp->f_path.dentry);
++              goto out;
++      }
++      dput(filp->f_path.dentry);
++      if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
++              goto out;
++      if (write && !capable(CAP_SYS_ADMIN))
++              goto out;
++#endif
++
+       /* careful: calling conventions are nasty here */
+       res = count;
+       error = table->proc_handler(table, write, buf, &res, ppos);
+@@ -598,6 +622,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
+                               return -ENOMEM;
+                       } else {
+                               d_set_d_op(child, &proc_sys_dentry_operations);
++
++                              gr_handle_proc_create(child, inode);
++
+                               d_add(child, inode);
+                       }
+               } else {
+@@ -641,6 +668,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
+       if ((*pos)++ < file->f_pos)
+               return 0;
++      if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
++              return 0;
++
+       if (unlikely(S_ISLNK(table->mode)))
+               res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
+       else
+@@ -751,6 +781,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
+       if (IS_ERR(head))
+               return PTR_ERR(head);
++      if (table && !gr_acl_handle_hidden_file(dentry, mnt))
++              return -ENOENT;
++
+       generic_fillattr(inode, stat);
+       if (table)
+               stat->mode = (stat->mode & S_IFMT) | table->mode;
+@@ -773,13 +806,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
+       .llseek         = generic_file_llseek,
+ };
+-static const struct inode_operations proc_sys_inode_operations = {
++const struct inode_operations proc_sys_inode_operations = {
+       .permission     = proc_sys_permission,
+       .setattr        = proc_sys_setattr,
+       .getattr        = proc_sys_getattr,
+ };
+-static const struct inode_operations proc_sys_dir_operations = {
++const struct inode_operations proc_sys_dir_operations = {
+       .lookup         = proc_sys_lookup,
+       .permission     = proc_sys_permission,
+       .setattr        = proc_sys_setattr,
+@@ -855,7 +888,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
+ static struct ctl_dir *new_dir(struct ctl_table_set *set,
+                              const char *name, int namelen)
+ {
+-      struct ctl_table *table;
++      ctl_table_no_const *table;
+       struct ctl_dir *new;
+       struct ctl_node *node;
+       char *new_name;
+@@ -867,7 +900,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
+               return NULL;
+       node = (struct ctl_node *)(new + 1);
+-      table = (struct ctl_table *)(node + 1);
++      table = (ctl_table_no_const *)(node + 1);
+       new_name = (char *)(table + 2);
+       memcpy(new_name, name, namelen);
+       new_name[namelen] = '\0';
+@@ -1036,7 +1069,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
+ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
+       struct ctl_table_root *link_root)
+ {
+-      struct ctl_table *link_table, *entry, *link;
++      ctl_table_no_const *link_table, *link;
++      struct ctl_table *entry;
+       struct ctl_table_header *links;
+       struct ctl_node *node;
+       char *link_name;
+@@ -1059,7 +1093,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
+               return NULL;
+       node = (struct ctl_node *)(links + 1);
+-      link_table = (struct ctl_table *)(node + nr_entries);
++      link_table = (ctl_table_no_const *)(node + nr_entries);
+       link_name = (char *)&link_table[nr_entries + 1];
+       for (link = link_table, entry = table; entry->procname; link++, entry++) {
+@@ -1307,8 +1341,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
+       struct ctl_table_header ***subheader, struct ctl_table_set *set,
+       struct ctl_table *table)
+ {
+-      struct ctl_table *ctl_table_arg = NULL;
+-      struct ctl_table *entry, *files;
++      ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
++      struct ctl_table *entry;
+       int nr_files = 0;
+       int nr_dirs = 0;
+       int err = -ENOMEM;
+@@ -1320,10 +1354,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
+                       nr_files++;
+       }
+-      files = table;
+       /* If there are mixed files and directories we need a new table */
+       if (nr_dirs && nr_files) {
+-              struct ctl_table *new;
++              ctl_table_no_const *new;
+               files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
+                               GFP_KERNEL);
+               if (!files)
+@@ -1341,7 +1374,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
+       /* Register everything except a directory full of subdirectories */
+       if (nr_files || !nr_dirs) {
+               struct ctl_table_header *header;
+-              header = __register_sysctl_table(set, path, files);
++              header = __register_sysctl_table(set, path, files ? files : table);
+               if (!header) {
+                       kfree(ctl_table_arg);
+                       goto out;
+diff --git a/fs/proc/root.c b/fs/proc/root.c
+index 41a6ea9..23eaa92 100644
+--- a/fs/proc/root.c
++++ b/fs/proc/root.c
+@@ -182,7 +182,15 @@ void __init proc_root_init(void)
+ #ifdef CONFIG_PROC_DEVICETREE
+       proc_device_tree_init();
+ #endif
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++      proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
++#endif
++#else
+       proc_mkdir("bus", NULL);
++#endif
+       proc_sys_init();
+ }
+diff --git a/fs/proc/self.c b/fs/proc/self.c
+index 6b6a993..807cccc 100644
+--- a/fs/proc/self.c
++++ b/fs/proc/self.c
+@@ -39,7 +39,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
+ static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
+                               void *cookie)
+ {
+-      char *s = nd_get_link(nd);
++      const char *s = nd_get_link(nd);
+       if (!IS_ERR(s))
+               kfree(s);
+ }
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 65fc60a..350cc48 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -11,12 +11,19 @@
+ #include <linux/rmap.h>
+ #include <linux/swap.h>
+ #include <linux/swapops.h>
++#include <linux/grsecurity.h>
+ #include <asm/elf.h>
+ #include <asm/uaccess.h>
+ #include <asm/tlbflush.h>
+ #include "internal.h"
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
++                           (_mm->pax_flags & MF_PAX_RANDMMAP || \
++                            _mm->pax_flags & MF_PAX_SEGMEXEC))
++#endif
++
+ void task_mem(struct seq_file *m, struct mm_struct *mm)
+ {
+       unsigned long data, text, lib, swap;
+@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
+               "VmExe:\t%8lu kB\n"
+               "VmLib:\t%8lu kB\n"
+               "VmPTE:\t%8lu kB\n"
+-              "VmSwap:\t%8lu kB\n",
+-              hiwater_vm << (PAGE_SHIFT-10),
++              "VmSwap:\t%8lu kB\n"
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++              "CsBase:\t%8lx\nCsLim:\t%8lx\n"
++#endif
++
++              ,hiwater_vm << (PAGE_SHIFT-10),
+               total_vm << (PAGE_SHIFT-10),
+               mm->locked_vm << (PAGE_SHIFT-10),
+               mm->pinned_vm << (PAGE_SHIFT-10),
+@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
+               data << (PAGE_SHIFT-10),
+               mm->stack_vm << (PAGE_SHIFT-10), text, lib,
+               (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
+-              swap << (PAGE_SHIFT-10));
++              swap << (PAGE_SHIFT-10)
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++              , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
++              , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
++#else
++              , mm->context.user_cs_base
++              , mm->context.user_cs_limit
++#endif
++#endif
++
++      );
+ }
+ unsigned long task_vsize(struct mm_struct *mm)
+@@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
+               pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
+       }
+-      /* We don't show the stack guard page in /proc/maps */
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
++      end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
++#else
+       start = vma->vm_start;
+-      if (stack_guard_page_start(vma, start))
+-              start += PAGE_SIZE;
+       end = vma->vm_end;
+-      if (stack_guard_page_end(vma, end))
+-              end -= PAGE_SIZE;
++#endif
+       seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
+                       start,
+@@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
+                       flags & VM_WRITE ? 'w' : '-',
+                       flags & VM_EXEC ? 'x' : '-',
+                       flags & VM_MAYSHARE ? 's' : 'p',
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++                      PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
++#else
+                       pgoff,
++#endif
+                       MAJOR(dev), MINOR(dev), ino, &len);
+       /*
+@@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
+        */
+       if (file) {
+               pad_len_spaces(m, len);
+-              seq_path(m, &file->f_path, "\n");
++              seq_path(m, &file->f_path, "\n\\");
+               goto done;
+       }
+@@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
+                        * Thread stack in /proc/PID/task/TID/maps or
+                        * the main process stack.
+                        */
+-                      if (!is_pid || (vma->vm_start <= mm->start_stack &&
+-                          vma->vm_end >= mm->start_stack)) {
++                      if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
++                          (vma->vm_start <= mm->start_stack &&
++                           vma->vm_end >= mm->start_stack)) {
+                               name = "[stack]";
+                       } else {
+                               /* Thread stack in /proc/PID/maps */
+@@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
+       struct proc_maps_private *priv = m->private;
+       struct task_struct *task = priv->task;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      if (current->exec_id != m->exec_id) {
++              gr_log_badprocpid("maps");
++              return 0;
++      }
++#endif
++
+       show_map_vma(m, vma, is_pid);
+       if (m->count < m->size)  /* vma is copied successfully */
+@@ -589,12 +625,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
+               .private = &mss,
+       };
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      if (current->exec_id != m->exec_id) {
++              gr_log_badprocpid("smaps");
++              return 0;
++      }
++#endif
+       memset(&mss, 0, sizeof mss);
+-      mss.vma = vma;
+-      /* mmap_sem is held in m_start */
+-      if (vma->vm_mm && !is_vm_hugetlb_page(vma))
+-              walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
+-
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      if (!PAX_RAND_FLAGS(vma->vm_mm)) {
++#endif
++              mss.vma = vma;
++              /* mmap_sem is held in m_start */
++              if (vma->vm_mm && !is_vm_hugetlb_page(vma))
++                      walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      }
++#endif
+       show_map_vma(m, vma, is_pid);
+       seq_printf(m,
+@@ -612,7 +659,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
+                  "KernelPageSize: %8lu kB\n"
+                  "MMUPageSize:    %8lu kB\n"
+                  "Locked:         %8lu kB\n",
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++                 PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
++#else
+                  (vma->vm_end - vma->vm_start) >> 10,
++#endif
+                  mss.resident >> 10,
+                  (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
+                  mss.shared_clean  >> 10,
+@@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
+       int n;
+       char buffer[50];
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      if (current->exec_id != m->exec_id) {
++              gr_log_badprocpid("numa_maps");
++              return 0;
++      }
++#endif
++
+       if (!mm)
+               return 0;
+@@ -1281,11 +1339,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
+       mpol_to_str(buffer, sizeof(buffer), pol);
+       mpol_cond_put(pol);
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
++#else
+       seq_printf(m, "%08lx %s", vma->vm_start, buffer);
++#endif
+       if (file) {
+               seq_printf(m, " file=");
+-              seq_path(m, &file->f_path, "\n\t= ");
++              seq_path(m, &file->f_path, "\n\t\\= ");
+       } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
+               seq_printf(m, " heap");
+       } else {
+diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
+index 56123a6..5a2f6ec 100644
+--- a/fs/proc/task_nommu.c
++++ b/fs/proc/task_nommu.c
+@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
+       else
+               bytes += kobjsize(mm);
+       
+-      if (current->fs && current->fs->users > 1)
++      if (current->fs && atomic_read(&current->fs->users) > 1)
+               sbytes += kobjsize(current->fs);
+       else
+               bytes += kobjsize(current->fs);
+@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
+       if (file) {
+               pad_len_spaces(m, len);
+-              seq_path(m, &file->f_path, "");
++              seq_path(m, &file->f_path, "\n\\");
+       } else if (mm) {
+               pid_t tid = vm_is_stack(priv->task, vma, is_pid);
+diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
+index 17f7e08..e4b1529 100644
+--- a/fs/proc/vmcore.c
++++ b/fs/proc/vmcore.c
+@@ -99,9 +99,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
+                       nr_bytes = count;
+               /* If pfn is not ram, return zeros for sparse dump files */
+-              if (pfn_is_ram(pfn) == 0)
+-                      memset(buf, 0, nr_bytes);
+-              else {
++              if (pfn_is_ram(pfn) == 0) {
++                      if (userbuf) {
++                              if (clear_user((char __force_user *)buf, nr_bytes))
++                                      return -EFAULT;
++                      } else
++                              memset(buf, 0, nr_bytes);
++              } else {
+                       tmp = copy_oldmem_page(pfn, buf, nr_bytes,
+                                               offset, userbuf);
+                       if (tmp < 0)
+@@ -186,7 +190,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
+               if (tsz > nr_bytes)
+                       tsz = nr_bytes;
+-              tmp = read_from_oldmem(buffer, tsz, &start, 1);
++              tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, 1);
+               if (tmp < 0)
+                       return tmp;
+               buflen -= tsz;
+diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
+index b00fcc9..e0c6381 100644
+--- a/fs/qnx6/qnx6.h
++++ b/fs/qnx6/qnx6.h
+@@ -74,7 +74,7 @@ enum {
+       BYTESEX_BE,
+ };
+-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
++static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
+ {
+       if (sbi->s_bytesex == BYTESEX_LE)
+               return le64_to_cpu((__force __le64)n);
+@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
+               return (__force __fs64)cpu_to_be64(n);
+ }
+-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
++static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
+ {
+       if (sbi->s_bytesex == BYTESEX_LE)
+               return le32_to_cpu((__force __le32)n);
+diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
+index 16e8abb..2dcf914 100644
+--- a/fs/quota/netlink.c
++++ b/fs/quota/netlink.c
+@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
+ void quota_send_warning(struct kqid qid, dev_t dev,
+                       const char warntype)
+ {
+-      static atomic_t seq;
++      static atomic_unchecked_t seq;
+       struct sk_buff *skb;
+       void *msg_head;
+       int ret;
+@@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
+                 "VFS: Not enough memory to send quota warning.\n");
+               return;
+       }
+-      msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
++      msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
+                       &quota_genl_family, 0, QUOTA_NL_C_WARNING);
+       if (!msg_head) {
+               printk(KERN_ERR
+diff --git a/fs/read_write.c b/fs/read_write.c
+index 2cefa41..c7e2fe0 100644
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -411,7 +411,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
+       old_fs = get_fs();
+       set_fs(get_ds());
+-      p = (__force const char __user *)buf;
++      p = (const char __force_user *)buf;
+       if (count > MAX_RW_COUNT)
+               count =  MAX_RW_COUNT;
+       if (file->f_op->write)
+diff --git a/fs/readdir.c b/fs/readdir.c
+index fee38e0..12fdf47 100644
+--- a/fs/readdir.c
++++ b/fs/readdir.c
+@@ -17,6 +17,7 @@
+ #include <linux/security.h>
+ #include <linux/syscalls.h>
+ #include <linux/unistd.h>
++#include <linux/namei.h>
+ #include <asm/uaccess.h>
+@@ -67,6 +68,7 @@ struct old_linux_dirent {
+ struct readdir_callback {
+       struct old_linux_dirent __user * dirent;
++      struct file * file;
+       int result;
+ };
+@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
+               buf->result = -EOVERFLOW;
+               return -EOVERFLOW;
+       }
++
++      if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++              return 0;
++
+       buf->result++;
+       dirent = buf->dirent;
+       if (!access_ok(VERIFY_WRITE, dirent,
+@@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
+       buf.result = 0;
+       buf.dirent = dirent;
++      buf.file = f.file;
+       error = vfs_readdir(f.file, fillonedir, &buf);
+       if (buf.result)
+@@ -139,6 +146,7 @@ struct linux_dirent {
+ struct getdents_callback {
+       struct linux_dirent __user * current_dir;
+       struct linux_dirent __user * previous;
++      struct file * file;
+       int count;
+       int error;
+ };
+@@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
+               buf->error = -EOVERFLOW;
+               return -EOVERFLOW;
+       }
++
++      if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++              return 0;
++
+       dirent = buf->previous;
+       if (dirent) {
+               if (__put_user(offset, &dirent->d_off))
+@@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
+       buf.previous = NULL;
+       buf.count = count;
+       buf.error = 0;
++      buf.file = f.file;
+       error = vfs_readdir(f.file, filldir, &buf);
+       if (error >= 0)
+@@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
+ struct getdents_callback64 {
+       struct linux_dirent64 __user * current_dir;
+       struct linux_dirent64 __user * previous;
++      struct file *file;
+       int count;
+       int error;
+ };
+@@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
+       buf->error = -EINVAL;   /* only used if we fail.. */
+       if (reclen > buf->count)
+               return -EINVAL;
++
++      if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++              return 0;
++
+       dirent = buf->previous;
+       if (dirent) {
+               if (__put_user(offset, &dirent->d_off))
+@@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
+       buf.current_dir = dirent;
+       buf.previous = NULL;
++      buf.file = f.file;
+       buf.count = count;
+       buf.error = 0;
+@@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
+               error = buf.error;
+       lastdirent = buf.previous;
+       if (lastdirent) {
+-              typeof(lastdirent->d_off) d_off = f.file->f_pos;
++              typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
+               if (__put_user(d_off, &lastdirent->d_off))
+                       error = -EFAULT;
+               else
+diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
+index 2b7882b..1c5ef48 100644
+--- a/fs/reiserfs/do_balan.c
++++ b/fs/reiserfs/do_balan.c
+@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
+               return;
+       }
+-      atomic_inc(&(fs_generation(tb->tb_sb)));
++      atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
+       do_balance_starts(tb);
+       /* balance leaf returns 0 except if combining L R and S into
+diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
+index 1d48974..2f8f4e0 100644
+--- a/fs/reiserfs/procfs.c
++++ b/fs/reiserfs/procfs.c
+@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
+                  "SMALL_TAILS " : "NO_TAILS ",
+                  replay_only(sb) ? "REPLAY_ONLY " : "",
+                  convert_reiserfs(sb) ? "CONV " : "",
+-                 atomic_read(&r->s_generation_counter),
++                 atomic_read_unchecked(&r->s_generation_counter),
+                  SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
+                  SF(s_do_balance), SF(s_unneeded_left_neighbor),
+                  SF(s_good_search_by_key_reada), SF(s_bmaps),
+diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
+index 157e474..65a6114 100644
+--- a/fs/reiserfs/reiserfs.h
++++ b/fs/reiserfs/reiserfs.h
+@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
+       /* Comment? -Hans */
+       wait_queue_head_t s_wait;
+       /* To be obsoleted soon by per buffer seals.. -Hans */
+-      atomic_t s_generation_counter;  // increased by one every time the
++      atomic_unchecked_t s_generation_counter;        // increased by one every time the
+       // tree gets re-balanced
+       unsigned long s_properties;     /* File system properties. Currently holds
+                                          on-disk FS format */
+@@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
+ #define REISERFS_USER_MEM             1       /* reiserfs user memory mode            */
+ #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
+-#define get_generation(s) atomic_read (&fs_generation(s))
++#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
+ #define FILESYSTEM_CHANGED_TB(tb)  (get_generation((tb)->tb_sb) != (tb)->fs_gen)
+ #define __fs_changed(gen,s) (gen != get_generation (s))
+ #define fs_changed(gen,s)             \
+diff --git a/fs/select.c b/fs/select.c
+index 8c1c96c..a0f9b6d 100644
+--- a/fs/select.c
++++ b/fs/select.c
+@@ -20,6 +20,7 @@
+ #include <linux/export.h>
+ #include <linux/slab.h>
+ #include <linux/poll.h>
++#include <linux/security.h>
+ #include <linux/personality.h> /* for STICKY_TIMEOUTS */
+ #include <linux/file.h>
+ #include <linux/fdtable.h>
+@@ -827,6 +828,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
+       struct poll_list *walk = head;
+       unsigned long todo = nfds;
++      gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
+       if (nfds > rlimit(RLIMIT_NOFILE))
+               return -EINVAL;
+diff --git a/fs/seq_file.c b/fs/seq_file.c
+index 774c1eb..b67582a 100644
+--- a/fs/seq_file.c
++++ b/fs/seq_file.c
+@@ -10,6 +10,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/slab.h>
+ #include <linux/cred.h>
++#include <linux/sched.h>
+ #include <asm/uaccess.h>
+ #include <asm/page.h>
+@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
+ #ifdef CONFIG_USER_NS
+       p->user_ns = file->f_cred->user_ns;
+ #endif
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      p->exec_id = current->exec_id;
++#endif
+       /*
+        * Wrappers around seq_open(e.g. swaps_open) need to be
+@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
+               return 0;
+       }
+       if (!m->buf) {
+-              m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
++              m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
+               if (!m->buf)
+                       return -ENOMEM;
+       }
+@@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
+ Eoverflow:
+       m->op->stop(m, p);
+       kfree(m->buf);
+-      m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
++      m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
+       return !m->buf ? -ENOMEM : -EAGAIN;
+ }
+@@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
+       /* grab buffer if we didn't have one */
+       if (!m->buf) {
+-              m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
++              m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
+               if (!m->buf)
+                       goto Enomem;
+       }
+@@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
+                       goto Fill;
+               m->op->stop(m, p);
+               kfree(m->buf);
+-              m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
++              m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
+               if (!m->buf)
+                       goto Enomem;
+               m->count = 0;
+@@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
+ int single_open(struct file *file, int (*show)(struct seq_file *, void *),
+               void *data)
+ {
+-      struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
++      seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
+       int res = -ENOMEM;
+       if (op) {
+diff --git a/fs/splice.c b/fs/splice.c
+index d37431d..81c3044 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -196,7 +196,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
+       pipe_lock(pipe);
+       for (;;) {
+-              if (!pipe->readers) {
++              if (!atomic_read(&pipe->readers)) {
+                       send_sig(SIGPIPE, current, 0);
+                       if (!ret)
+                               ret = -EPIPE;
+@@ -219,7 +219,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
+                       page_nr++;
+                       ret += buf->len;
+-                      if (pipe->files)
++                      if (atomic_read(&pipe->files))
+                               do_wakeup = 1;
+                       if (!--spd->nr_pages)
+@@ -250,9 +250,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
+                       do_wakeup = 0;
+               }
+-              pipe->waiting_writers++;
++              atomic_inc(&pipe->waiting_writers);
+               pipe_wait(pipe);
+-              pipe->waiting_writers--;
++              atomic_dec(&pipe->waiting_writers);
+       }
+       pipe_unlock(pipe);
+@@ -565,7 +565,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
+       old_fs = get_fs();
+       set_fs(get_ds());
+       /* The cast to a user pointer is valid due to the set_fs() */
+-      res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
++      res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
+       set_fs(old_fs);
+       return res;
+@@ -580,7 +580,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
+       old_fs = get_fs();
+       set_fs(get_ds());
+       /* The cast to a user pointer is valid due to the set_fs() */
+-      res = vfs_write(file, (__force const char __user *)buf, count, &pos);
++      res = vfs_write(file, (const char __force_user *)buf, count, &pos);
+       set_fs(old_fs);
+       return res;
+@@ -633,7 +633,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
+                       goto err;
+               this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
+-              vec[i].iov_base = (void __user *) page_address(page);
++              vec[i].iov_base = (void __force_user *) page_address(page);
+               vec[i].iov_len = this_len;
+               spd.pages[i] = page;
+               spd.nr_pages++;
+@@ -829,7 +829,7 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
+                       ops->release(pipe, buf);
+                       pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
+                       pipe->nrbufs--;
+-                      if (pipe->files)
++                      if (atomic_read(&pipe->files))
+                               sd->need_wakeup = true;
+               }
+@@ -854,10 +854,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
+ int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
+ {
+       while (!pipe->nrbufs) {
+-              if (!pipe->writers)
++              if (!atomic_read(&pipe->writers))
+                       return 0;
+-              if (!pipe->waiting_writers && sd->num_spliced)
++              if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
+                       return 0;
+               if (sd->flags & SPLICE_F_NONBLOCK)
+@@ -1193,7 +1193,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+                * out of the pipe right after the splice_to_pipe(). So set
+                * PIPE_READERS appropriately.
+                */
+-              pipe->readers = 1;
++              atomic_set(&pipe->readers, 1);
+               current->splice_pipe = pipe;
+       }
+@@ -1769,9 +1769,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+                       ret = -ERESTARTSYS;
+                       break;
+               }
+-              if (!pipe->writers)
++              if (!atomic_read(&pipe->writers))
+                       break;
+-              if (!pipe->waiting_writers) {
++              if (!atomic_read(&pipe->waiting_writers)) {
+                       if (flags & SPLICE_F_NONBLOCK) {
+                               ret = -EAGAIN;
+                               break;
+@@ -1803,7 +1803,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+       pipe_lock(pipe);
+       while (pipe->nrbufs >= pipe->buffers) {
+-              if (!pipe->readers) {
++              if (!atomic_read(&pipe->readers)) {
+                       send_sig(SIGPIPE, current, 0);
+                       ret = -EPIPE;
+                       break;
+@@ -1816,9 +1816,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+                       ret = -ERESTARTSYS;
+                       break;
+               }
+-              pipe->waiting_writers++;
++              atomic_inc(&pipe->waiting_writers);
+               pipe_wait(pipe);
+-              pipe->waiting_writers--;
++              atomic_dec(&pipe->waiting_writers);
+       }
+       pipe_unlock(pipe);
+@@ -1854,14 +1854,14 @@ retry:
+       pipe_double_lock(ipipe, opipe);
+       do {
+-              if (!opipe->readers) {
++              if (!atomic_read(&opipe->readers)) {
+                       send_sig(SIGPIPE, current, 0);
+                       if (!ret)
+                               ret = -EPIPE;
+                       break;
+               }
+-              if (!ipipe->nrbufs && !ipipe->writers)
++              if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
+                       break;
+               /*
+@@ -1958,7 +1958,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
+       pipe_double_lock(ipipe, opipe);
+       do {
+-              if (!opipe->readers) {
++              if (!atomic_read(&opipe->readers)) {
+                       send_sig(SIGPIPE, current, 0);
+                       if (!ret)
+                               ret = -EPIPE;
+@@ -2003,7 +2003,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
+        * return EAGAIN if we have the potential of some data in the
+        * future, otherwise just return 0
+        */
+-      if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
++      if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
+               ret = -EAGAIN;
+       pipe_unlock(ipipe);
+diff --git a/fs/stat.c b/fs/stat.c
+index 04ce1ac..a13dd1e 100644
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
+       stat->gid = inode->i_gid;
+       stat->rdev = inode->i_rdev;
+       stat->size = i_size_read(inode);
+-      stat->atime = inode->i_atime;
+-      stat->mtime = inode->i_mtime;
++      if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
++              stat->atime = inode->i_ctime;
++              stat->mtime = inode->i_ctime;
++      } else {
++              stat->atime = inode->i_atime;
++              stat->mtime = inode->i_mtime;
++      }
+       stat->ctime = inode->i_ctime;
+       stat->blksize = (1 << inode->i_blkbits);
+       stat->blocks = inode->i_blocks;
+@@ -46,8 +51,14 @@ int vfs_getattr(struct path *path, struct kstat *stat)
+       if (retval)
+               return retval;
+-      if (inode->i_op->getattr)
+-              return inode->i_op->getattr(path->mnt, path->dentry, stat);
++      if (inode->i_op->getattr) {
++              retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
++              if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
++                      stat->atime = stat->ctime;
++                      stat->mtime = stat->ctime;
++              }
++              return retval;
++      }
+       generic_fillattr(inode, stat);
+       return 0;
+diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
+index 15c68f9..36a8b3e 100644
+--- a/fs/sysfs/bin.c
++++ b/fs/sysfs/bin.c
+@@ -235,13 +235,13 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+       return ret;
+ }
+-static int bin_access(struct vm_area_struct *vma, unsigned long addr,
+-                void *buf, int len, int write)
++static ssize_t bin_access(struct vm_area_struct *vma, unsigned long addr,
++                void *buf, size_t len, int write)
+ {
+       struct file *file = vma->vm_file;
+       struct bin_buffer *bb = file->private_data;
+       struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
+-      int ret;
++      ssize_t ret;
+       if (!bb->vm_ops)
+               return -EINVAL;
+diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
+index e8e0e71..79c28ac5 100644
+--- a/fs/sysfs/dir.c
++++ b/fs/sysfs/dir.c
+@@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
+  *
+  *    Returns 31 bit hash of ns + name (so it fits in an off_t )
+  */
+-static unsigned int sysfs_name_hash(const void *ns, const char *name)
++static unsigned int sysfs_name_hash(const void *ns, const unsigned char *name)
+ {
+       unsigned long hash = init_name_hash();
+       unsigned int len = strlen(name);
+@@ -679,6 +679,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
+       struct sysfs_dirent *sd;
+       int rc;
++#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
++      const char *parent_name = parent_sd->s_name;
++
++      mode = S_IFDIR | S_IRWXU;
++
++      if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
++          (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
++          (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
++          (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
++              mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
++#endif
++
+       /* allocate */
+       sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
+       if (!sd)
+diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
+index 602f56d..6853db8 100644
+--- a/fs/sysfs/file.c
++++ b/fs/sysfs/file.c
+@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
+ struct sysfs_open_dirent {
+       atomic_t                refcnt;
+-      atomic_t                event;
++      atomic_unchecked_t      event;
+       wait_queue_head_t       poll;
+       struct list_head        buffers; /* goes through sysfs_buffer.list */
+ };
+@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
+       if (!sysfs_get_active(attr_sd))
+               return -ENODEV;
+-      buffer->event = atomic_read(&attr_sd->s_attr.open->event);
++      buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
+       count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
+       sysfs_put_active(attr_sd);
+@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
+               return -ENOMEM;
+       atomic_set(&new_od->refcnt, 0);
+-      atomic_set(&new_od->event, 1);
++      atomic_set_unchecked(&new_od->event, 1);
+       init_waitqueue_head(&new_od->poll);
+       INIT_LIST_HEAD(&new_od->buffers);
+       goto retry;
+@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
+       sysfs_put_active(attr_sd);
+-      if (buffer->event != atomic_read(&od->event))
++      if (buffer->event != atomic_read_unchecked(&od->event))
+               goto trigger;
+       return DEFAULT_POLLMASK;
+@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
+       od = sd->s_attr.open;
+       if (od) {
+-              atomic_inc(&od->event);
++              atomic_inc_unchecked(&od->event);
+               wake_up_interruptible(&od->poll);
+       }
+diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
+index 8c940df..25b733e 100644
+--- a/fs/sysfs/symlink.c
++++ b/fs/sysfs/symlink.c
+@@ -305,7 +305,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+ static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
+ {
+-      char *page = nd_get_link(nd);
++      const char *page = nd_get_link(nd);
+       if (!IS_ERR(page))
+               free_page((unsigned long)page);
+ }
+diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
+index 69d4889..a810bd4 100644
+--- a/fs/sysv/sysv.h
++++ b/fs/sysv/sysv.h
+@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
+ #endif
+ }
+-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
++static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
+ {
+       if (sbi->s_bytesex == BYTESEX_PDP)
+               return PDP_swab((__force __u32)n);
+diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
+index e18b988..f1d4ad0f 100644
+--- a/fs/ubifs/io.c
++++ b/fs/ubifs/io.c
+@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
+       return err;
+ }
+-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
++int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
+ {
+       int err;
+diff --git a/fs/udf/misc.c b/fs/udf/misc.c
+index c175b4d..8f36a16 100644
+--- a/fs/udf/misc.c
++++ b/fs/udf/misc.c
+@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
+ u8 udf_tag_checksum(const struct tag *t)
+ {
+-      u8 *data = (u8 *)t;
++      const u8 *data = (const u8 *)t;
+       u8 checksum = 0;
+       int i;
+       for (i = 0; i < sizeof(struct tag); ++i)
+diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
+index 8d974c4..b82f6ec 100644
+--- a/fs/ufs/swab.h
++++ b/fs/ufs/swab.h
+@@ -22,7 +22,7 @@ enum {
+       BYTESEX_BE
+ };
+-static inline u64
++static inline u64 __intentional_overflow(-1)
+ fs64_to_cpu(struct super_block *sbp, __fs64 n)
+ {
+       if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
+               return (__force __fs64)cpu_to_be64(n);
+ }
+-static inline u32
++static inline u32 __intentional_overflow(-1)
+ fs32_to_cpu(struct super_block *sbp, __fs32 n)
+ {
+       if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+diff --git a/fs/utimes.c b/fs/utimes.c
+index f4fb7ec..3fe03c0 100644
+--- a/fs/utimes.c
++++ b/fs/utimes.c
+@@ -1,6 +1,7 @@
+ #include <linux/compiler.h>
+ #include <linux/file.h>
+ #include <linux/fs.h>
++#include <linux/security.h>
+ #include <linux/linkage.h>
+ #include <linux/mount.h>
+ #include <linux/namei.h>
+@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
+                               goto mnt_drop_write_and_out;
+               }
+       }
++
++      if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
++              error = -EACCES;
++              goto mnt_drop_write_and_out;
++      }
++
+       mutex_lock(&inode->i_mutex);
+       error = notify_change(path->dentry, &newattrs);
+       mutex_unlock(&inode->i_mutex);
+diff --git a/fs/xattr.c b/fs/xattr.c
+index 3377dff..4d074d9 100644
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
+       return rc;
+ }
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++ssize_t
++pax_getxattr(struct dentry *dentry, void *value, size_t size)
++{
++      struct inode *inode = dentry->d_inode;
++      ssize_t error;
++
++      error = inode_permission(inode, MAY_EXEC);
++      if (error)
++              return error;
++
++      if (inode->i_op->getxattr)
++              error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
++      else
++              error = -EOPNOTSUPP;
++
++      return error;
++}
++EXPORT_SYMBOL(pax_getxattr);
++#endif
++
+ ssize_t
+ vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
+ {
+@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
+  * Extended attribute SET operations
+  */
+ static long
+-setxattr(struct dentry *d, const char __user *name, const void __user *value,
++setxattr(struct path *path, const char __user *name, const void __user *value,
+        size_t size, int flags)
+ {
+       int error;
+@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
+                       posix_acl_fix_xattr_from_user(kvalue, size);
+       }
+-      error = vfs_setxattr(d, kname, kvalue, size, flags);
++      if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
++              error = -EACCES;
++              goto out;
++      }
++
++      error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
+ out:
+       if (vvalue)
+               vfree(vvalue);
+@@ -377,7 +403,7 @@ retry:
+               return error;
+       error = mnt_want_write(path.mnt);
+       if (!error) {
+-              error = setxattr(path.dentry, name, value, size, flags);
++              error = setxattr(&path, name, value, size, flags);
+               mnt_drop_write(path.mnt);
+       }
+       path_put(&path);
+@@ -401,7 +427,7 @@ retry:
+               return error;
+       error = mnt_want_write(path.mnt);
+       if (!error) {
+-              error = setxattr(path.dentry, name, value, size, flags);
++              error = setxattr(&path, name, value, size, flags);
+               mnt_drop_write(path.mnt);
+       }
+       path_put(&path);
+@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
+               const void __user *,value, size_t, size, int, flags)
+ {
+       struct fd f = fdget(fd);
+-      struct dentry *dentry;
+       int error = -EBADF;
+       if (!f.file)
+               return error;
+-      dentry = f.file->f_path.dentry;
+-      audit_inode(NULL, dentry, 0);
++      audit_inode(NULL, f.file->f_path.dentry, 0);
+       error = mnt_want_write_file(f.file);
+       if (!error) {
+-              error = setxattr(dentry, name, value, size, flags);
++              error = setxattr(&f.file->f_path, name, value, size, flags);
+               mnt_drop_write_file(f.file);
+       }
+       fdput(f);
+diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
+index 9fbea87..6b19972 100644
+--- a/fs/xattr_acl.c
++++ b/fs/xattr_acl.c
+@@ -76,8 +76,8 @@ struct posix_acl *
+ posix_acl_from_xattr(struct user_namespace *user_ns,
+                    const void *value, size_t size)
+ {
+-      posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
+-      posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
++      const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
++      const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
+       int count;
+       struct posix_acl *acl;
+       struct posix_acl_entry *acl_e;
+diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
+index 8904284..ee0e14b 100644
+--- a/fs/xfs/xfs_bmap.c
++++ b/fs/xfs/xfs_bmap.c
+@@ -765,7 +765,7 @@ xfs_bmap_validate_ret(
+ #else
+ #define xfs_bmap_check_leaf_extents(cur, ip, whichfork)               do { } while (0)
+-#define       xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
++#define       xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)    do { } while (0)
+ #endif /* DEBUG */
+ /*
+diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
+index 6157424..ac98f6d 100644
+--- a/fs/xfs/xfs_dir2_sf.c
++++ b/fs/xfs/xfs_dir2_sf.c
+@@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
+               }
+               ino = xfs_dir2_sfe_get_ino(sfp, sfep);
+-              if (filldir(dirent, (char *)sfep->name, sfep->namelen,
++              if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
++                      char name[sfep->namelen];
++                      memcpy(name, sfep->name, sfep->namelen);
++                      if (filldir(dirent, name, sfep->namelen,
++                          off & 0x7fffffff, ino, DT_UNKNOWN)) {
++                              *offset = off & 0x7fffffff;
++                              return 0;
++                      }
++              } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
+                           off & 0x7fffffff, ino, DT_UNKNOWN)) {
+                       *offset = off & 0x7fffffff;
+                       return 0;
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index 5e99968..45bd327 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -127,7 +127,7 @@ xfs_find_handle(
+       }
+       error = -EFAULT;
+-      if (copy_to_user(hreq->ohandle, &handle, hsize) ||
++      if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
+           copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
+               goto out_put;
+diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
+index ca9ecaa..60100c7 100644
+--- a/fs/xfs/xfs_iops.c
++++ b/fs/xfs/xfs_iops.c
+@@ -395,7 +395,7 @@ xfs_vn_put_link(
+       struct nameidata *nd,
+       void            *p)
+ {
+-      char            *s = nd_get_link(nd);
++      const char      *s = nd_get_link(nd);
+       if (!IS_ERR(s))
+               kfree(s);
+diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
+new file mode 100644
+index 0000000..712a85d
+--- /dev/null
++++ b/grsecurity/Kconfig
+@@ -0,0 +1,1043 @@
++#
+# grsecurity configuration
++#
++menu "Memory Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_KMEM
++      bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
++      default y if GRKERNSEC_CONFIG_AUTO
++      select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
++      help
++        If you say Y here, /dev/kmem and /dev/mem won't be allowed to
++        be written to or read from to modify or leak the contents of the running
++        kernel.  /dev/port will also not be allowed to be opened and support
++        for /dev/cpu/*/msr will be removed.  If you have module
++        support disabled, enabling this will close up five ways that are
++        currently used  to insert malicious code into the running kernel.
++
++        Even with all these features enabled, we still highly recommend that
++        you use the RBAC system, as it is still possible for an attacker to
++        modify the running kernel through privileged I/O granted by ioperm/iopl.
++
++        If you are not using XFree86, you may be able to stop this additional
++        case by enabling the 'Disable privileged I/O' option. Though nothing
++        legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
++        but only to video memory, which is the only writing we allow in this
+        case.  If /dev/kmem or /dev/mem is mmaped without PROT_WRITE, the mapping
+        will not be allowed to be mprotected with PROT_WRITE later.
++        Enabling this feature will prevent the "cpupower" and "powertop" tools
++        from working.
++
++        It is highly recommended that you say Y here if you meet all the
++        conditions above.
++
++config GRKERNSEC_VM86
++      bool "Restrict VM86 mode"
++      default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
++      depends on X86_32
++
++      help
++        If you say Y here, only processes with CAP_SYS_RAWIO will be able to
++        make use of a special execution mode on 32bit x86 processors called
++        Virtual 8086 (VM86) mode.  XFree86 may need vm86 mode for certain
++        video cards and will still work with this option enabled.  The purpose
++        of the option is to prevent exploitation of emulation errors in
++        virtualization of vm86 mode like the one discovered in VMWare in 2009.
++        Nearly all users should be able to enable this option.
++
++config GRKERNSEC_IO
++      bool "Disable privileged I/O"
++      default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
++      depends on X86
++      select RTC_CLASS
++      select RTC_INTF_DEV
++      select RTC_DRV_CMOS
++
++      help
++        If you say Y here, all ioperm and iopl calls will return an error.
++        Ioperm and iopl can be used to modify the running kernel.
++        Unfortunately, some programs need this access to operate properly,
++        the most notable of which are XFree86 and hwclock.  hwclock can be
++        remedied by having RTC support in the kernel, so real-time 
++        clock support is enabled if this option is enabled, to ensure 
++        that hwclock operates correctly.  XFree86 still will not 
++        operate correctly with this option enabled, so DO NOT CHOOSE Y 
++        IF YOU USE XFree86.  If you use XFree86 and you still want to 
++        protect your kernel against modification, use the RBAC system.
++
++config GRKERNSEC_JIT_HARDEN
++      bool "Harden BPF JIT against spray attacks"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on BPF_JIT
++      help
++        If you say Y here, the native code generated by the kernel's Berkeley
++        Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
++        attacks that attempt to fit attacker-beneficial instructions in
++        32bit immediate fields of JIT-generated native instructions.  The
++        attacker will generally aim to cause an unintended instruction sequence
++        of JIT-generated native code to execute by jumping into the middle of
++        a generated instruction.  This feature effectively randomizes the 32bit
++        immediate constants present in the generated code to thwart such attacks.
++
++        If you're using KERNEXEC, it's recommended that you enable this option
++        to supplement the hardening of the kernel.
++  
++config GRKERNSEC_PERF_HARDEN
++      bool "Disable unprivileged PERF_EVENTS usage by default"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on PERF_EVENTS
++      help
++        If you say Y here, the range of acceptable values for the
++        /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
++        default to a new value: 3.  When the sysctl is set to this value, no
++        unprivileged use of the PERF_EVENTS syscall interface will be permitted.
++
++        Though PERF_EVENTS can be used legitimately for performance monitoring
++        and low-level application profiling, it is forced on regardless of
++        configuration, has been at fault for several vulnerabilities, and
++        creates new opportunities for side channels and other information leaks.
++
++        This feature puts PERF_EVENTS into a secure default state and permits
++        the administrator to change out of it temporarily if unprivileged
++        application profiling is needed.
++
++config GRKERNSEC_RAND_THREADSTACK
++      bool "Insert random gaps between thread stacks"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on PAX_RANDMMAP && !PPC
++      help
++        If you say Y here, a random-sized gap will be enforced between allocated
++        thread stacks.  Glibc's NPTL and other threading libraries that
++        pass MAP_STACK to the kernel for thread stack allocation are supported.
++        The implementation currently provides 8 bits of entropy for the gap.
++
++        Many distributions do not compile threaded remote services with the
++        -fstack-check argument to GCC, causing the variable-sized stack-based
++        allocator, alloca(), to not probe the stack on allocation.  This
++        permits an unbounded alloca() to skip over any guard page and potentially
++        modify another thread's stack reliably.  An enforced random gap
++        reduces the reliability of such an attack and increases the chance
++        that such a read/write to another thread's stack instead lands in
++        an unmapped area, causing a crash and triggering grsecurity's
++        anti-bruteforcing logic.
++
++config GRKERNSEC_PROC_MEMMAP
++      bool "Harden ASLR against information leaks and entropy reduction"
++      default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
++      depends on PAX_NOEXEC || PAX_ASLR
++      help
++        If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
++        give no information about the addresses of its mappings if
++        PaX features that rely on random addresses are enabled on the task.
++        In addition to sanitizing this information and disabling other
+        dangerous sources of information, this option causes reads of sensitive
+        /proc/<pid> entries to return no data when the file descriptor was opened
+        in a different task than the one performing the read.  Such attempts are logged.
++        This option also limits argv/env strings for suid/sgid binaries
++        to 512KB to prevent a complete exhaustion of the stack entropy provided
++        by ASLR.  Finally, it places an 8MB stack resource limit on suid/sgid
++        binaries to prevent alternative mmap layouts from being abused.
++
++        If you use PaX it is essential that you say Y here as it closes up
++        several holes that make full ASLR useless locally.
++
++config GRKERNSEC_BRUTE
++      bool "Deter exploit bruteforcing"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, attempts to bruteforce exploits against forking
++        daemons such as apache or sshd, as well as against suid/sgid binaries
++        will be deterred.  When a child of a forking daemon is killed by PaX
++        or crashes due to an illegal instruction or other suspicious signal,
++        the parent process will be delayed 30 seconds upon every subsequent
++        fork until the administrator is able to assess the situation and
++        restart the daemon.
++        In the suid/sgid case, the attempt is logged, the user has all their
++        existing instances of the suid/sgid binary terminated and will
++        be unable to execute any suid/sgid binaries for 15 minutes.
++
++        It is recommended that you also enable signal logging in the auditing
++        section so that logs are generated when a process triggers a suspicious
++        signal.
++        If the sysctl option is enabled, a sysctl option with name
++        "deter_bruteforce" is created.
++
++
++config GRKERNSEC_MODHARDEN
++      bool "Harden module auto-loading"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on MODULES
++      help
++        If you say Y here, module auto-loading in response to use of some
++        feature implemented by an unloaded module will be restricted to
++        root users.  Enabling this option helps defend against attacks 
++        by unprivileged users who abuse the auto-loading behavior to 
++        cause a vulnerable module to load that is then exploited.
++
++        If this option prevents a legitimate use of auto-loading for a 
++        non-root user, the administrator can execute modprobe manually 
++        with the exact name of the module mentioned in the alert log.
++        Alternatively, the administrator can add the module to the list
++        of modules loaded at boot by modifying init scripts.
++
++        Modification of init scripts will most likely be needed on 
++        Ubuntu servers with encrypted home directory support enabled,
++        as the first non-root user logging in will cause the ecb(aes),
++        ecb(aes)-all, cbc(aes), and cbc(aes)-all  modules to be loaded.
++
++config GRKERNSEC_HIDESYM
++      bool "Hide kernel symbols"
++      default y if GRKERNSEC_CONFIG_AUTO
++      select PAX_USERCOPY_SLABS
++      help
++        If you say Y here, getting information on loaded modules, and
++        displaying all kernel symbols through a syscall will be restricted
++        to users with CAP_SYS_MODULE.  For software compatibility reasons,
++        /proc/kallsyms will be restricted to the root user.  The RBAC
++        system can hide that entry even from root.
++
++        This option also prevents leaking of kernel addresses through
++        several /proc entries.
++
++        Note that this option is only effective provided the following
++        conditions are met:
++        1) The kernel using grsecurity is not precompiled by some distribution
++        2) You have also enabled GRKERNSEC_DMESG
++        3) You are using the RBAC system and hiding other files such as your
++           kernel image and System.map.  Alternatively, enabling this option
++           causes the permissions on /boot, /lib/modules, and the kernel
++           source directory to change at compile time to prevent 
++           reading by non-root users.
++        If the above conditions are met, this option will aid in providing a
++        useful protection against local kernel exploitation of overflows
++        and arbitrary read/write vulnerabilities.
++
++        It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
++        in addition to this feature.
++
++config GRKERNSEC_KERN_LOCKOUT
++      bool "Active kernel exploit response"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on X86 || ARM || PPC || SPARC
++      help
++        If you say Y here, when a PaX alert is triggered due to suspicious
++        activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
++        or an OOPS occurs due to bad memory accesses, instead of just
++        terminating the offending process (and potentially allowing
++        a subsequent exploit from the same user), we will take one of two
++        actions:
++         If the user was root, we will panic the system
++         If the user was non-root, we will log the attempt, terminate
++         all processes owned by the user, then prevent them from creating
++         any new processes until the system is restarted
++        This deters repeated kernel exploitation/bruteforcing attempts
++        and is useful for later forensics.
++
++endmenu
++menu "Role Based Access Control Options"
++depends on GRKERNSEC
++
++config GRKERNSEC_RBAC_DEBUG
++      bool
++
++config GRKERNSEC_NO_RBAC
++      bool "Disable RBAC system"
++      help
++        If you say Y here, the /dev/grsec device will be removed from the kernel,
++        preventing the RBAC system from being enabled.  You should only say Y
++        here if you have no intention of using the RBAC system, so as to prevent
++        an attacker with root access from misusing the RBAC system to hide files
++        and processes when loadable module support and /dev/[k]mem have been
++        locked down.
++
++config GRKERNSEC_ACL_HIDEKERN
++      bool "Hide kernel processes"
++      help
++        If you say Y here, all kernel threads will be hidden to all
++        processes but those whose subject has the "view hidden processes"
++        flag.
++
++config GRKERNSEC_ACL_MAXTRIES
++      int "Maximum tries before password lockout"
++      default 3
++      help
++        This option enforces the maximum number of times a user can attempt
++        to authorize themselves with the grsecurity RBAC system before being
++        denied the ability to attempt authorization again for a specified time.
++        The lower the number, the harder it will be to brute-force a password.
++
++config GRKERNSEC_ACL_TIMEOUT
++      int "Time to wait after max password tries, in seconds"
++      default 30
++      help
++        This option specifies the time the user must wait after attempting to
++        authorize to the RBAC system with the maximum number of invalid
++        passwords.  The higher the number, the harder it will be to brute-force
++        a password.
++
++endmenu
++menu "Filesystem Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_PROC
++      bool "Proc restrictions"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, the permissions of the /proc filesystem
++        will be altered to enhance system security and privacy.  You MUST
++        choose either a user only restriction or a user and group restriction.
++        Depending upon the option you choose, you can either restrict users to
++        see only the processes they themselves run, or choose a group that can
++        view all processes and files normally restricted to root if you choose
++        the "restrict to user only" option.  NOTE: If you're running identd or
++        ntpd as a non-root user, you will have to run it as the group you
++        specify here.
++
++config GRKERNSEC_PROC_USER
++      bool "Restrict /proc to user only"
++      depends on GRKERNSEC_PROC
++      help
+        If you say Y here, non-root users will only be able to view their own
+        processes, and will be restricted from viewing network-related
+        information as well as kernel symbol and module information.
++
++config GRKERNSEC_PROC_USERGROUP
++      bool "Allow special group"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
++      help
++        If you say Y here, you will be able to select a group that will be
++        able to view all processes and network-related information.  If you've
++        enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
++        remain hidden.  This option is useful if you want to run identd as
++        a non-root user.  The group you select may also be chosen at boot time
++        via "grsec_proc_gid=" on the kernel commandline.
++
++config GRKERNSEC_PROC_GID
++      int "GID for special group"
++      depends on GRKERNSEC_PROC_USERGROUP
++      default 1001
++
++config GRKERNSEC_PROC_ADD
++      bool "Additional restrictions"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
++      help
++        If you say Y here, additional restrictions will be placed on
++        /proc that keep normal users from viewing device information and 
++        slabinfo information that could be useful for exploits.
++
++config GRKERNSEC_LINK
++      bool "Linking restrictions"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, /tmp race exploits will be prevented, since users
++        will no longer be able to follow symlinks owned by other users in
++        world-writable +t directories (e.g. /tmp), unless the owner of the
+        symlink is the owner of the directory.  Users will also not be
++        able to hardlink to files they do not own.  If the sysctl option is
++        enabled, a sysctl option with name "linking_restrictions" is created.
++
++config GRKERNSEC_SYMLINKOWN
++      bool "Kernel-enforced SymlinksIfOwnerMatch"
++      default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
++      help
++        Apache's SymlinksIfOwnerMatch option has an inherent race condition
++        that prevents it from being used as a security feature.  As Apache
++        verifies the symlink by performing a stat() against the target of
++        the symlink before it is followed, an attacker can set up a symlink
++        to point to a same-owned file, then replace the symlink with one
++        that targets another user's file just after Apache "validates" the
++        symlink -- a classic TOCTOU race.  If you say Y here, a complete,
++        race-free replacement for Apache's "SymlinksIfOwnerMatch" option
++        will be in place for the group you specify. If the sysctl option
++        is enabled, a sysctl option with name "enforce_symlinksifowner" is
++        created.
++
++config GRKERNSEC_SYMLINKOWN_GID
++      int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
++      depends on GRKERNSEC_SYMLINKOWN
++      default 1006
++      help
++        Setting this GID determines what group kernel-enforced
++        SymlinksIfOwnerMatch will be enabled for.  If the sysctl option
++        is enabled, a sysctl option with name "symlinkown_gid" is created.
++
++config GRKERNSEC_FIFO
++      bool "FIFO restrictions"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, users will not be able to write to FIFOs they don't
++        own in world-writable +t directories (e.g. /tmp), unless the owner of
++        the FIFO is the same as the owner of the directory it's held in.  If
++        the sysctl option is enabled, a sysctl option with name
++        "fifo_restrictions" is created.
++
++config GRKERNSEC_SYSFS_RESTRICT
++      bool "Sysfs/debugfs restriction"
++      default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
++      depends on SYSFS
++      help
++        If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
++        any filesystem normally mounted under it (e.g. debugfs) will be
++        mostly accessible only by root.  These filesystems generally provide access
++        to hardware and debug information that isn't appropriate for unprivileged
++        users of the system.  Sysfs and debugfs have also become a large source
++        of new vulnerabilities, ranging from infoleaks to local compromise.
++        There has been very little oversight with an eye toward security involved
++        in adding new exporters of information to these filesystems, so their
++        use is discouraged.
++        For reasons of compatibility, a few directories have been whitelisted
++        for access by non-root users:
++        /sys/fs/selinux
++        /sys/fs/fuse
++        /sys/devices/system/cpu
++
++config GRKERNSEC_ROFS
++      bool "Runtime read-only mount protection"
++      help
++        If you say Y here, a sysctl option with name "romount_protect" will
++        be created.  By setting this option to 1 at runtime, filesystems
++        will be protected in the following ways:
++        * No new writable mounts will be allowed
++        * Existing read-only mounts won't be able to be remounted read/write
++        * Write operations will be denied on all block devices
++        This option acts independently of grsec_lock: once it is set to 1,
++        it cannot be turned off.  Therefore, please be mindful of the resulting
++        behavior if this option is enabled in an init script on a read-only
++        filesystem.  This feature is mainly intended for secure embedded systems.
++
++config GRKERNSEC_DEVICE_SIDECHANNEL
++      bool "Eliminate stat/notify-based device sidechannels"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, timing analyses on block or character
++        devices like /dev/ptmx using stat or inotify/dnotify/fanotify
++        will be thwarted for unprivileged users.  If a process without
++        CAP_MKNOD stats such a device, the last access and last modify times
++        will match the device's create time.  No access or modify events
++        will be triggered through inotify/dnotify/fanotify for such devices.
++        This feature will prevent attacks that may at a minimum
++        allow an attacker to determine the administrator's password length.
++
++config GRKERNSEC_CHROOT
++      bool "Chroot jail restrictions"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, you will be able to choose several options that will
++        make breaking out of a chrooted jail much more difficult.  If you
++        encounter no software incompatibilities with the following options, it
++        is recommended that you enable each one.
++
++config GRKERNSEC_CHROOT_MOUNT
++      bool "Deny mounts"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, processes inside a chroot will not be able to
++        mount or remount filesystems.  If the sysctl option is enabled, a
++        sysctl option with name "chroot_deny_mount" is created.
++
++config GRKERNSEC_CHROOT_DOUBLE
++      bool "Deny double-chroots"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, processes inside a chroot will not be able to chroot
++        again outside the chroot.  This is a widely used method of breaking
++        out of a chroot jail and should not be allowed.  If the sysctl 
++        option is enabled, a sysctl option with name 
++        "chroot_deny_chroot" is created.
++
++config GRKERNSEC_CHROOT_PIVOT
++      bool "Deny pivot_root in chroot"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, processes inside a chroot will not be able to use
++        a function called pivot_root() that was introduced in Linux 2.3.41.  It
++        works similarly to chroot in that it changes the root filesystem.  This
++        function could be misused in a chrooted process to attempt to break out
++        of the chroot, and therefore should not be allowed.  If the sysctl
++        option is enabled, a sysctl option with name "chroot_deny_pivot" is
++        created.
++
++config GRKERNSEC_CHROOT_CHDIR
++      bool "Enforce chdir(\"/\") on all chroots"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, the current working directory of all newly-chrooted
++        applications will be set to the root directory of the chroot.
++        The man page on chroot(2) states:
++        Note that this call does not change  the  current  working
++        directory,  so  that `.' can be outside the tree rooted at
++        `/'.  In particular, the  super-user  can  escape  from  a
++        `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
++
++        It is recommended that you say Y here, since it's not known to break
++        any software.  If the sysctl option is enabled, a sysctl option with
++        name "chroot_enforce_chdir" is created.
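For context, the escape quoted from chroot(2) above can be written as a minimal user-space sketch. It is illustrative only: it requires root (CAP_SYS_CHROOT) inside the jail, and it is exactly the sequence that this option, together with the double-chroot denial above, is meant to stop.

    #include <errno.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
        int i;

        /* "mkdir foo; chroot foo" -- chroot() does not change the cwd,
           so "." is still outside the tree rooted at the new "/" */
        if (mkdir("foo", 0700) == -1 && errno != EEXIST)
            perror("mkdir");
        if (chroot("foo") == -1) {
            perror("chroot");
            return 1;
        }

        /* "cd .." repeatedly walks up past the confined root ... */
        for (i = 0; i < 64; i++)
            chdir("..");

        /* ... and a final chroot(".") lands back on the real root */
        if (chroot(".") == -1)
            perror("chroot .");

        execl("/bin/sh", "sh", (char *)NULL);
        perror("execl");
        return 1;
    }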
++
++config GRKERNSEC_CHROOT_CHMOD
++      bool "Deny (f)chmod +s"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, processes inside a chroot will not be able to chmod
++        or fchmod files to make them have suid or sgid bits.  This protects
++        against another published method of breaking a chroot.  If the sysctl
++        option is enabled, a sysctl option with name "chroot_deny_chmod" is
++        created.
++
++config GRKERNSEC_CHROOT_FCHDIR
++      bool "Deny fchdir out of chroot"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, a well-known method of breaking chroots by fchdir'ing
++        to a file descriptor of the chrooting process that points to a directory
++        outside the filesystem will be stopped.  If the sysctl option
++        is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
++
++config GRKERNSEC_CHROOT_MKNOD
++      bool "Deny mknod"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, processes inside a chroot will not be allowed to
++        mknod.  The problem with using mknod inside a chroot is that it
++        would allow an attacker to create a device entry that is the same
++        as one on the physical root of your system, which could be anything
++        from the console device to a device for your hard drive (which
++        they could then use to wipe the drive or steal data).  It is recommended
++        that you say Y here, unless you run into software incompatibilities.
++        If the sysctl option is enabled, a sysctl option with name
++        "chroot_deny_mknod" is created.
++
++config GRKERNSEC_CHROOT_SHMAT
++      bool "Deny shmat() out of chroot"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, processes inside a chroot will not be able to attach
++        to shared memory segments that were created outside of the chroot jail.
++        It is recommended that you say Y here.  If the sysctl option is enabled,
++        a sysctl option with name "chroot_deny_shmat" is created.
++
++config GRKERNSEC_CHROOT_UNIX
++      bool "Deny access to abstract AF_UNIX sockets out of chroot"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, processes inside a chroot will not be able to
++        connect to abstract (meaning not belonging to a filesystem) Unix
++        domain sockets that were bound outside of a chroot.  It is recommended
++        that you say Y here.  If the sysctl option is enabled, a sysctl option
++        with name "chroot_deny_unix" is created.
++
++config GRKERNSEC_CHROOT_FINDTASK
++      bool "Protect outside processes"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, processes inside a chroot will not be able to
++        kill, send signals with fcntl, ptrace, capget, getpgid, setpgid, 
++        getsid, or view any process outside of the chroot.  If the sysctl
++        option is enabled, a sysctl option with name "chroot_findtask" is
++        created.
++
++config GRKERNSEC_CHROOT_NICE
++      bool "Restrict priority changes"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, processes inside a chroot will not be able to raise
++        the priority of processes in the chroot, or alter the priority of
++        processes outside the chroot.  This provides more security than simply
++        removing CAP_SYS_NICE from the process' capability set.  If the
++        sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
++        is created.
++
++config GRKERNSEC_CHROOT_SYSCTL
++      bool "Deny sysctl writes"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, an attacker in a chroot will not be able to
++        write to sysctl entries, either by sysctl(2) or through a /proc
++        interface.  It is strongly recommended that you say Y here. If the
++        sysctl option is enabled, a sysctl option with name
++        "chroot_deny_sysctl" is created.
++
++config GRKERNSEC_CHROOT_CAPS
++      bool "Capability restrictions"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, the capabilities on all processes within a
++        chroot jail will be lowered to stop module insertion, raw i/o,
++        system and net admin tasks, rebooting the system, modifying immutable
++        files, modifying IPC owned by another, and changing the system time.
++        This is left as an option because it can break some apps.  Disable this
++        if your chrooted apps are having problems performing those kinds of
++        tasks.  If the sysctl option is enabled, a sysctl option with
++        name "chroot_caps" is created.
++
++config GRKERNSEC_CHROOT_INITRD
++      bool "Exempt initrd tasks from restrictions"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT && BLK_DEV_RAM
++      help
++        If you say Y here, tasks started prior to init will be exempted from
++        grsecurity's chroot restrictions.  This option is mainly meant to
++        deal with Plymouth performing privileged operations unnecessarily
++        in a chroot.
++
++endmenu
++menu "Kernel Auditing"
++depends on GRKERNSEC
++
++config GRKERNSEC_AUDIT_GROUP
++      bool "Single group for auditing"
++      help
++        If you say Y here, the exec and chdir logging features will only operate
++        on a group you specify.  This option is recommended if you only want to
++        watch certain users instead of having a large amount of logs from the
++        entire system.  If the sysctl option is enabled, a sysctl option with
++        name "audit_group" is created.
++
++config GRKERNSEC_AUDIT_GID
++      int "GID for auditing"
++      depends on GRKERNSEC_AUDIT_GROUP
++      default 1007
++
++config GRKERNSEC_EXECLOG
++      bool "Exec logging"
++      help
++        If you say Y here, all execve() calls will be logged (since the
++        other exec*() calls are frontends to execve(), all execution
++        will be logged).  Useful for shell-servers that like to keep track
++        of their users.  If the sysctl option is enabled, a sysctl option with
++        name "exec_logging" is created.
++        WARNING: When enabled, this option will produce a LOT of logs, especially
++        on an active system.
++
++config GRKERNSEC_RESLOG
++      bool "Resource logging"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, all attempts to overstep resource limits will
++        be logged with the resource name, the requested size, and the current
++        limit.  It is highly recommended that you say Y here.  If the sysctl
++        option is enabled, a sysctl option with name "resource_logging" is
++        created.  If the RBAC system is enabled, the sysctl value is ignored.
++
++config GRKERNSEC_CHROOT_EXECLOG
++      bool "Log execs within chroot"
++      help
++        If you say Y here, all executions inside a chroot jail will be logged
++        to syslog.  This can cause a large amount of logs if certain
++        applications (e.g. djb's daemontools) are installed on the system, and
++        is therefore left as an option.  If the sysctl option is enabled, a
++        sysctl option with name "chroot_execlog" is created.
++
++config GRKERNSEC_AUDIT_PTRACE
++      bool "Ptrace logging"
++      help
++        If you say Y here, all attempts to attach to a process via ptrace
++        will be logged.  If the sysctl option is enabled, a sysctl option
++        with name "audit_ptrace" is created.
++
++config GRKERNSEC_AUDIT_CHDIR
++      bool "Chdir logging"
++      help
++        If you say Y here, all chdir() calls will be logged.  If the sysctl
++        option is enabled, a sysctl option with name "audit_chdir" is created.
++
++config GRKERNSEC_AUDIT_MOUNT
++      bool "(Un)Mount logging"
++      help
++        If you say Y here, all mounts and unmounts will be logged.  If the
++        sysctl option is enabled, a sysctl option with name "audit_mount" is
++        created.
++
++config GRKERNSEC_SIGNAL
++      bool "Signal logging"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, certain important signals will be logged, such as
++        SIGSEGV, which will inform you when an error in a program has
++        occurred, which in some cases could indicate an exploit attempt.
++        If the sysctl option is enabled, a sysctl option with name
++        "signal_logging" is created.
++
++config GRKERNSEC_FORKFAIL
++      bool "Fork failure logging"
++      help
++        If you say Y here, all failed fork() attempts will be logged.
++        This could suggest a fork bomb, or someone attempting to overstep
++        their process limit.  If the sysctl option is enabled, a sysctl option
++        with name "forkfail_logging" is created.
++
++config GRKERNSEC_TIME
++      bool "Time change logging"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, any changes of the system clock will be logged.
++        If the sysctl option is enabled, a sysctl option with name
++        "timechange_logging" is created.
++
++config GRKERNSEC_PROC_IPADDR
++      bool "/proc/<pid>/ipaddr support"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, a new entry will be added to each /proc/<pid>
++        directory that contains the IP address of the person using the task.
++        The IP is carried across local TCP and AF_UNIX stream sockets.
++        This information can be useful for IDS/IPSes to perform remote response
++        to a local attack.  The entry is readable by only the owner of the
++        process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
++        the RBAC system), and thus does not create privacy concerns.
++
++config GRKERNSEC_RWXMAP_LOG
++      bool 'Denied RWX mmap/mprotect logging'
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
++      help
++        If you say Y here, calls to mmap() and mprotect() with explicit
++        usage of PROT_WRITE and PROT_EXEC together will be logged when
++        denied by the PAX_MPROTECT feature.  This feature will also
++        log other problematic scenarios that can occur when PAX_MPROTECT
++        is enabled on a binary, like textrels and PT_GNU_STACK.  If the
++        sysctl option is enabled, a sysctl option with name "rwxmap_logging"
++        is created.
++
++endmenu
++
++menu "Executable Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_DMESG
++      bool "Dmesg(8) restriction"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, non-root users will not be able to use dmesg(8)
++        to view the contents of the kernel's circular log buffer.
++        The kernel's log buffer often contains kernel addresses and other
++        identifying information useful to an attacker in fingerprinting a
++        system for a targeted exploit.
++        If the sysctl option is enabled, a sysctl option with name "dmesg" is
++        created.
++
++config GRKERNSEC_HARDEN_PTRACE
++      bool "Deter ptrace-based process snooping"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, TTY sniffers and other malicious monitoring
++        programs implemented through ptrace will be defeated.  If you
++        have been using the RBAC system, this option has already been
++        enabled for several years for all users, with the ability to make
++        fine-grained exceptions.
++
++        This option only affects the ability of non-root users to ptrace
++        processes that are not a descendant of the ptracing process.
++        This means that strace ./binary and gdb ./binary will still work,
++        but attaching to arbitrary processes will not.  If the sysctl
++        option is enabled, a sysctl option with name "harden_ptrace" is
++        created.
++
++config GRKERNSEC_PTRACE_READEXEC
++      bool "Require read access to ptrace sensitive binaries"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, unprivileged users will not be able to ptrace unreadable
++        binaries.  This option is useful in environments that
++        remove the read bits (e.g. file mode 4711) from suid binaries to
++        prevent infoleaking of their contents.  This option adds
++        consistency to the use of that file mode, as the binary could otherwise
++        be read out through ptrace when run without privileges.
++
++        If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
++        is created.
++
++config GRKERNSEC_SETXID
++      bool "Enforce consistent multithreaded privileges"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on (X86 || SPARC64 || PPC || ARM || MIPS)
++      help
++        If you say Y here, a change from a root uid to a non-root uid
++        in a multithreaded application will cause the resulting uids,
++        gids, supplementary groups, and capabilities in that thread
++        to be propagated to the other threads of the process.  In most
++        cases this is unnecessary, as glibc will emulate this behavior
++        on behalf of the application.  Other libcs do not act in the
++        same way, allowing the other threads of the process to continue
++        running with root privileges.  If the sysctl option is enabled,
++        a sysctl option with name "consistent_setxid" is created.
++
++config GRKERNSEC_TPE
++      bool "Trusted Path Execution (TPE)"
++      default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
++      help
++        If you say Y here, you will be able to choose a gid to add to the
++        supplementary groups of users you want to mark as "untrusted."
++        These users will not be able to execute any files that are not in
++        root-owned directories writable only by root.  If the sysctl option
++        is enabled, a sysctl option with name "tpe" is created.
++
++config GRKERNSEC_TPE_ALL
++      bool "Partially restrict all non-root users"
++      depends on GRKERNSEC_TPE
++      help
++        If you say Y here, all non-root users will be covered under
++        a weaker TPE restriction.  This is separate from, and in addition to,
++        the main TPE options that you have selected elsewhere.  Thus, if a
++        "trusted" GID is chosen, this restriction applies to even that GID.
++        Under this restriction, all non-root users will only be allowed to
++        execute files in directories they own that are not group or
++        world-writable, or in directories owned by root and writable only by
++        root.  If the sysctl option is enabled, a sysctl option with name
++        "tpe_restrict_all" is created.
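As a rough user-space approximation of the path rule described above (a directory counts as trusted when it is owned by root and neither group- nor world-writable), a check could look like the following. This only illustrates the rule; it is not the in-kernel implementation.

    #include <stdio.h>
    #include <sys/stat.h>

    /* trusted per the help text: root-owned and writable only by root */
    static int dir_is_trusted(const char *dir)
    {
        struct stat st;

        if (stat(dir, &st) == -1 || !S_ISDIR(st.st_mode))
            return 0;
        return st.st_uid == 0 && !(st.st_mode & (S_IWGRP | S_IWOTH));
    }

    int main(void)
    {
        printf("/bin: %s\n", dir_is_trusted("/bin") ? "trusted" : "untrusted");
        printf("/tmp: %s\n", dir_is_trusted("/tmp") ? "trusted" : "untrusted");
        return 0;
    }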
++
++config GRKERNSEC_TPE_INVERT
++      bool "Invert GID option"
++      depends on GRKERNSEC_TPE
++      help
++        If you say Y here, the group you specify in the TPE configuration will
++        decide what group TPE restrictions will be *disabled* for.  This
++        option is useful if you want TPE restrictions to be applied to most
++        users on the system.  If the sysctl option is enabled, a sysctl option
++        with name "tpe_invert" is created.  Unlike other sysctl options, this
++        entry will default to on for backward-compatibility.
++
++config GRKERNSEC_TPE_GID
++      int
++      default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
++      default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
++      
++config GRKERNSEC_TPE_UNTRUSTED_GID
++      int "GID for TPE-untrusted users"
++      depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
++      default 1005
++      help
++        Setting this GID determines what group TPE restrictions will be
++        *enabled* for.  If the sysctl option is enabled, a sysctl option
++        with name "tpe_gid" is created.
++
++config GRKERNSEC_TPE_TRUSTED_GID
++      int "GID for TPE-trusted users"
++      depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
++      default 1005
++      help
++        Setting this GID determines what group TPE restrictions will be
++        *disabled* for.  If the sysctl option is enabled, a sysctl option
++        with name "tpe_gid" is created.
++
++endmenu
++menu "Network Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_RANDNET
++      bool "Larger entropy pools"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, the entropy pools used for many features of Linux
++        and grsecurity will be doubled in size.  Since several grsecurity
++        features use additional randomness, it is recommended that you say Y
++        here.  Saying Y here has a similar effect as modifying
++        /proc/sys/kernel/random/poolsize.
++
++config GRKERNSEC_BLACKHOLE
++      bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on NET
++      help
++        If you say Y here, neither TCP resets nor ICMP
++        destination-unreachable packets will be sent in response to packets
++        sent to ports for which no associated listening process exists.
++        This feature supports both IPv4 and IPv6 and exempts the
++        loopback interface from blackholing.  Enabling this feature
++        makes a host more resilient to DoS attacks and reduces network
++        visibility against scanners.
++
++        The blackhole feature as-implemented is equivalent to the FreeBSD
++        blackhole feature, as it prevents RST responses to all packets, not
++        just SYNs.  Under most application behavior this causes no
++        problems, but applications (like haproxy) may not close certain
++        connections in a way that cleanly terminates them on the remote
++        end, leaving the remote host in LAST_ACK state.  Because of this
++        side-effect and to prevent intentional LAST_ACK DoSes, this
++        feature also adds automatic mitigation against such attacks.
++        The mitigation drastically reduces the amount of time a socket
++        can spend in LAST_ACK state.  If you're using haproxy and not
++        all servers it connects to have this option enabled, consider
++        disabling this feature on the haproxy host.
++
++        If the sysctl option is enabled, two sysctl options with names
++        "ip_blackhole" and "lastack_retries" will be created.
++        While "ip_blackhole" takes the standard zero/non-zero on/off
++        toggle, "lastack_retries" uses the same kinds of values as
++        "tcp_retries1" and "tcp_retries2".  The default value of 4
++        prevents a socket from lasting more than 45 seconds in LAST_ACK
++        state.
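The 45-second figure above works out if one assumes the classic 3-second initial TCP retransmission timeout with exponential backoff (an assumption about the TCP defaults, not something stated in this help text): 3 + 6 + 12 + 24 = 45 seconds for the default of 4 retries. A trivial check:

    #include <stdio.h>

    int main(void)
    {
        int rto = 3, total = 0, i;        /* assumed 3 s initial RTO */

        for (i = 0; i < 4; i++) {         /* lastack_retries default of 4 */
            total += rto;
            rto *= 2;                     /* exponential backoff */
        }
        printf("worst case in LAST_ACK: %d s\n", total);   /* prints 45 */
        return 0;
    }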
++
++config GRKERNSEC_NO_SIMULT_CONNECT
++      bool "Disable TCP Simultaneous Connect"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on NET
++      help
++        If you say Y here, a feature by Willy Tarreau will be enabled that
++        removes a weakness in Linux's strict implementation of TCP that
++        allows two clients to connect to each other without either entering
++        a listening state.  The weakness allows an attacker to easily prevent
++        a client from connecting to a known server provided the source port
++        for the connection is guessed correctly.
++
++        As the weakness could be used to prevent an antivirus or IPS from
++        fetching updates, or prevent an SSL gateway from fetching a CRL,
++        it should be eliminated by enabling this option.  Though Linux is
++        one of few operating systems supporting simultaneous connect, it
++        has no legitimate use in practice and is rarely supported by firewalls.
++      
++config GRKERNSEC_SOCKET
++      bool "Socket restrictions"
++      depends on NET
++      help
++        If you say Y here, you will be able to choose from several options.
++        If you assign a GID on your system and add it to the supplementary
++        groups of users you want to restrict socket access to, this patch
++        will perform up to three things, based on the option(s) you choose.
++
++config GRKERNSEC_SOCKET_ALL
++      bool "Deny any sockets to group"
++      depends on GRKERNSEC_SOCKET
++      help
++        If you say Y here, you will be able to choose a GID whose users will
++        be unable to connect to other hosts from your machine or run server
++        applications from your machine.  If the sysctl option is enabled, a
++        sysctl option with name "socket_all" is created.
++
++config GRKERNSEC_SOCKET_ALL_GID
++      int "GID to deny all sockets for"
++      depends on GRKERNSEC_SOCKET_ALL
++      default 1004
++      help
++        Here you can choose the GID to disable socket access for. Remember to
++        add the users you want socket access disabled for to the GID
++        specified here.  If the sysctl option is enabled, a sysctl option
++        with name "socket_all_gid" is created.
++
++config GRKERNSEC_SOCKET_CLIENT
++      bool "Deny client sockets to group"
++      depends on GRKERNSEC_SOCKET
++      help
++        If you say Y here, you will be able to choose a GID whose users will
++        be unable to connect to other hosts from your machine, but will be
++        able to run servers.  If this option is enabled, all users in the group
++        you specify will have to use passive mode when initiating ftp transfers
++        from the shell on your machine.  If the sysctl option is enabled, a
++        sysctl option with name "socket_client" is created.
++
++config GRKERNSEC_SOCKET_CLIENT_GID
++      int "GID to deny client sockets for"
++      depends on GRKERNSEC_SOCKET_CLIENT
++      default 1003
++      help
++        Here you can choose the GID to disable client socket access for.
++        Remember to add the users you want client socket access disabled for to
++        the GID specified here.  If the sysctl option is enabled, a sysctl
++        option with name "socket_client_gid" is created.
++
++config GRKERNSEC_SOCKET_SERVER
++      bool "Deny server sockets to group"
++      depends on GRKERNSEC_SOCKET
++      help
++        If you say Y here, you will be able to choose a GID whose users will
++        be unable to run server applications from your machine.  If the sysctl
++        option is enabled, a sysctl option with name "socket_server" is created.
++
++config GRKERNSEC_SOCKET_SERVER_GID
++      int "GID to deny server sockets for"
++      depends on GRKERNSEC_SOCKET_SERVER
++      default 1002
++      help
++        Here you can choose the GID to disable server socket access for.
++        Remember to add the users you want server socket access disabled for to
++        the GID specified here.  If the sysctl option is enabled, a sysctl
++        option with name "socket_server_gid" is created.
++
++endmenu
++menu "Sysctl Support"
++depends on GRKERNSEC && SYSCTL
++
++config GRKERNSEC_SYSCTL
++      bool "Sysctl support"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, you will be able to change the options that
++        grsecurity runs with at bootup, without having to recompile your
++        kernel.  You can echo values to files in /proc/sys/kernel/grsecurity
++        to enable (1) or disable (0) various features.  All the sysctl entries
++        are mutable until the "grsec_lock" entry is set to a non-zero value.
++        All features enabled in the kernel configuration are disabled at boot
++        if you do not say Y to the "Turn on features by default" option.
++        All options should be set at startup, and the grsec_lock entry should
++        be set to a non-zero value after all the options are set.
++        *THIS IS EXTREMELY IMPORTANT*
++
++config GRKERNSEC_SYSCTL_DISTRO
++      bool "Extra sysctl support for distro makers (READ HELP)"
++      depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
++      help
++        If you say Y here, additional sysctl options will be created
++        for features that affect processes running as root.  Therefore,
++        it is critical when using this option that the grsec_lock entry be
++        enabled after boot.  Only distros that ship prebuilt kernel packages
++        with this option enabled, and that can ensure grsec_lock is enabled
++        after boot, should use this option.
++        *Failure to set grsec_lock after boot makes all grsec features
++        this option covers useless*
++
++        Currently this option creates the following sysctl entries:
++        "Disable Privileged I/O": "disable_priv_io"   
++
++config GRKERNSEC_SYSCTL_ON
++      bool "Turn on features by default"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_SYSCTL
++      help
++        If you say Y here, instead of having all features enabled in the
++        kernel configuration disabled at boot time, the features will be
++        enabled at boot time.  It is recommended you say Y here unless
++        there is some reason you would want all sysctl-tunable features to
++        be disabled by default.  As mentioned elsewhere, it is important
++        to enable the grsec_lock entry once you have finished modifying
++        the sysctl entries.
++
++endmenu
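As the Sysctl Support help above describes, the tunables appear as files under /proc/sys/kernel/grsecurity and should all be set before grsec_lock is raised. A small, hypothetical boot-time helper (entry names are taken from the help texts above; error handling is minimal) might look like this:

    #include <stdio.h>

    /* write one value into a grsecurity sysctl entry */
    static int grsec_write(const char *entry, const char *value)
    {
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", entry);
        f = fopen(path, "w");
        if (!f) {
            perror(path);
            return -1;
        }
        fputs(value, f);
        return fclose(f);
    }

    int main(void)
    {
        grsec_write("linking_restrictions", "1");  /* enable individual features ... */
        grsec_write("chroot_deny_chroot", "1");
        grsec_write("grsec_lock", "1");            /* ... then lock the settings down */
        return 0;
    }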
++menu "Logging Options"
++depends on GRKERNSEC
++
++config GRKERNSEC_FLOODTIME
++      int "Seconds in between log messages (minimum)"
++      default 10
++      help
++        This option allows you to enforce the number of seconds between
++        grsecurity log messages.  The default should be suitable for most
++        people; however, if you choose to change it, choose a value small enough
++        to allow informative logs to be produced, but large enough to
++        prevent flooding.
++
++config GRKERNSEC_FLOODBURST
++      int "Number of messages in a burst (maximum)"
++      default 6
++      help
++        This option allows you to choose the maximum number of messages allowed
++        within the flood time interval you chose in a separate option.  The
++        default should be suitable for most people; however, if you find that
++        many of your logs are being interpreted as flooding, you may want to
++        raise this value.
++
++endmenu
+diff --git a/grsecurity/Makefile b/grsecurity/Makefile
+new file mode 100644
+index 0000000..36845aa
+--- /dev/null
++++ b/grsecurity/Makefile
+@@ -0,0 +1,42 @@
++# grsecurity's ACL system was originally written in 2001 by Michael Dalton
++# during 2001-2009 it has been completely redesigned by Brad Spengler
++# into an RBAC system
++#
++# All code in this directory and various hooks inserted throughout the kernel
++# are copyright Brad Spengler - Open Source Security, Inc., and released 
++# under the GPL v2 or higher
++
++KBUILD_CFLAGS += -Werror
++
++obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
++      grsec_mount.o grsec_sig.o grsec_sysctl.o \
++      grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
++
++obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
++      gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
++      gracl_learn.o grsec_log.o
++ifdef CONFIG_COMPAT
++obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
++endif
++
++obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
++
++ifdef CONFIG_NET
++obj-y += grsec_sock.o
++obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
++endif
++
++ifndef CONFIG_GRKERNSEC
++obj-y += grsec_disabled.o
++endif
++
++ifdef CONFIG_GRKERNSEC_HIDESYM
++extra-y := grsec_hidesym.o
++$(obj)/grsec_hidesym.o:
++      @-chmod -f 500 /boot
++      @-chmod -f 500 /lib/modules
++      @-chmod -f 500 /lib64/modules
++      @-chmod -f 500 /lib32/modules
++      @-chmod -f 700 .
++      @echo '  grsec: protected kernel image paths'
++endif
+diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
+new file mode 100644
+index 0000000..c0793fd
+--- /dev/null
++++ b/grsecurity/gracl.c
+@@ -0,0 +1,4178 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/mount.h>
++#include <linux/tty.h>
++#include <linux/proc_fs.h>
++#include <linux/lglock.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/sysctl.h>
++#include <linux/netdevice.h>
++#include <linux/ptrace.h>
++#include <linux/gracl.h>
++#include <linux/gralloc.h>
++#include <linux/security.h>
++#include <linux/grinternal.h>
++#include <linux/pid_namespace.h>
++#include <linux/stop_machine.h>
++#include <linux/fdtable.h>
++#include <linux/percpu.h>
++#include <linux/lglock.h>
++#include <linux/hugetlb.h>
++#include <linux/posix-timers.h>
++#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
++#include <linux/magic.h>
++#include <linux/pagemap.h>
++#include "../fs/btrfs/async-thread.h"
++#include "../fs/btrfs/ctree.h"
++#include "../fs/btrfs/btrfs_inode.h"
++#endif
++#include "../fs/mount.h"
++
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <asm/mman.h>
++
++extern struct lglock vfsmount_lock;
++
++static struct acl_role_db acl_role_set;
++static struct name_db name_set;
++static struct inodev_db inodev_set;
++
++/* for keeping track of userspace pointers used for subjects, so we
++   can share references in the kernel as well
++*/
++
++static struct path real_root;
++
++static struct acl_subj_map_db subj_map_set;
++
++static struct acl_role_label *default_role;
++
++static struct acl_role_label *role_list;
++
++static u16 acl_sp_role_value;
++
++extern char *gr_shared_page[4];
++static DEFINE_MUTEX(gr_dev_mutex);
++DEFINE_RWLOCK(gr_inode_lock);
++
++struct gr_arg *gr_usermode;
++
++static unsigned int gr_status __read_only = GR_STATUS_INIT;
++
++extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
++extern void gr_clear_learn_entries(void);
++
++unsigned char *gr_system_salt;
++unsigned char *gr_system_sum;
++
++static struct sprole_pw **acl_special_roles = NULL;
++static __u16 num_sprole_pws = 0;
++
++static struct acl_role_label *kernel_role = NULL;
++
++static unsigned int gr_auth_attempts = 0;
++static unsigned long gr_auth_expires = 0UL;
++
++#ifdef CONFIG_NET
++extern struct vfsmount *sock_mnt;
++#endif
++
++extern struct vfsmount *pipe_mnt;
++extern struct vfsmount *shm_mnt;
++
++#ifdef CONFIG_HUGETLBFS
++extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
++#endif
++
++static struct acl_object_label *fakefs_obj_rw;
++static struct acl_object_label *fakefs_obj_rwx;
++
++extern int gr_init_uidset(void);
++extern void gr_free_uidset(void);
++extern void gr_remove_uid(uid_t uid);
++extern int gr_find_uid(uid_t uid);
++
++static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
++{
++      if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
++{
++      if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
++{
++      if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
++{
++      if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
++{
++      if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
++{
++      if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
++{
++      if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
++{
++      if (copy_from_user(trans, userp, sizeof(struct role_transition)))
++              return -EFAULT;
++
++      return 0;
++}
++
++int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
++{
++      if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
++{
++      if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
++              return -EFAULT;
++
++      if ((uwrap->version != GRSECURITY_VERSION) || (uwrap->size != sizeof(struct gr_arg)))
++              return -EINVAL;
++
++      return 0;
++}
++
++static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
++{
++      if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
++              return -EFAULT;
++
++      return 0;
++}
++
++static size_t get_gr_arg_wrapper_size_normal(void)
++{
++      return sizeof(struct gr_arg_wrapper);
++}
++
++#ifdef CONFIG_COMPAT
++extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
++extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
++extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
++extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
++extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
++extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
++extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
++extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
++extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
++extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
++extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
++extern size_t get_gr_arg_wrapper_size_compat(void);
++
++int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
++int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
++int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
++int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
++int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
++int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
++int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
++int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
++int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
++int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
++int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
++size_t (* get_gr_arg_wrapper_size)(void) __read_only;
++
++#else
++#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
++#define copy_gr_arg copy_gr_arg_normal
++#define copy_gr_hash_struct copy_gr_hash_struct_normal
++#define copy_acl_object_label copy_acl_object_label_normal
++#define copy_acl_subject_label copy_acl_subject_label_normal
++#define copy_acl_role_label copy_acl_role_label_normal
++#define copy_acl_ip_label copy_acl_ip_label_normal
++#define copy_pointer_from_array copy_pointer_from_array_normal
++#define copy_sprole_pw copy_sprole_pw_normal
++#define copy_role_transition copy_role_transition_normal
++#define copy_role_allowed_ip copy_role_allowed_ip_normal
++#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
++#endif
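The block above follows a common dispatch pattern: with CONFIG_COMPAT the userspace-copy helpers are reached through function pointers (presumably so the *_compat variants can be substituted when a 32-bit loader talks to the kernel), while without it the names collapse to the *_normal implementations at compile time. A stand-alone sketch of the same idea, with made-up names:

    #include <string.h>

    struct blob { unsigned int version; unsigned long size; };

    static int copy_blob_normal(struct blob *dst, const void *src)
    {
        memcpy(dst, src, sizeof(*dst));   /* stands in for copy_from_user() */
        return 0;
    }

    #ifdef MY_COMPAT
    static int copy_blob_compat(struct blob *dst, const void *src)
    {
        return copy_blob_normal(dst, src);   /* would translate a 32-bit layout */
    }

    /* indirection: can be pointed at the compat variant before use */
    static int (*copy_blob)(struct blob *dst, const void *src) = copy_blob_normal;
    #else
    /* no compat support: the indirection disappears at compile time */
    #define copy_blob copy_blob_normal
    #endif

    int main(void)
    {
        struct blob in = { 1, sizeof(struct blob) }, out;

    #ifdef MY_COMPAT
        copy_blob = copy_blob_compat;
    #endif
        return copy_blob(&out, &in);
    }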
++
++__inline__ int
++gr_acl_is_enabled(void)
++{
++      return (gr_status & GR_READY);
++}
++
++static inline dev_t __get_dev(const struct dentry *dentry)
++{
++#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
++      if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
++              return BTRFS_I(dentry->d_inode)->root->anon_dev;
++      else
++#endif
++              return dentry->d_sb->s_dev;
++}
++
++dev_t gr_get_dev_from_dentry(struct dentry *dentry)
++{
++      return __get_dev(dentry);
++}
++
++static char gr_task_roletype_to_char(struct task_struct *task)
++{
++      switch (task->role->roletype &
++              (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
++               GR_ROLE_SPECIAL)) {
++      case GR_ROLE_DEFAULT:
++              return 'D';
++      case GR_ROLE_USER:
++              return 'U';
++      case GR_ROLE_GROUP:
++              return 'G';
++      case GR_ROLE_SPECIAL:
++              return 'S';
++      }
++
++      return 'X';
++}
++
++char gr_roletype_to_char(void)
++{
++      return gr_task_roletype_to_char(current);
++}
++
++__inline__ int
++gr_acl_tpe_check(void)
++{
++      if (unlikely(!(gr_status & GR_READY)))
++              return 0;
++      if (current->role->roletype & GR_ROLE_TPE)
++              return 1;
++      else
++              return 0;
++}
++
++int
++gr_handle_rawio(const struct inode *inode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++      if (inode && S_ISBLK(inode->i_mode) &&
++          grsec_enable_chroot_caps && proc_is_chrooted(current) &&
++          !capable(CAP_SYS_RAWIO))
++              return 1;
++#endif
++      return 0;
++}
++
++static int
++gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
++{
++      if (likely(lena != lenb))
++              return 0;
++
++      return !memcmp(a, b, lena);
++}
++
++static int prepend(char **buffer, int *buflen, const char *str, int namelen)
++{
++      *buflen -= namelen;
++      if (*buflen < 0)
++              return -ENAMETOOLONG;
++      *buffer -= namelen;
++      memcpy(*buffer, str, namelen);
++      return 0;
++}
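prepend() above assembles path strings right to left into the end of a caller-supplied buffer, so each component is added without moving what has already been written. A small user-space illustration of the same technique (the kernel version returns -ENAMETOOLONG on overflow):

    #include <stdio.h>
    #include <string.h>

    static int prepend(char **buffer, int *buflen, const char *str, int namelen)
    {
        *buflen -= namelen;
        if (*buflen < 0)
            return -1;               /* -ENAMETOOLONG in the kernel version */
        *buffer -= namelen;
        memcpy(*buffer, str, namelen);
        return 0;
    }

    int main(void)
    {
        char buf[64];
        char *res = buf + sizeof(buf);
        int buflen = sizeof(buf);

        prepend(&res, &buflen, "\0", 1);     /* terminator goes in first */
        prepend(&res, &buflen, "leaf", 4);   /* then the leaf component ... */
        prepend(&res, &buflen, "/", 1);
        prepend(&res, &buflen, "parent", 6);
        prepend(&res, &buflen, "/", 1);      /* ... walking up towards "/" */

        printf("%s\n", res);                 /* prints /parent/leaf */
        return 0;
    }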
++
++static int prepend_name(char **buffer, int *buflen, struct qstr *name)
++{
++      return prepend(buffer, buflen, name->name, name->len);
++}
++
++static int prepend_path(const struct path *path, struct path *root,
++                      char **buffer, int *buflen)
++{
++      struct dentry *dentry = path->dentry;
++      struct vfsmount *vfsmnt = path->mnt;
++      struct mount *mnt = real_mount(vfsmnt);
++      bool slash = false;
++      int error = 0;
++
++      while (dentry != root->dentry || vfsmnt != root->mnt) {
++              struct dentry * parent;
++
++              if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
++                      /* Global root? */
++                      if (!mnt_has_parent(mnt)) {
++                              goto out;
++                      }
++                      dentry = mnt->mnt_mountpoint;
++                      mnt = mnt->mnt_parent;
++                      vfsmnt = &mnt->mnt;
++                      continue;
++              }
++              parent = dentry->d_parent;
++              prefetch(parent);
++              spin_lock(&dentry->d_lock);
++              error = prepend_name(buffer, buflen, &dentry->d_name);
++              spin_unlock(&dentry->d_lock);
++              if (!error)
++                      error = prepend(buffer, buflen, "/", 1);
++              if (error)
++                      break;
++
++              slash = true;
++              dentry = parent;
++      }
++
++out:
++      if (!error && !slash)
++              error = prepend(buffer, buflen, "/", 1);
++
++      return error;
++}
++
++/* this must be called with vfsmount_lock and rename_lock held */
++
++static char *__our_d_path(const struct path *path, struct path *root,
++                      char *buf, int buflen)
++{
++      char *res = buf + buflen;
++      int error;
++
++      prepend(&res, &buflen, "\0", 1);
++      error = prepend_path(path, root, &res, &buflen);
++      if (error)
++              return ERR_PTR(error);
++
++      return res;
++}
++
++static char *
++gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
++{
++      char *retval;
++
++      retval = __our_d_path(path, root, buf, buflen);
++      if (unlikely(IS_ERR(retval)))
++              retval = strcpy(buf, "<path too long>");
++      else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
++              retval[1] = '\0';
++
++      return retval;
++}
++
++static char *
++__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
++              char *buf, int buflen)
++{
++      struct path path;
++      char *res;
++
++      path.dentry = (struct dentry *)dentry;
++      path.mnt = (struct vfsmount *)vfsmnt;
++
++      /* we can use real_root.dentry, real_root.mnt, because this is only called
++         by the RBAC system */
++      res = gen_full_path(&path, &real_root, buf, buflen);
++
++      return res;
++}
++
++static char *
++d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
++          char *buf, int buflen)
++{
++      char *res;
++      struct path path;
++      struct path root;
++      struct task_struct *reaper = init_pid_ns.child_reaper;
++
++      path.dentry = (struct dentry *)dentry;
++      path.mnt = (struct vfsmount *)vfsmnt;
++
++      /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
++      get_fs_root(reaper->fs, &root);
++
++      br_read_lock(&vfsmount_lock);
++      write_seqlock(&rename_lock);
++      res = gen_full_path(&path, &root, buf, buflen);
++      write_sequnlock(&rename_lock);
++      br_read_unlock(&vfsmount_lock);
++
++      path_put(&root);
++      return res;
++}
++
++static char *
++gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      char *ret;
++      br_read_lock(&vfsmount_lock);
++      write_seqlock(&rename_lock);
++      ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
++                           PAGE_SIZE);
++      write_sequnlock(&rename_lock);
++      br_read_unlock(&vfsmount_lock);
++      return ret;
++}
++
++static char *
++gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      char *ret;
++      char *buf;
++      int buflen;
++
++      br_read_lock(&vfsmount_lock);
++      write_seqlock(&rename_lock);
++      buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
++      ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
++      buflen = (int)(ret - buf);
++      if (buflen >= 5)
++              prepend(&ret, &buflen, "/proc", 5);
++      else
++              ret = strcpy(buf, "<path too long>");
++      write_sequnlock(&rename_lock);
++      br_read_unlock(&vfsmount_lock);
++      return ret;
++}
++
++char *
++gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
++                           PAGE_SIZE);
++}
++
++char *
++gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
++                         PAGE_SIZE);
++}
++
++char *
++gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
++                         PAGE_SIZE);
++}
++
++char *
++gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
++                         PAGE_SIZE);
++}
++
++char *
++gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
++                         PAGE_SIZE);
++}
++
++__inline__ __u32
++to_gr_audit(const __u32 reqmode)
++{
++      /* masks off auditable permission flags, then shifts them to create
++         auditing flags, and adds the special case of append auditing if
++         we're requesting write */
++      return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
++}
++
++struct acl_subject_label *
++lookup_subject_map(const struct acl_subject_label *userp)
++{
++      unsigned int index = gr_shash(userp, subj_map_set.s_size);
++      struct subject_map *match;
++
++      match = subj_map_set.s_hash[index];
++
++      while (match && match->user != userp)
++              match = match->next;
++
++      if (match != NULL)
++              return match->kernel;
++      else
++              return NULL;
++}
++
++static void
++insert_subj_map_entry(struct subject_map *subjmap)
++{
++      unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
++      struct subject_map **curr;
++
++      subjmap->prev = NULL;
++
++      curr = &subj_map_set.s_hash[index];
++      if (*curr != NULL)
++              (*curr)->prev = subjmap;
++
++      subjmap->next = *curr;
++      *curr = subjmap;
++
++      return;
++}
++
++static struct acl_role_label *
++lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
++                    const gid_t gid)
++{
++      unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
++      struct acl_role_label *match;
++      struct role_allowed_ip *ipp;
++      unsigned int x;
++      u32 curr_ip = task->signal->curr_ip;
++
++      task->signal->saved_ip = curr_ip;
++
++      match = acl_role_set.r_hash[index];
++
++      while (match) {
++              if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
++                      for (x = 0; x < match->domain_child_num; x++) {
++                              if (match->domain_children[x] == uid)
++                                      goto found;
++                      }
++              } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
++                      break;
++              match = match->next;
++      }
++found:
++      if (match == NULL) {
++            try_group:
++              index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
++              match = acl_role_set.r_hash[index];
++
++              while (match) {
++                      if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
++                              for (x = 0; x < match->domain_child_num; x++) {
++                                      if (match->domain_children[x] == gid)
++                                              goto found2;
++                              }
++                      } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
++                              break;
++                      match = match->next;
++              }
++found2:
++              if (match == NULL)
++                      match = default_role;
++              if (match->allowed_ips == NULL)
++                      return match;
++              else {
++                      for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
++                              if (likely
++                                  ((ntohl(curr_ip) & ipp->netmask) ==
++                                   (ntohl(ipp->addr) & ipp->netmask)))
++                                      return match;
++                      }
++                      match = default_role;
++              }
++      } else if (match->allowed_ips == NULL) {
++              return match;
++      } else {
++              for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
++                      if (likely
++                          ((ntohl(curr_ip) & ipp->netmask) ==
++                           (ntohl(ipp->addr) & ipp->netmask)))
++                              return match;
++              }
++              goto try_group;
++      }
++
++      return match;
++}
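The allowed_ips walk at the end of lookup_acl_role_label() is a plain subnet test: the caller's address matches an entry when the two agree on every bit covered by the netmask. A self-contained illustration with made-up addresses:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t curr_ip = ntohl(inet_addr("192.168.1.77"));   /* requester */
        uint32_t allowed = ntohl(inet_addr("192.168.1.0"));    /* entry address */
        uint32_t netmask = ntohl(inet_addr("255.255.255.0"));  /* entry netmask */

        printf("match: %s\n",
               (curr_ip & netmask) == (allowed & netmask) ? "yes" : "no");
        return 0;
    }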
++
++struct acl_subject_label *
++lookup_acl_subj_label(const ino_t ino, const dev_t dev,
++                    const struct acl_role_label *role)
++{
++      unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
++      struct acl_subject_label *match;
++
++      match = role->subj_hash[index];
++
++      while (match && (match->inode != ino || match->device != dev ||
++             (match->mode & GR_DELETED))) {
++              match = match->next;
++      }
++
++      if (match && !(match->mode & GR_DELETED))
++              return match;
++      else
++              return NULL;
++}
++
++struct acl_subject_label *
++lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
++                        const struct acl_role_label *role)
++{
++      unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
++      struct acl_subject_label *match;
++
++      match = role->subj_hash[index];
++
++      while (match && (match->inode != ino || match->device != dev ||
++             !(match->mode & GR_DELETED))) {
++              match = match->next;
++      }
++
++      if (match && (match->mode & GR_DELETED))
++              return match;
++      else
++              return NULL;
++}
++
++static struct acl_object_label *
++lookup_acl_obj_label(const ino_t ino, const dev_t dev,
++                   const struct acl_subject_label *subj)
++{
++      unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
++      struct acl_object_label *match;
++
++      match = subj->obj_hash[index];
++
++      while (match && (match->inode != ino || match->device != dev ||
++             (match->mode & GR_DELETED))) {
++              match = match->next;
++      }
++
++      if (match && !(match->mode & GR_DELETED))
++              return match;
++      else
++              return NULL;
++}
++
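++/* Create-time variant of the object lookup: prefer a matching entry that is
++   flagged GR_DELETED (the pathname named in the policy was removed and is now
++   being recreated) and only fall back to a live, non-deleted entry when no
++   deleted one exists in the hash chain. */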
++static struct acl_object_label *
++lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
++                   const struct acl_subject_label *subj)
++{
++      unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
++      struct acl_object_label *match;
++
++      match = subj->obj_hash[index];
++
++      while (match && (match->inode != ino || match->device != dev ||
++             !(match->mode & GR_DELETED))) {
++              match = match->next;
++      }
++
++      if (match && (match->mode & GR_DELETED))
++              return match;
++
++      match = subj->obj_hash[index];
++
++      while (match && (match->inode != ino || match->device != dev ||
++             (match->mode & GR_DELETED))) {
++              match = match->next;
++      }
++
++      if (match && !(match->mode & GR_DELETED))
++              return match;
++      else
++              return NULL;
++}
++
++static struct name_entry *
++lookup_name_entry(const char *name)
++{
++      unsigned int len = strlen(name);
++      unsigned int key = full_name_hash(name, len);
++      unsigned int index = key % name_set.n_size;
++      struct name_entry *match;
++
++      match = name_set.n_hash[index];
++
++      while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
++              match = match->next;
++
++      return match;
++}
++
++static struct name_entry *
++lookup_name_entry_create(const char *name)
++{
++      unsigned int len = strlen(name);
++      unsigned int key = full_name_hash(name, len);
++      unsigned int index = key % name_set.n_size;
++      struct name_entry *match;
++
++      match = name_set.n_hash[index];
++
++      while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
++                       !match->deleted))
++              match = match->next;
++
++      if (match && match->deleted)
++              return match;
++
++      match = name_set.n_hash[index];
++
++      while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
++                       match->deleted))
++              match = match->next;
++
++      if (match && !match->deleted)
++              return match;
++      else
++              return NULL;
++}
++
++static struct inodev_entry *
++lookup_inodev_entry(const ino_t ino, const dev_t dev)
++{
++      unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
++      struct inodev_entry *match;
++
++      match = inodev_set.i_hash[index];
++
++      while (match && (match->nentry->inode != ino || match->nentry->device != dev))
++              match = match->next;
++
++      return match;
++}
++
++static void
++insert_inodev_entry(struct inodev_entry *entry)
++{
++      unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
++                                  inodev_set.i_size);
++      struct inodev_entry **curr;
++
++      entry->prev = NULL;
++
++      curr = &inodev_set.i_hash[index];
++      if (*curr != NULL)
++              (*curr)->prev = entry;
++
++      entry->next = *curr;
++      *curr = entry;
++
++      return;
++}
++
++static void
++__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
++{
++      unsigned int index =
++          gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
++      struct acl_role_label **curr;
++      struct acl_role_label *tmp, *tmp2;
++
++      curr = &acl_role_set.r_hash[index];
++
++      /* simple case, slot is empty, just set it to our role */
++      if (*curr == NULL) {
++              *curr = role;
++      } else {
++              /* example:
++                 1 -> 2 -> 3 (adding 2 -> 3 to here)
++                 2 -> 3
++              */
++              /* first check to see if we can already be reached via this slot */
++              tmp = *curr;
++              while (tmp && tmp != role)
++                      tmp = tmp->next;
++              if (tmp == role) {
++                      /* we don't need to add ourselves to this slot's chain */
++                      return;
++              }
++              /* we need to add ourselves to this chain, two cases */
++              if (role->next == NULL) {
++                      /* simple case, append the current chain to our role */
++                      role->next = *curr;
++                      *curr = role;
++              } else {
++                      /* 1 -> 2 -> 3 -> 4
++                         2 -> 3 -> 4
++                         3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
++                      */
++                      /* trickier case: walk our role's chain until we find
++                         the role for the start of the current slot's chain */
++                      tmp = role;
++                      tmp2 = *curr;
++                      while (tmp->next && tmp->next != tmp2)
++                              tmp = tmp->next;
++                      if (tmp->next == tmp2) {
++                              /* from example above, we found 3, so just
++                                 replace this slot's chain with ours */
++                              *curr = role;
++                      } else {
++                              /* we didn't find a subset of our role's chain
++                                 in the current slot's chain, so append their
++                                 chain to ours, and set us as the first role in
++                                 the slot's chain
++
++                                 we could fold this case with the case above,
++                                 but making it explicit for clarity
++                              */
++                              tmp->next = tmp2;
++                              *curr = role;
++                      }
++              }
++      }
++
++      return;
++}
++
++static void
++insert_acl_role_label(struct acl_role_label *role)
++{
++      int i;
++
++      if (role_list == NULL) {
++              role_list = role;
++              role->prev = NULL;
++      } else {
++              role->prev = role_list;
++              role_list = role;
++      }
++
++      /* used for hash chains */
++      role->next = NULL;
++
++      if (role->roletype & GR_ROLE_DOMAIN) {
++              for (i = 0; i < role->domain_child_num; i++)
++                      __insert_acl_role_label(role, role->domain_children[i]);
++      } else
++              __insert_acl_role_label(role, role->uidgid);
++}
++
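++/* Register a pathname in the name table (keyed by full_name_hash) and, for a
++   new name, also add a matching entry to the inode/device table so the object
++   can later be found either by path or by inode.  Returns 1 if the name was
++   already present or was inserted successfully, 0 on allocation failure. */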
++static int
++insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
++{
++      struct name_entry **curr, *nentry;
++      struct inodev_entry *ientry;
++      unsigned int len = strlen(name);
++      unsigned int key = full_name_hash(name, len);
++      unsigned int index = key % name_set.n_size;
++
++      curr = &name_set.n_hash[index];
++
++      while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
++              curr = &((*curr)->next);
++
++      if (*curr != NULL)
++              return 1;
++
++      nentry = acl_alloc(sizeof (struct name_entry));
++      if (nentry == NULL)
++              return 0;
++      ientry = acl_alloc(sizeof (struct inodev_entry));
++      if (ientry == NULL)
++              return 0;
++      ientry->nentry = nentry;
++
++      nentry->key = key;
++      nentry->name = name;
++      nentry->inode = inode;
++      nentry->device = device;
++      nentry->len = len;
++      nentry->deleted = deleted;
++
++      nentry->prev = NULL;
++      curr = &name_set.n_hash[index];
++      if (*curr != NULL)
++              (*curr)->prev = nentry;
++      nentry->next = *curr;
++      *curr = nentry;
++
++      /* insert us into the table searchable by inode/dev */
++      insert_inodev_entry(ientry);
++
++      return 1;
++}
++
++static void
++insert_acl_obj_label(struct acl_object_label *obj,
++                   struct acl_subject_label *subj)
++{
++      unsigned int index =
++          gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
++      struct acl_object_label **curr;
++
++      obj->prev = NULL;
++
++      curr = &subj->obj_hash[index];
++      if (*curr != NULL)
++              (*curr)->prev = obj;
++
++      obj->next = *curr;
++      *curr = obj;
++
++      return;
++}
++
++static void
++insert_acl_subj_label(struct acl_subject_label *obj,
++                    struct acl_role_label *role)
++{
++      unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
++      struct acl_subject_label **curr;
++
++      obj->prev = NULL;
++
++      curr = &role->subj_hash[index];
++      if (*curr != NULL)
++              (*curr)->prev = obj;
++
++      obj->next = *curr;
++      *curr = obj;
++
++      return;
++}
++
++/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
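++/* The sizes below are primes just under successive powers of two.  The table
++   is sized to the first prime strictly larger than the requested element
++   count, which is written back through *len: asking for 100 entries, for
++   example, yields a 127-bucket table.  Allocation uses kmalloc() when the
++   table fits in a page and vmalloc() otherwise; requests beyond the largest
++   prime, or that would overflow the allocation size, return NULL. */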
++
++static void *
++create_table(__u32 * len, int elementsize)
++{
++      unsigned int table_sizes[] = {
++              7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
++              32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
++              4194301, 8388593, 16777213, 33554393, 67108859
++      };
++      void *newtable = NULL;
++      unsigned int pwr = 0;
++
++      while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
++             table_sizes[pwr] <= *len)
++              pwr++;
++
++      if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
++              return newtable;
++
++      if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
++              newtable =
++                  kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
++      else
++              newtable = vmalloc(table_sizes[pwr] * elementsize);
++
++      *len = table_sizes[pwr];
++
++      return newtable;
++}
++
++static int
++init_variables(const struct gr_arg *arg)
++{
++      struct task_struct *reaper = init_pid_ns.child_reaper;
++      unsigned int stacksize;
++
++      subj_map_set.s_size = arg->role_db.num_subjects;
++      acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
++      name_set.n_size = arg->role_db.num_objects;
++      inodev_set.i_size = arg->role_db.num_objects;
++
++      if (!subj_map_set.s_size || !acl_role_set.r_size ||
++          !name_set.n_size || !inodev_set.i_size)
++              return 1;
++
++      if (!gr_init_uidset())
++              return 1;
++
++      /* set up the stack that holds allocation info */
++
++      stacksize = arg->role_db.num_pointers + 5;
++
++      if (!acl_alloc_stack_init(stacksize))
++              return 1;
++
++      /* grab reference for the real root dentry and vfsmount */
++      get_fs_root(reaper->fs, &real_root);
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++      printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
++#endif
++
++      fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
++      if (fakefs_obj_rw == NULL)
++              return 1;
++      fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
++
++      fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
++      if (fakefs_obj_rwx == NULL)
++              return 1;
++      fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
++
++      subj_map_set.s_hash =
++          (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
++      acl_role_set.r_hash =
++          (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
++      name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
++      inodev_set.i_hash =
++          (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
++
++      if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
++          !name_set.n_hash || !inodev_set.i_hash)
++              return 1;
++
++      memset(subj_map_set.s_hash, 0,
++             sizeof(struct subject_map *) * subj_map_set.s_size);
++      memset(acl_role_set.r_hash, 0,
++             sizeof (struct acl_role_label *) * acl_role_set.r_size);
++      memset(name_set.n_hash, 0,
++             sizeof (struct name_entry *) * name_set.n_size);
++      memset(inodev_set.i_hash, 0,
++             sizeof (struct inodev_entry *) * inodev_set.i_size);
++
++      return 0;
++}
++
++/* free information not needed after startup;
++   currently this is the user->kernel pointer mappings for subjects
++*/
++
++static void
++free_init_variables(void)
++{
++      __u32 i;
++
++      if (subj_map_set.s_hash) {
++              for (i = 0; i < subj_map_set.s_size; i++) {
++                      if (subj_map_set.s_hash[i]) {
++                              kfree(subj_map_set.s_hash[i]);
++                              subj_map_set.s_hash[i] = NULL;
++                      }
++              }
++
++              if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
++                  PAGE_SIZE)
++                      kfree(subj_map_set.s_hash);
++              else
++                      vfree(subj_map_set.s_hash);
++      }
++
++      return;
++}
++
++static void
++free_variables(void)
++{
++      struct acl_subject_label *s;
++      struct acl_role_label *r;
++      struct task_struct *task, *task2;
++      unsigned int x;
++
++      gr_clear_learn_entries();
++
++      read_lock(&tasklist_lock);
++      do_each_thread(task2, task) {
++              task->acl_sp_role = 0;
++              task->acl_role_id = 0;
++              task->acl = NULL;
++              task->role = NULL;
++      } while_each_thread(task2, task);
++      read_unlock(&tasklist_lock);
++
++      /* release the reference to the real root dentry and vfsmount */
++      path_put(&real_root);
++      memset(&real_root, 0, sizeof(real_root));
++
++      /* free all object hash tables */
++
++      FOR_EACH_ROLE_START(r)
++              if (r->subj_hash == NULL)
++                      goto next_role;
++              FOR_EACH_SUBJECT_START(r, s, x)
++                      if (s->obj_hash == NULL)
++                              break;
++                      if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
++                              kfree(s->obj_hash);
++                      else
++                              vfree(s->obj_hash);
++              FOR_EACH_SUBJECT_END(s, x)
++              FOR_EACH_NESTED_SUBJECT_START(r, s)
++                      if (s->obj_hash == NULL)
++                              break;
++                      if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
++                              kfree(s->obj_hash);
++                      else
++                              vfree(s->obj_hash);
++              FOR_EACH_NESTED_SUBJECT_END(s)
++              if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
++                      kfree(r->subj_hash);
++              else
++                      vfree(r->subj_hash);
++              r->subj_hash = NULL;
++next_role:
++      FOR_EACH_ROLE_END(r)
++
++      acl_free_all();
++
++      if (acl_role_set.r_hash) {
++              if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
++                  PAGE_SIZE)
++                      kfree(acl_role_set.r_hash);
++              else
++                      vfree(acl_role_set.r_hash);
++      }
++      if (name_set.n_hash) {
++              if ((name_set.n_size * sizeof (struct name_entry *)) <=
++                  PAGE_SIZE)
++                      kfree(name_set.n_hash);
++              else
++                      vfree(name_set.n_hash);
++      }
++
++      if (inodev_set.i_hash) {
++              if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
++                  PAGE_SIZE)
++                      kfree(inodev_set.i_hash);
++              else
++                      vfree(inodev_set.i_hash);
++      }
++
++      gr_free_uidset();
++
++      memset(&name_set, 0, sizeof (struct name_db));
++      memset(&inodev_set, 0, sizeof (struct inodev_db));
++      memset(&acl_role_set, 0, sizeof (struct acl_role_db));
++      memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
++
++      default_role = NULL;
++      kernel_role = NULL;
++      role_list = NULL;
++
++      return;
++}
++
++static struct acl_subject_label *
++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
++
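++/* Copy a NUL-terminated string from userspace into policy (acl_alloc) memory.
++   strnlen_user() counts the terminating NUL, so a faulting pointer or a string
++   whose length (including the NUL) reaches maxlen is rejected with -EINVAL;
++   on success the userspace pointer is replaced in place by the kernel copy. */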
++static int alloc_and_copy_string(char **name, unsigned int maxlen)
++{
++      unsigned int len = strnlen_user(*name, maxlen);
++      char *tmp;
++
++      if (!len || len >= maxlen)
++              return -EINVAL;
++
++      if ((tmp = (char *) acl_alloc(len)) == NULL)
++              return -ENOMEM;
++
++      if (copy_from_user(tmp, *name, len))
++              return -EFAULT;
++
++      tmp[len-1] = '\0';
++      *name = tmp;
++
++      return 0;
++}
++
++static int
++copy_user_glob(struct acl_object_label *obj)
++{
++      struct acl_object_label *g_tmp, **guser;
++      int error;
++
++      if (obj->globbed == NULL)
++              return 0;
++
++      guser = &obj->globbed;
++      while (*guser) {
++              g_tmp = (struct acl_object_label *)
++                      acl_alloc(sizeof (struct acl_object_label));
++              if (g_tmp == NULL)
++                      return -ENOMEM;
++
++              if (copy_acl_object_label(g_tmp, *guser))
++                      return -EFAULT;
++
++              error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
++              if (error)
++                      return error;
++
++              *guser = g_tmp;
++              guser = &(g_tmp->next);
++      }
++
++      return 0;
++}
++
++static int
++copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
++             struct acl_role_label *role)
++{
++      struct acl_object_label *o_tmp;
++      int ret;
++
++      while (userp) {
++              if ((o_tmp = (struct acl_object_label *)
++                   acl_alloc(sizeof (struct acl_object_label))) == NULL)
++                      return -ENOMEM;
++
++              if (copy_acl_object_label(o_tmp, userp))
++                      return -EFAULT;
++
++              userp = o_tmp->prev;
++
++              ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
++              if (ret)
++                      return ret;
++
++              insert_acl_obj_label(o_tmp, subj);
++              if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
++                                     o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
++                      return -ENOMEM;
++
++              ret = copy_user_glob(o_tmp);
++              if (ret)
++                      return ret;
++
++              if (o_tmp->nested) {
++                      int already_copied;
++
++                      o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
++                      if (IS_ERR(o_tmp->nested))
++                              return PTR_ERR(o_tmp->nested);
++
++                      /* insert into nested subject list if we haven't copied this one yet
++                         to prevent duplicate entries */
++                      if (!already_copied) {
++                              o_tmp->nested->next = role->hash->first;
++                              role->hash->first = o_tmp->nested;
++                      }
++              }
++      }
++
++      return 0;
++}
++
++static __u32
++count_user_subjs(struct acl_subject_label *userp)
++{
++      struct acl_subject_label s_tmp;
++      __u32 num = 0;
++
++      while (userp) {
++              if (copy_acl_subject_label(&s_tmp, userp))
++                      break;
++
++              userp = s_tmp.prev;
++              num++;
++      }
++
++      return num;
++}
++
++static int
++copy_user_allowedips(struct acl_role_label *rolep)
++{
++      struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
++
++      ruserip = rolep->allowed_ips;
++
++      while (ruserip) {
++              rlast = rtmp;
++
++              if ((rtmp = (struct role_allowed_ip *)
++                   acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
++                      return -ENOMEM;
++
++              if (copy_role_allowed_ip(rtmp, ruserip))
++                      return -EFAULT;
++
++              ruserip = rtmp->prev;
++
++              if (!rlast) {
++                      rtmp->prev = NULL;
++                      rolep->allowed_ips = rtmp;
++              } else {
++                      rlast->next = rtmp;
++                      rtmp->prev = rlast;
++              }
++
++              if (!ruserip)
++                      rtmp->next = NULL;
++      }
++
++      return 0;
++}
++
++static int
++copy_user_transitions(struct acl_role_label *rolep)
++{
++      struct role_transition *rusertp, *rtmp = NULL, *rlast;
++      int error;
++
++      rusertp = rolep->transitions;
++
++      while (rusertp) {
++              rlast = rtmp;
++
++              if ((rtmp = (struct role_transition *)
++                   acl_alloc(sizeof (struct role_transition))) == NULL)
++                      return -ENOMEM;
++
++              if (copy_role_transition(rtmp, rusertp))
++                      return -EFAULT;
++
++              rusertp = rtmp->prev;
++
++              error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
++              if (error)
++                      return error;
++
++              if (!rlast) {
++                      rtmp->prev = NULL;
++                      rolep->transitions = rtmp;
++              } else {
++                      rlast->next = rtmp;
++                      rtmp->prev = rlast;
++              }
++
++              if (!rusertp)
++                      rtmp->next = NULL;
++      }
++
++      return 0;
++}
++
++static __u32 count_user_objs(const struct acl_object_label __user *userp)
++{
++      struct acl_object_label o_tmp;
++      __u32 num = 0;
++
++      while (userp) {
++              if (copy_acl_object_label(&o_tmp, userp))
++                      break;
++
++              userp = o_tmp.prev;
++              num++;
++      }
++
++      return num;
++}
++
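++/* Copy a single subject and everything hanging off it from userspace.  The
++   subject is entered into the user->kernel subject map first, so a subject
++   referenced from several places is copied only once (already_copied reports
++   a repeat to the caller); its filename, uid/gid transition tables, object
++   hash (including nested subjects), parent subject and per-subject IP ACLs
++   are then pulled in.  Returns the kernel copy or an ERR_PTR on failure. */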
++static struct acl_subject_label *
++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
++{
++      struct acl_subject_label *s_tmp = NULL, *s_tmp2;
++      __u32 num_objs;
++      struct acl_ip_label **i_tmp, *i_utmp2;
++      struct gr_hash_struct ghash;
++      struct subject_map *subjmap;
++      unsigned int i_num;
++      int err;
++
++      if (already_copied != NULL)
++              *already_copied = 0;
++
++      s_tmp = lookup_subject_map(userp);
++
++      /* we've already copied this subject into the kernel, just return
++         the reference to it, and don't copy it over again
++      */
++      if (s_tmp) {
++              if (already_copied != NULL)
++                      *already_copied = 1;
++              return(s_tmp);
++      }
++
++      if ((s_tmp = (struct acl_subject_label *)
++          acl_alloc(sizeof (struct acl_subject_label))) == NULL)
++              return ERR_PTR(-ENOMEM);
++
++      subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
++      if (subjmap == NULL)
++              return ERR_PTR(-ENOMEM);
++
++      subjmap->user = userp;
++      subjmap->kernel = s_tmp;
++      insert_subj_map_entry(subjmap);
++
++      if (copy_acl_subject_label(s_tmp, userp))
++              return ERR_PTR(-EFAULT);
++
++      err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
++      if (err)
++              return ERR_PTR(err);
++
++      if (!strcmp(s_tmp->filename, "/"))
++              role->root_label = s_tmp;
++
++      if (copy_gr_hash_struct(&ghash, s_tmp->hash))
++              return ERR_PTR(-EFAULT);
++
++      /* copy user and group transition tables */
++
++      if (s_tmp->user_trans_num) {
++              uid_t *uidlist;
++
++              uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
++              if (uidlist == NULL)
++                      return ERR_PTR(-ENOMEM);
++              if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
++                      return ERR_PTR(-EFAULT);
++
++              s_tmp->user_transitions = uidlist;
++      }
++
++      if (s_tmp->group_trans_num) {
++              gid_t *gidlist;
++
++              gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
++              if (gidlist == NULL)
++                      return ERR_PTR(-ENOMEM);
++              if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
++                      return ERR_PTR(-EFAULT);
++
++              s_tmp->group_transitions = gidlist;
++      }
++
++      /* set up object hash table */
++      num_objs = count_user_objs(ghash.first);
++
++      s_tmp->obj_hash_size = num_objs;
++      s_tmp->obj_hash =
++          (struct acl_object_label **)
++          create_table(&(s_tmp->obj_hash_size), sizeof(void *));
++
++      if (!s_tmp->obj_hash)
++              return ERR_PTR(-ENOMEM);
++
++      memset(s_tmp->obj_hash, 0,
++             s_tmp->obj_hash_size *
++             sizeof (struct acl_object_label *));
++
++      /* add in objects */
++      err = copy_user_objs(ghash.first, s_tmp, role);
++
++      if (err)
++              return ERR_PTR(err);
++
++      /* set pointer for parent subject */
++      if (s_tmp->parent_subject) {
++              s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
++
++              if (IS_ERR(s_tmp2))
++                      return s_tmp2;
++
++              s_tmp->parent_subject = s_tmp2;
++      }
++
++      /* add in ip acls */
++
++      if (!s_tmp->ip_num) {
++              s_tmp->ips = NULL;
++              goto insert;
++      }
++
++      i_tmp =
++          (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
++                                             sizeof (struct acl_ip_label *));
++
++      if (!i_tmp)
++              return ERR_PTR(-ENOMEM);
++
++      for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
++              *(i_tmp + i_num) =
++                  (struct acl_ip_label *)
++                  acl_alloc(sizeof (struct acl_ip_label));
++              if (!*(i_tmp + i_num))
++                      return ERR_PTR(-ENOMEM);
++
++              if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
++                      return ERR_PTR(-EFAULT);
++
++              if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
++                      return ERR_PTR(-EFAULT);
++
++              if ((*(i_tmp + i_num))->iface == NULL)
++                      continue;
++
++              err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
++              if (err)
++                      return ERR_PTR(err);
++      }
++
++      s_tmp->ips = i_tmp;
++
++insert:
++      if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
++                             s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
++              return ERR_PTR(-ENOMEM);
++
++      return s_tmp;
++}
++
++static int
++copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
++{
++      struct acl_subject_label s_pre;
++      struct acl_subject_label * ret;
++      int err;
++
++      while (userp) {
++              if (copy_acl_subject_label(&s_pre, userp))
++                      return -EFAULT;
++
++              ret = do_copy_user_subj(userp, role, NULL);
++
++              err = PTR_ERR(ret);
++              if (IS_ERR(ret))
++                      return err;
++
++              insert_acl_subj_label(ret, role);
++
++              userp = s_pre.prev;
++      }
++
++      return 0;
++}
++
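++/* Top-level copy of an RBAC policy from userspace: pull in the special-role
++   authentication entries, then every role together with its allowed IPs, role
++   transitions, domain children and (possibly nested) subjects, inserting each
++   role into the role hash as it is completed.  Both a "default" role and the
++   ":::kernel:::" role must be present or the load fails with -EINVAL. */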
++static int
++copy_user_acl(struct gr_arg *arg)
++{
++      struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
++      struct acl_subject_label *subj_list;
++      struct sprole_pw *sptmp;
++      struct gr_hash_struct *ghash;
++      uid_t *domainlist;
++      unsigned int r_num;
++      int err = 0;
++      __u16 i;
++      __u32 num_subjs;
++
++      /* we need a default and kernel role */
++      if (arg->role_db.num_roles < 2)
++              return -EINVAL;
++
++      /* copy special role authentication info from userspace */
++
++      num_sprole_pws = arg->num_sprole_pws;
++      acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
++
++      if (!acl_special_roles && num_sprole_pws)
++              return -ENOMEM;
++
++      for (i = 0; i < num_sprole_pws; i++) {
++              sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
++              if (!sptmp)
++                      return -ENOMEM;
++              if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
++                      return -EFAULT;
++
++              err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
++              if (err)
++                      return err;
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++              printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
++#endif
++
++              acl_special_roles[i] = sptmp;
++      }
++
++      r_utmp = (struct acl_role_label **) arg->role_db.r_table;
++
++      for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
++              r_tmp = acl_alloc(sizeof (struct acl_role_label));
++
++              if (!r_tmp)
++                      return -ENOMEM;
++
++              if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
++                      return -EFAULT;
++
++              if (copy_acl_role_label(r_tmp, r_utmp2))
++                      return -EFAULT;
++
++              err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
++              if (err)
++                      return err;
++
++              if (!strcmp(r_tmp->rolename, "default")
++                  && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
++                      default_role = r_tmp;
++              } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
++                      kernel_role = r_tmp;
++              }
++
++              if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
++                      return -ENOMEM;
++
++              if (copy_gr_hash_struct(ghash, r_tmp->hash))
++                      return -EFAULT;
++
++              r_tmp->hash = ghash;
++
++              num_subjs = count_user_subjs(r_tmp->hash->first);
++
++              r_tmp->subj_hash_size = num_subjs;
++              r_tmp->subj_hash =
++                  (struct acl_subject_label **)
++                  create_table(&(r_tmp->subj_hash_size), sizeof(void *));
++
++              if (!r_tmp->subj_hash)
++                      return -ENOMEM;
++
++              err = copy_user_allowedips(r_tmp);
++              if (err)
++                      return err;
++
++              /* copy domain info */
++              if (r_tmp->domain_children != NULL) {
++                      domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
++                      if (domainlist == NULL)
++                              return -ENOMEM;
++
++                      if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
++                              return -EFAULT;
++
++                      r_tmp->domain_children = domainlist;
++              }
++
++              err = copy_user_transitions(r_tmp);
++              if (err)
++                      return err;
++
++              memset(r_tmp->subj_hash, 0,
++                     r_tmp->subj_hash_size *
++                     sizeof (struct acl_subject_label *));
++
++              /* acquire the list of subjects, then NULL out
++                 the list prior to parsing the subjects for this role,
++                 as during this parsing the list is replaced with a list
++                 of *nested* subjects for the role
++              */
++              subj_list = r_tmp->hash->first;
++
++              /* set nested subject list to null */
++              r_tmp->hash->first = NULL;
++
++              err = copy_user_subjs(subj_list, r_tmp);
++
++              if (err)
++                      return err;
++
++              insert_acl_role_label(r_tmp);
++      }
++
++      if (default_role == NULL || kernel_role == NULL)
++              return -EINVAL;
++
++      return err;
++}
++
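++/* Entry point for enabling the RBAC system: record the admin salt and password
++   hash, size and allocate the global hash tables (init_variables), copy the
++   policy from userspace (copy_user_acl), apply the loaded roles and subjects
++   to all existing tasks via gr_set_acls(0), and only then mark the system
++   GR_READY.  Any failure tears the partially built state down again with
++   free_variables(). */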
++static int
++gracl_init(struct gr_arg *args)
++{
++      int error = 0;
++
++      memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
++      memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
++
++      if (init_variables(args)) {
++              gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
++              error = -ENOMEM;
++              free_variables();
++              goto out;
++      }
++
++      error = copy_user_acl(args);
++      free_init_variables();
++      if (error) {
++              free_variables();
++              goto out;
++      }
++
++      if ((error = gr_set_acls(0))) {
++              free_variables();
++              goto out;
++      }
++
++      pax_open_kernel();
++      gr_status |= GR_READY;
++      pax_close_kernel();
++
++      out:
++      return error;
++}
++
++/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
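++/* '?' and a non-trailing '*' never cross a '/' path separator, while a '*' at
++   the very end of a pattern matches the whole remainder of the path; '[...]'
++   ranges (with '!' or '^' negation) behave as in fnmatch() */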
++
++static int
++glob_match(const char *p, const char *n)
++{
++      char c;
++
++      while ((c = *p++) != '\0') {
++      switch (c) {
++              case '?':
++                      if (*n == '\0')
++                              return 1;
++                      else if (*n == '/')
++                              return 1;
++                      break;
++              case '\\':
++                      if (*n != c)
++                              return 1;
++                      break;
++              case '*':
++                      for (c = *p++; c == '?' || c == '*'; c = *p++) {
++                              if (*n == '/')
++                                      return 1;
++                              else if (c == '?') {
++                                      if (*n == '\0')
++                                              return 1;
++                                      else
++                                              ++n;
++                              }
++                      }
++                      if (c == '\0') {
++                              return 0;
++                      } else {
++                              const char *endp;
++
++                              if ((endp = strchr(n, '/')) == NULL)
++                                      endp = n + strlen(n);
++
++                              if (c == '[') {
++                                      for (--p; n < endp; ++n)
++                                              if (!glob_match(p, n))
++                                                      return 0;
++                              } else if (c == '/') {
++                                      while (*n != '\0' && *n != '/')
++                                              ++n;
++                                      if (*n == '/' && !glob_match(p, n + 1))
++                                              return 0;
++                              } else {
++                                      for (--p; n < endp; ++n)
++                                              if (*n == c && !glob_match(p, n))
++                                                      return 0;
++                              }
++
++                              return 1;
++                      }
++              case '[':
++                      {
++                      int not;
++                      char cold;
++
++                      if (*n == '\0' || *n == '/')
++                              return 1;
++
++                      not = (*p == '!' || *p == '^');
++                      if (not)
++                              ++p;
++
++                      c = *p++;
++                      for (;;) {
++                              unsigned char fn = (unsigned char)*n;
++
++                              if (c == '\0')
++                                      return 1;
++                              else {
++                                      if (c == fn)
++                                              goto matched;
++                                      cold = c;
++                                      c = *p++;
++
++                                      if (c == '-' && *p != ']') {
++                                              unsigned char cend = *p++;
++
++                                              if (cend == '\0')
++                                                      return 1;
++
++                                              if (cold <= fn && fn <= cend)
++                                                      goto matched;
++
++                                              c = *p++;
++                                      }
++                              }
++
++                              if (c == ']')
++                                      break;
++                      }
++                      if (!not)
++                              return 1;
++                      break;
++              matched:
++                      while (c != ']') {
++                              if (c == '\0')
++                                      return 1;
++
++                              c = *p++;
++                      }
++                      if (not)
++                              return 1;
++              }
++              break;
++      default:
++              if (c != *n)
++                      return 1;
++      }
++
++      ++n;
++      }
++
++      if (*n == '\0')
++              return 0;
++
++      if (*n == '/')
++              return 0;
++
++      return 1;
++}
++
++static struct acl_object_label *
++chk_glob_label(struct acl_object_label *globbed,
++      const struct dentry *dentry, const struct vfsmount *mnt, char **path)
++{
++      struct acl_object_label *tmp;
++
++      if (*path == NULL)
++              *path = gr_to_filename_nolock(dentry, mnt);
++
++      tmp = globbed;
++
++      while (tmp) {
++              if (!glob_match(tmp->filename, *path))
++                      return tmp;
++              tmp = tmp->next;
++      }
++
++      return NULL;
++}
++
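++/* Look up the object label for an inode/device under a subject, walking up the
++   subject's parent_subject chain until some ancestor defines a matching
++   object.  If that object carries glob rules and glob checking was requested,
++   the first glob entry whose pattern matches the full path is returned in its
++   place. */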
++static struct acl_object_label *
++__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
++          const ino_t curr_ino, const dev_t curr_dev,
++          const struct acl_subject_label *subj, char **path, const int checkglob)
++{
++      struct acl_subject_label *tmpsubj;
++      struct acl_object_label *retval;
++      struct acl_object_label *retval2;
++
++      tmpsubj = (struct acl_subject_label *) subj;
++      read_lock(&gr_inode_lock);
++      do {
++              retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
++              if (retval) {
++                      if (checkglob && retval->globbed) {
++                              retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
++                              if (retval2)
++                                      retval = retval2;
++                      }
++                      break;
++              }
++      } while ((tmpsubj = tmpsubj->parent_subject));
++      read_unlock(&gr_inode_lock);
++
++      return retval;
++}
++
++static __inline__ struct acl_object_label *
++full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
++          struct dentry *curr_dentry,
++          const struct acl_subject_label *subj, char **path, const int checkglob)
++{
++      int newglob = checkglob;
++      ino_t inode;
++      dev_t device;
++
++      /* If we aren't checking a subdirectory of the original path yet, skip glob
++         checking, as we don't want a / * rule to match instead of the / object.
++         Create lookups that call this function are the exception: they look up
++         on the parent and therefore need glob checks on all paths.
++      */
++      if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
++              newglob = GR_NO_GLOB;
++
++      spin_lock(&curr_dentry->d_lock);
++      inode = curr_dentry->d_inode->i_ino;
++      device = __get_dev(curr_dentry);
++      spin_unlock(&curr_dentry->d_lock);
++
++      return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
++}
++
++#ifdef CONFIG_HUGETLBFS
++static inline bool
++is_hugetlbfs_mnt(const struct vfsmount *mnt)
++{
++      int i;
++      for (i = 0; i < HUGE_MAX_HSTATE; i++) {
++              if (unlikely(hugetlbfs_vfsmount[i] == mnt))
++                      return true;
++      }
++
++      return false;
++}
++#endif
++
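++/* Find the object label governing a dentry/vfsmount pair.  Unlinked shmem and
++   hugetlbfs inodes, pipe and socket inodes and IS_PRIVATE inodes get a fake
++   read/write object (rwx for GR_SHMEXEC subjects); everything else is matched
++   by walking from the dentry up through parent directories and mountpoints to
++   the pinned real root, taking the first label found, with the "/" object as
++   the final fallback.  The walk runs under vfsmount_lock and rename_lock so
++   the path cannot change underneath it. */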
++static struct acl_object_label *
++__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++            const struct acl_subject_label *subj, char *path, const int checkglob)
++{
++      struct dentry *dentry = (struct dentry *) l_dentry;
++      struct vfsmount *mnt = (struct vfsmount *) l_mnt;
++      struct mount *real_mnt = real_mount(mnt);
++      struct acl_object_label *retval;
++      struct dentry *parent;
++
++      br_read_lock(&vfsmount_lock);
++      write_seqlock(&rename_lock);
++
++      if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
++#ifdef CONFIG_NET
++          mnt == sock_mnt ||
++#endif
++#ifdef CONFIG_HUGETLBFS
++          (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
++#endif
++              /* ignore Eric Biederman */
++          IS_PRIVATE(l_dentry->d_inode))) {
++              retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
++              goto out;
++      }
++
++      for (;;) {
++              if (dentry == real_root.dentry && mnt == real_root.mnt)
++                      break;
++
++              if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
++                      if (!mnt_has_parent(real_mnt))
++                              break;
++
++                      retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
++                      if (retval != NULL)
++                              goto out;
++
++                      dentry = real_mnt->mnt_mountpoint;
++                      real_mnt = real_mnt->mnt_parent;
++                      mnt = &real_mnt->mnt;
++                      continue;
++              }
++
++              parent = dentry->d_parent;
++              retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
++              if (retval != NULL)
++                      goto out;
++
++              dentry = parent;
++      }
++
++      retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
++
++      /* real_root is pinned so we don't have to hold a reference */
++      if (retval == NULL)
++              retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
++out:
++      write_sequnlock(&rename_lock);
++      br_read_unlock(&vfsmount_lock);
++
++      BUG_ON(retval == NULL);
++
++      return retval;
++}
++
++static __inline__ struct acl_object_label *
++chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++            const struct acl_subject_label *subj)
++{
++      char *path = NULL;
++      return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
++}
++
++static __inline__ struct acl_object_label *
++chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++            const struct acl_subject_label *subj)
++{
++      char *path = NULL;
++      return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
++}
++
++static __inline__ struct acl_object_label *
++chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++                   const struct acl_subject_label *subj, char *path)
++{
++      return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
++}
++
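++/* Find the subject label a role assigns to a binary: walk from the binary's
++   dentry up through parents and mountpoints toward the pinned real root and
++   return the first subject registered for that role on the path, falling back
++   to the subject attached to the real root. */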
++static struct acl_subject_label *
++chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++             const struct acl_role_label *role)
++{
++      struct dentry *dentry = (struct dentry *) l_dentry;
++      struct vfsmount *mnt = (struct vfsmount *) l_mnt;
++      struct mount *real_mnt = real_mount(mnt);
++      struct acl_subject_label *retval;
++      struct dentry *parent;
++
++      br_read_lock(&vfsmount_lock);
++      write_seqlock(&rename_lock);
++
++      for (;;) {
++              if (dentry == real_root.dentry && mnt == real_root.mnt)
++                      break;
++              if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
++                      if (!mnt_has_parent(real_mnt))
++                              break;
++
++                      spin_lock(&dentry->d_lock);
++                      read_lock(&gr_inode_lock);
++                      retval =
++                              lookup_acl_subj_label(dentry->d_inode->i_ino,
++                                              __get_dev(dentry), role);
++                      read_unlock(&gr_inode_lock);
++                      spin_unlock(&dentry->d_lock);
++                      if (retval != NULL)
++                              goto out;
++
++                      dentry = real_mnt->mnt_mountpoint;
++                      real_mnt = real_mnt->mnt_parent;
++                      mnt = &real_mnt->mnt;
++                      continue;
++              }
++
++              spin_lock(&dentry->d_lock);
++              read_lock(&gr_inode_lock);
++              retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
++                                        __get_dev(dentry), role);
++              read_unlock(&gr_inode_lock);
++              parent = dentry->d_parent;
++              spin_unlock(&dentry->d_lock);
++
++              if (retval != NULL)
++                      goto out;
++
++              dentry = parent;
++      }
++
++      spin_lock(&dentry->d_lock);
++      read_lock(&gr_inode_lock);
++      retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
++                                __get_dev(dentry), role);
++      read_unlock(&gr_inode_lock);
++      spin_unlock(&dentry->d_lock);
++
++      if (unlikely(retval == NULL)) {
++              /* real_root is pinned, we don't need to hold a reference */
++              read_lock(&gr_inode_lock);
++              retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
++                                        __get_dev(real_root.dentry), role);
++              read_unlock(&gr_inode_lock);
++      }
++out:
++      write_sequnlock(&rename_lock);
++      br_read_unlock(&vfsmount_lock);
++
++      BUG_ON(retval == NULL);
++
++      return retval;
++}
++
++static void
++gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
++{
++      struct task_struct *task = current;
++      const struct cred *cred = current_cred();
++
++      security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
++                     GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
++                     task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
++                     1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
++
++      return;
++}
++
++static void
++gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
++{
++      struct task_struct *task = current;
++      const struct cred *cred = current_cred();
++
++      security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
++                     GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
++                     task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
++                     'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
++
++      return;
++}
++
++static void
++gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
++{
++      struct task_struct *task = current;
++      const struct cred *cred = current_cred();
++
++      security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
++                     GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
++                     task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
++                     'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
++
++      return;
++}
++
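++/* Main access decision for file operations: returns the subset of the
++   requested mode bits that the current subject's object label grants.  A
++   write to a GR_INIT_TRANSFER object from a persistent role hands that role
++   over to init, and in (inherit-)learning mode a request the policy would not
++   fully grant is logged and allowed (minus the audit/suppress bits) rather
++   than refused. */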
++__u32
++gr_search_file(const struct dentry * dentry, const __u32 mode,
++             const struct vfsmount * mnt)
++{
++      __u32 retval = mode;
++      struct acl_subject_label *curracl;
++      struct acl_object_label *currobj;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return (mode & ~GR_AUDITS);
++
++      curracl = current->acl;
++
++      currobj = chk_obj_label(dentry, mnt, curracl);
++      retval = currobj->mode & mode;
++
++      /* if we're opening a specified transfer file for writing
++         (e.g. /dev/initctl), then transfer our role to init
++      */
++      if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
++                   current->role->roletype & GR_ROLE_PERSIST)) {
++              struct task_struct *task = init_pid_ns.child_reaper;
++
++              if (task->role != current->role) {
++                      task->acl_sp_role = 0;
++                      task->acl_role_id = current->acl_role_id;
++                      task->role = current->role;
++                      rcu_read_lock();
++                      read_lock(&grsec_exec_file_lock);
++                      gr_apply_subject_to_task(task);
++                      read_unlock(&grsec_exec_file_lock);
++                      rcu_read_unlock();
++                      gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
++              }
++      }
++
++      if (unlikely
++          ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
++           && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
++              __u32 new_mode = mode;
++
++              new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++              retval = new_mode;
++
++              if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
++                      new_mode |= GR_INHERIT;
++
++              if (!(mode & GR_NOLEARN))
++                      gr_log_learn(dentry, mnt, new_mode);
++      }
++
++      return retval;
++}
++
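++/* Object lookup for a path that is about to be created: first try an exact
++   match in the name table (which may reference a previously deleted entry for
++   this pathname); only when none is found fall back to the parent directory's
++   object label with create-style glob checking. */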
++struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
++                                            const struct dentry *parent,
++                                            const struct vfsmount *mnt)
++{
++      struct name_entry *match;
++      struct acl_object_label *matchpo;
++      struct acl_subject_label *curracl;
++      char *path;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return NULL;
++
++      preempt_disable();
++      path = gr_to_filename_rbac(new_dentry, mnt);
++      match = lookup_name_entry_create(path);
++
++      curracl = current->acl;
++
++      if (match) {
++              read_lock(&gr_inode_lock);
++              matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
++              read_unlock(&gr_inode_lock);
++
++              if (matchpo) {
++                      preempt_enable();
++                      return matchpo;
++              }
++      }
++
++      // lookup parent
++
++      matchpo = chk_obj_create_label(parent, mnt, curracl, path);
++
++      preempt_enable();
++      return matchpo;
++}
++
++__u32
++gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
++              const struct vfsmount * mnt, const __u32 mode)
++{
++      struct acl_object_label *matchpo;
++      __u32 retval;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return (mode & ~GR_AUDITS);
++
++      matchpo = gr_get_create_object(new_dentry, parent, mnt);
++
++      retval = matchpo->mode & mode;
++
++      if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
++          && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
++              __u32 new_mode = mode;
++
++              new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++              gr_log_learn(new_dentry, mnt, new_mode);
++              return new_mode;
++      }
++
++      return retval;
++}
++
++__u32
++gr_check_link(const struct dentry * new_dentry,
++            const struct dentry * parent_dentry,
++            const struct vfsmount * parent_mnt,
++            const struct dentry * old_dentry, const struct vfsmount * old_mnt)
++{
++      struct acl_object_label *obj;
++      __u32 oldmode, newmode;
++      __u32 needmode;
++      __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
++                         GR_DELETE | GR_INHERIT;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return (GR_CREATE | GR_LINK);
++
++      obj = chk_obj_label(old_dentry, old_mnt, current->acl);
++      oldmode = obj->mode;
++
++      obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
++      newmode = obj->mode;
++
++      needmode = newmode & checkmodes;
++
++      // old name for hardlink must have at least the permissions of the new name
++      if ((oldmode & needmode) != needmode)
++              goto bad;
++
++      // if old name had restrictions/auditing, make sure the new name does as well
++      needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
++
++      // don't allow hardlinking of suid/sgid/fcapped files without permission
++      if (is_privileged_binary(old_dentry))
++              needmode |= GR_SETID;
++
++      if ((newmode & needmode) != needmode)
++              goto bad;
++
++      // enforce minimum permissions
++      if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
++              return newmode;
++bad:
++      needmode = oldmode;
++      if (is_privileged_binary(old_dentry))
++              needmode |= GR_SETID;
++
++      if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
++              gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
++              return (GR_CREATE | GR_LINK);
++      } else if (newmode & GR_SUPPRESS)
++              return GR_SUPPRESS;
++      else
++              return 0;
++}
++
++int
++gr_check_hidden_task(const struct task_struct *task)
++{
++      if (unlikely(!(gr_status & GR_READY)))
++              return 0;
++
++      if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
++              return 1;
++
++      return 0;
++}
++
++int
++gr_check_protected_task(const struct task_struct *task)
++{
++      if (unlikely(!(gr_status & GR_READY) || !task))
++              return 0;
++
++      if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
++          task->acl != current->acl)
++              return 1;
++
++      return 0;
++}
++
++int
++gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
++{
++      struct task_struct *p;
++      int ret = 0;
++
++      if (unlikely(!(gr_status & GR_READY) || !pid))
++              return ret;
++
++      read_lock(&tasklist_lock);
++      do_each_pid_task(pid, type, p) {
++              if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
++                  p->acl != current->acl) {
++                      ret = 1;
++                      goto out;
++              }
++      } while_each_pid_task(pid, type, p);
++out:
++      read_unlock(&tasklist_lock);
++
++      return ret;
++}
++
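++/* copy the current task's RBAC state (role, subject, saved IPs and a
++   reference to its exec_file) to tsk */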
++void
++gr_copy_label(struct task_struct *tsk)
++{
++      tsk->signal->used_accept = 0;
++      tsk->acl_sp_role = 0;
++      tsk->acl_role_id = current->acl_role_id;
++      tsk->acl = current->acl;
++      tsk->role = current->role;
++      tsk->signal->curr_ip = current->signal->curr_ip;
++      tsk->signal->saved_ip = current->signal->saved_ip;
++      if (current->exec_file)
++              get_file(current->exec_file);
++      tsk->exec_file = current->exec_file;
++      tsk->is_writable = current->is_writable;
++      if (unlikely(current->signal->used_accept)) {
++              current->signal->curr_ip = 0;
++              current->signal->saved_ip = 0;
++      }
++
++      return;
++}
++
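++/* apply the resource limits stored in the task's subject, unless the
++   subject is in learning mode */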
++static void
++gr_set_proc_res(struct task_struct *task)
++{
++      struct acl_subject_label *proc;
++      unsigned short i;
++
++      proc = task->acl;
++
++      if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
++              return;
++
++      for (i = 0; i < RLIM_NLIMITS; i++) {
++              if (!(proc->resmask & (1U << i)))
++                      continue;
++
++              task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
++              task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
++
++              if (i == RLIMIT_CPU)
++                      update_rlimit_cpu(task, proc->res[i].rlim_cur);
++      }
++
++      return;
++}
++
++extern int gr_process_kernel_setuid_ban(struct user_struct *user);
++
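++/* returns non-zero if the requested uid change must be denied, either
++   due to a kernel setuid ban (CONFIG_GRKERNSEC_KERN_LOCKOUT) or because
++   the transition is not permitted by the subject's user_transitions list */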
++int
++gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
++{
++      unsigned int i;
++      __u16 num;
++      uid_t *uidlist;
++      uid_t curuid;
++      int realok = 0;
++      int effectiveok = 0;
++      int fsok = 0;
++      uid_t globalreal, globaleffective, globalfs;
++
++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
++      struct user_struct *user;
++
++      if (!uid_valid(real))
++              goto skipit;
++
++      /* find user based on global namespace */
++
++      globalreal = GR_GLOBAL_UID(real);
++
++      user = find_user(make_kuid(&init_user_ns, globalreal));
++      if (user == NULL)
++              goto skipit;
++
++      if (gr_process_kernel_setuid_ban(user)) {
++              /* for find_user */
++              free_uid(user);
++              return 1;
++      }
++
++      /* for find_user */
++      free_uid(user);
++
++skipit:
++#endif
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return 0;
++
++      if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
++              gr_log_learn_uid_change(real, effective, fs);
++
++      num = current->acl->user_trans_num;
++      uidlist = current->acl->user_transitions;
++
++      if (uidlist == NULL)
++              return 0;
++
++      if (!uid_valid(real)) {
++              realok = 1;
++              globalreal = (uid_t)-1;         
++      } else {
++              globalreal = GR_GLOBAL_UID(real);               
++      }
++      if (!uid_valid(effective)) {
++              effectiveok = 1;
++              globaleffective = (uid_t)-1;
++      } else {
++              globaleffective = GR_GLOBAL_UID(effective);
++      }
++      if (!uid_valid(fs)) {
++              fsok = 1;
++              globalfs = (uid_t)-1;
++      } else {
++              globalfs = GR_GLOBAL_UID(fs);
++      }
++
++      if (current->acl->user_trans_type & GR_ID_ALLOW) {
++              for (i = 0; i < num; i++) {
++                      curuid = uidlist[i];
++                      if (globalreal == curuid)
++                              realok = 1;
++                      if (globaleffective == curuid)
++                              effectiveok = 1;
++                      if (globalfs == curuid)
++                              fsok = 1;
++              }
++      } else if (current->acl->user_trans_type & GR_ID_DENY) {
++              for (i = 0; i < num; i++) {
++                      curuid = uidlist[i];
++                      if (globalreal == curuid)
++                              break;
++                      if (globaleffective == curuid)
++                              break;
++                      if (globalfs == curuid)
++                              break;
++              }
++              /* not in deny list */
++              if (i == num) {
++                      realok = 1;
++                      effectiveok = 1;
++                      fsok = 1;
++              }
++      }
++
++      if (realok && effectiveok && fsok)
++              return 0;
++      else {
++              gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
++              return 1;
++      }
++}
++
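++/* same checks as gr_check_user_change(), but for gid transitions */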
++int
++gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
++{
++      unsigned int i;
++      __u16 num;
++      gid_t *gidlist;
++      gid_t curgid;
++      int realok = 0;
++      int effectiveok = 0;
++      int fsok = 0;
++      gid_t globalreal, globaleffective, globalfs;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return 0;
++
++      if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
++              gr_log_learn_gid_change(real, effective, fs);
++
++      num = current->acl->group_trans_num;
++      gidlist = current->acl->group_transitions;
++
++      if (gidlist == NULL)
++              return 0;
++
++      if (!gid_valid(real)) {
++              realok = 1;
++              globalreal = (gid_t)-1;         
++      } else {
++              globalreal = GR_GLOBAL_GID(real);
++      }
++      if (!gid_valid(effective)) {
++              effectiveok = 1;
++              globaleffective = (gid_t)-1;            
++      } else {
++              globaleffective = GR_GLOBAL_GID(effective);
++      }
++      if (!gid_valid(fs)) {
++              fsok = 1;
++              globalfs = (gid_t)-1;           
++      } else {
++              globalfs = GR_GLOBAL_GID(fs);
++      }
++
++      if (current->acl->group_trans_type & GR_ID_ALLOW) {
++              for (i = 0; i < num; i++) {
++                      curgid = gidlist[i];
++                      if (globalreal == curgid)
++                              realok = 1;
++                      if (globaleffective == curgid)
++                              effectiveok = 1;
++                      if (globalfs == curgid)
++                              fsok = 1;
++              }
++      } else if (current->acl->group_trans_type & GR_ID_DENY) {
++              for (i = 0; i < num; i++) {
++                      curgid = gidlist[i];
++                      if (globalreal == curgid)
++                              break;
++                      if (globaleffective == curgid)
++                              break;
++                      if (globalfs == curgid)
++                              break;
++              }
++              /* not in deny list */
++              if (i == num) {
++                      realok = 1;
++                      effectiveok = 1;
++                      fsok = 1;
++              }
++      }
++
++      if (realok && effectiveok && fsok)
++              return 0;
++      else {
++              gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
++              return 1;
++      }
++}
++
++extern int gr_acl_is_capable(const int cap);
++
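++/* re-evaluate a task's role and subject after its uid/gid has changed */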
++void
++gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
++{
++      struct acl_role_label *role = task->role;
++      struct acl_subject_label *subj = NULL;
++      struct acl_object_label *obj;
++      struct file *filp;
++      uid_t uid;
++      gid_t gid;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return;
++
++      uid = GR_GLOBAL_UID(kuid);
++      gid = GR_GLOBAL_GID(kgid);
++
++      filp = task->exec_file;
++
++      /* kernel process, we'll give them the kernel role */
++      if (unlikely(!filp)) {
++              task->role = kernel_role;
++              task->acl = kernel_role->root_label;
++              return;
++      } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
++              role = lookup_acl_role_label(task, uid, gid);
++
++      /* don't change the role if we're not a privileged process */
++      if (role && task->role != role &&
++          (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
++           ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
++              return;
++
++      /* perform subject lookup in possibly new role
++         we can use this result below in the case where role == task->role
++      */
++      subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
++
++      /* if the uid/gid change results in the same role and we are
++         using inheritance, don't lose the inherited subject: if the
++         current subject differs from what a normal lookup would
++         return, we arrived at it via inheritance, so keep it
++      */
++      if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
++                                 (subj == task->acl)))
++              task->acl = subj;
++
++      task->role = role;
++
++      task->is_writable = 0;
++
++      /* ignore additional mmap checks for processes that are writable 
++         by the default ACL */
++      obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
++      if (unlikely(obj->mode & GR_WRITE))
++              task->is_writable = 1;
++      obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
++      if (unlikely(obj->mode & GR_WRITE))
++              task->is_writable = 1;
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++      printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
++#endif
++
++      gr_set_proc_res(task);
++
++      return;
++}
++
++int
++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
++                const int unsafe_flags)
++{
++      struct task_struct *task = current;
++      struct acl_subject_label *newacl;
++      struct acl_object_label *obj;
++      __u32 retmode;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return 0;
++
++      newacl = chk_subj_label(dentry, mnt, task->role);
++
++      /* special handling for the case where an admin role ran
++         strace -f -p <pid> and the traced pid then did an exec
++      */
++      rcu_read_lock();
++      read_lock(&tasklist_lock);
++      if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
++          (task->parent->acl->mode & GR_POVERRIDE))) {
++              read_unlock(&tasklist_lock);
++              rcu_read_unlock();
++              goto skip_check;
++      }
++      read_unlock(&tasklist_lock);
++      rcu_read_unlock();
++
++      if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
++           !(task->role->roletype & GR_ROLE_GOD) &&
++           !gr_search_file(dentry, GR_PTRACERD, mnt) &&
++           !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
++              if (unsafe_flags & LSM_UNSAFE_SHARE)
++                      gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
++              else
++                      gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
++              return -EACCES;
++      }
++
++skip_check:
++
++      obj = chk_obj_label(dentry, mnt, task->acl);
++      retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
++
++      if (!(task->acl->mode & GR_INHERITLEARN) &&
++          ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
++              if (obj->nested)
++                      task->acl = obj->nested;
++              else
++                      task->acl = newacl;
++      } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
++              gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
++
++      task->is_writable = 0;
++
++      /* ignore additional mmap checks for processes that are writable 
++         by the default ACL */
++      obj = chk_obj_label(dentry, mnt, default_role->root_label);
++      if (unlikely(obj->mode & GR_WRITE))
++              task->is_writable = 1;
++      obj = chk_obj_label(dentry, mnt, task->role->root_label);
++      if (unlikely(obj->mode & GR_WRITE))
++              task->is_writable = 1;
++
++      gr_set_proc_res(task);
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++      printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
++#endif
++      return 0;
++}
++
++/* always called with valid inodev ptr */
++static void
++do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
++{
++      struct acl_object_label *matchpo;
++      struct acl_subject_label *matchps;
++      struct acl_subject_label *subj;
++      struct acl_role_label *role;
++      unsigned int x;
++
++      FOR_EACH_ROLE_START(role)
++              FOR_EACH_SUBJECT_START(role, subj, x)
++                      if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
++                              matchpo->mode |= GR_DELETED;
++              FOR_EACH_SUBJECT_END(subj,x)
++              FOR_EACH_NESTED_SUBJECT_START(role, subj)
++                      /* nested subjects aren't in the role's subj_hash table */
++                      if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
++                              matchpo->mode |= GR_DELETED;
++              FOR_EACH_NESTED_SUBJECT_END(subj)
++              if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
++                      matchps->mode |= GR_DELETED;
++      FOR_EACH_ROLE_END(role)
++
++      inodev->nentry->deleted = 1;
++
++      return;
++}
++
++void
++gr_handle_delete(const ino_t ino, const dev_t dev)
++{
++      struct inodev_entry *inodev;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return;
++
++      write_lock(&gr_inode_lock);
++      inodev = lookup_inodev_entry(ino, dev);
++      if (inodev != NULL)
++              do_handle_delete(inodev, ino, dev);
++      write_unlock(&gr_inode_lock);
++
++      return;
++}
++
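++/* the next three helpers rebind a previously deleted object label,
++   subject label or inodev entry to a new inode/device pair and rehash it */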
++static void
++update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
++                   const ino_t newinode, const dev_t newdevice,
++                   struct acl_subject_label *subj)
++{
++      unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
++      struct acl_object_label *match;
++
++      match = subj->obj_hash[index];
++
++      while (match && (match->inode != oldinode ||
++             match->device != olddevice ||
++             !(match->mode & GR_DELETED)))
++              match = match->next;
++
++      if (match && (match->inode == oldinode)
++          && (match->device == olddevice)
++          && (match->mode & GR_DELETED)) {
++              if (match->prev == NULL) {
++                      subj->obj_hash[index] = match->next;
++                      if (match->next != NULL)
++                              match->next->prev = NULL;
++              } else {
++                      match->prev->next = match->next;
++                      if (match->next != NULL)
++                              match->next->prev = match->prev;
++              }
++              match->prev = NULL;
++              match->next = NULL;
++              match->inode = newinode;
++              match->device = newdevice;
++              match->mode &= ~GR_DELETED;
++
++              insert_acl_obj_label(match, subj);
++      }
++
++      return;
++}
++
++static void
++update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
++                    const ino_t newinode, const dev_t newdevice,
++                    struct acl_role_label *role)
++{
++      unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
++      struct acl_subject_label *match;
++
++      match = role->subj_hash[index];
++
++      while (match && (match->inode != oldinode ||
++             match->device != olddevice ||
++             !(match->mode & GR_DELETED)))
++              match = match->next;
++
++      if (match && (match->inode == oldinode)
++          && (match->device == olddevice)
++          && (match->mode & GR_DELETED)) {
++              if (match->prev == NULL) {
++                      role->subj_hash[index] = match->next;
++                      if (match->next != NULL)
++                              match->next->prev = NULL;
++              } else {
++                      match->prev->next = match->next;
++                      if (match->next != NULL)
++                              match->next->prev = match->prev;
++              }
++              match->prev = NULL;
++              match->next = NULL;
++              match->inode = newinode;
++              match->device = newdevice;
++              match->mode &= ~GR_DELETED;
++
++              insert_acl_subj_label(match, role);
++      }
++
++      return;
++}
++
++static void
++update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
++                  const ino_t newinode, const dev_t newdevice)
++{
++      unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
++      struct inodev_entry *match;
++
++      match = inodev_set.i_hash[index];
++
++      while (match && (match->nentry->inode != oldinode ||
++             match->nentry->device != olddevice || !match->nentry->deleted))
++              match = match->next;
++
++      if (match && (match->nentry->inode == oldinode)
++          && (match->nentry->device == olddevice) &&
++          match->nentry->deleted) {
++              if (match->prev == NULL) {
++                      inodev_set.i_hash[index] = match->next;
++                      if (match->next != NULL)
++                              match->next->prev = NULL;
++              } else {
++                      match->prev->next = match->next;
++                      if (match->next != NULL)
++                              match->next->prev = match->prev;
++              }
++              match->prev = NULL;
++              match->next = NULL;
++              match->nentry->inode = newinode;
++              match->nentry->device = newdevice;
++              match->nentry->deleted = 0;
++
++              insert_inodev_entry(match);
++      }
++
++      return;
++}
++
++static void
++__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
++{
++      struct acl_subject_label *subj;
++      struct acl_role_label *role;
++      unsigned int x;
++
++      FOR_EACH_ROLE_START(role)
++              update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
++
++              FOR_EACH_NESTED_SUBJECT_START(role, subj)
++                      if ((subj->inode == ino) && (subj->device == dev)) {
++                              subj->inode = ino;
++                              subj->device = dev;
++                      }
++                      /* nested subjects aren't in the role's subj_hash table */
++                      update_acl_obj_label(matchn->inode, matchn->device,
++                                           ino, dev, subj);
++              FOR_EACH_NESTED_SUBJECT_END(subj)
++              FOR_EACH_SUBJECT_START(role, subj, x)
++                      update_acl_obj_label(matchn->inode, matchn->device,
++                                           ino, dev, subj);
++              FOR_EACH_SUBJECT_END(subj,x)
++      FOR_EACH_ROLE_END(role)
++
++      update_inodev_entry(matchn->inode, matchn->device, ino, dev);
++
++      return;
++}
++
++static void
++do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
++               const struct vfsmount *mnt)
++{
++      ino_t ino = dentry->d_inode->i_ino;
++      dev_t dev = __get_dev(dentry);
++
++      __do_handle_create(matchn, ino, dev);   
++
++      return;
++}
++
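++/* called when a filesystem object is created: if the pathname is named
++   in the RBAC policy, rebind its labels to the newly assigned inode/device */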
++void
++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      struct name_entry *matchn;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return;
++
++      preempt_disable();
++      matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
++
++      if (unlikely((unsigned long)matchn)) {
++              write_lock(&gr_inode_lock);
++              do_handle_create(matchn, dentry, mnt);
++              write_unlock(&gr_inode_lock);
++      }
++      preempt_enable();
++
++      return;
++}
++
++void
++gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
++{
++      struct name_entry *matchn;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return;
++
++      preempt_disable();
++      matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
++
++      if (unlikely((unsigned long)matchn)) {
++              write_lock(&gr_inode_lock);
++              __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
++              write_unlock(&gr_inode_lock);
++      }
++      preempt_enable();
++
++      return;
++}
++
++void
++gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++               struct dentry *old_dentry,
++               struct dentry *new_dentry,
++               struct vfsmount *mnt, const __u8 replace)
++{
++      struct name_entry *matchn;
++      struct inodev_entry *inodev;
++      struct inode *inode = new_dentry->d_inode;
++      ino_t old_ino = old_dentry->d_inode->i_ino;
++      dev_t old_dev = __get_dev(old_dentry);
++
++      /* vfs_rename swaps the name and parent link for old_dentry and
++         new_dentry.  At this point, old_dentry has the new name, parent
++         link, and inode for the renamed file.  If a file is being
++         replaced by the rename, new_dentry has the inode and name of
++         the replaced file.
++      */
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return;
++
++      preempt_disable();
++      matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
++
++      /* we wouldn't have to check d_inode if it weren't for
++         NFS silly-renaming
++       */
++
++      write_lock(&gr_inode_lock);
++      if (unlikely(replace && inode)) {
++              ino_t new_ino = inode->i_ino;
++              dev_t new_dev = __get_dev(new_dentry);
++
++              inodev = lookup_inodev_entry(new_ino, new_dev);
++              if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
++                      do_handle_delete(inodev, new_ino, new_dev);
++      }
++
++      inodev = lookup_inodev_entry(old_ino, old_dev);
++      if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
++              do_handle_delete(inodev, old_ino, old_dev);
++
++      if (unlikely((unsigned long)matchn))
++              do_handle_create(matchn, old_dentry, mnt);
++
++      write_unlock(&gr_inode_lock);
++      preempt_enable();
++
++      return;
++}
++
++static int
++lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
++                       unsigned char **sum)
++{
++      struct acl_role_label *r;
++      struct role_allowed_ip *ipp;
++      struct role_transition *trans;
++      unsigned int i;
++      int found = 0;
++      u32 curr_ip = current->signal->curr_ip;
++
++      current->signal->saved_ip = curr_ip;
++
++      /* check transition table */
++
++      for (trans = current->role->transitions; trans; trans = trans->next) {
++              if (!strcmp(rolename, trans->rolename)) {
++                      found = 1;
++                      break;
++              }
++      }
++
++      if (!found)
++              return 0;
++
++      /* handle special roles that do not require authentication
++         and check ip */
++
++      FOR_EACH_ROLE_START(r)
++              if (!strcmp(rolename, r->rolename) &&
++                  (r->roletype & GR_ROLE_SPECIAL)) {
++                      found = 0;
++                      if (r->allowed_ips != NULL) {
++                              for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
++                                      if ((ntohl(curr_ip) & ipp->netmask) ==
++                                           (ntohl(ipp->addr) & ipp->netmask))
++                                              found = 1;
++                              }
++                      } else
++                              found = 2;
++                      if (!found)
++                              return 0;
++
++                      if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
++                          ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
++                              *salt = NULL;
++                              *sum = NULL;
++                              return 1;
++                      }
++              }
++      FOR_EACH_ROLE_END(r)
++
++      for (i = 0; i < num_sprole_pws; i++) {
++              if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
++                      *salt = acl_special_roles[i]->salt;
++                      *sum = acl_special_roles[i]->sum;
++                      return 1;
++              }
++      }
++
++      return 0;
++}
++
++static void
++assign_special_role(char *rolename)
++{
++      struct acl_object_label *obj;
++      struct acl_role_label *r;
++      struct acl_role_label *assigned = NULL;
++      struct task_struct *tsk;
++      struct file *filp;
++
++      FOR_EACH_ROLE_START(r)
++              if (!strcmp(rolename, r->rolename) &&
++                  (r->roletype & GR_ROLE_SPECIAL)) {
++                      assigned = r;
++                      break;
++              }
++      FOR_EACH_ROLE_END(r)
++
++      if (!assigned)
++              return;
++
++      read_lock(&tasklist_lock);
++      read_lock(&grsec_exec_file_lock);
++
++      tsk = current->real_parent;
++      if (tsk == NULL)
++              goto out_unlock;
++
++      filp = tsk->exec_file;
++      if (filp == NULL)
++              goto out_unlock;
++
++      tsk->is_writable = 0;
++
++      tsk->acl_sp_role = 1;
++      tsk->acl_role_id = ++acl_sp_role_value;
++      tsk->role = assigned;
++      tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
++
++      /* ignore additional mmap checks for processes that are writable 
++         by the default ACL */
++      obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
++      if (unlikely(obj->mode & GR_WRITE))
++              tsk->is_writable = 1;
++      obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
++      if (unlikely(obj->mode & GR_WRITE))
++              tsk->is_writable = 1;
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++      printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
++#endif
++
++out_unlock:
++      read_unlock(&grsec_exec_file_lock);
++      read_unlock(&tasklist_lock);
++      return;
++}
++
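++/* returns 0 (after logging and invoking gr_handle_alertkill()) if a
++   process outside the task's ancestry and not sharing its tty holds an
++   open fd on that terminal; returns 1 if the terminal is clean */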
++int gr_check_secure_terminal(struct task_struct *task)
++{
++      struct task_struct *p, *p2, *p3;
++      struct files_struct *files;
++      struct fdtable *fdt;
++      struct file *our_file = NULL, *file;
++      int i;
++
++      if (task->signal->tty == NULL)
++              return 1;
++
++      files = get_files_struct(task);
++      if (files != NULL) {
++              rcu_read_lock();
++              fdt = files_fdtable(files);
++              for (i=0; i < fdt->max_fds; i++) {
++                      file = fcheck_files(files, i);
++                      if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
++                              get_file(file);
++                              our_file = file;
++                      }
++              }
++              rcu_read_unlock();
++              put_files_struct(files);
++      }
++
++      if (our_file == NULL)
++              return 1;
++
++      read_lock(&tasklist_lock);
++      do_each_thread(p2, p) {
++              files = get_files_struct(p);
++              if (files == NULL ||
++                  (p->signal && p->signal->tty == task->signal->tty)) {
++                      if (files != NULL)
++                              put_files_struct(files);
++                      continue;
++              }
++              rcu_read_lock();
++              fdt = files_fdtable(files);
++              for (i=0; i < fdt->max_fds; i++) {
++                      file = fcheck_files(files, i);
++                      if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
++                          file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
++                              p3 = task;
++                              while (task_pid_nr(p3) > 0) {
++                                      if (p3 == p)
++                                              break;
++                                      p3 = p3->real_parent;
++                              }
++                              if (p3 == p)
++                                      break;
++                              gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
++                              gr_handle_alertkill(p);
++                              rcu_read_unlock();
++                              put_files_struct(files);
++                              read_unlock(&tasklist_lock);
++                              fput(our_file);
++                              return 0;
++                      }
++              }
++              rcu_read_unlock();
++              put_files_struct(files);
++      } while_each_thread(p2, p);
++      read_unlock(&tasklist_lock);
++
++      fput(our_file);
++      return 1;
++}
++
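++/* run via stop_machine() so that GR_READY is cleared while no other
++   CPU is executing */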
++static int gr_rbac_disable(void *unused)
++{
++      pax_open_kernel();
++      gr_status &= ~GR_READY;
++      pax_close_kernel();
++
++      return 0;
++}
++
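++/* write handler for the RBAC control device: copies the gr_arg request
++   in from userland and dispatches on its mode (status, enable, shutdown,
++   reload, segvmod, special role, unsprole) */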
++ssize_t
++write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
++{
++      struct gr_arg_wrapper uwrap;
++      unsigned char *sprole_salt = NULL;
++      unsigned char *sprole_sum = NULL;
++      int error = 0;
++      int error2 = 0;
++      size_t req_count = 0;
++
++      mutex_lock(&gr_dev_mutex);
++
++      if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
++              error = -EPERM;
++              goto out;
++      }
++
++#ifdef CONFIG_COMPAT
++      pax_open_kernel();
++      if (is_compat_task()) {
++              copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
++              copy_gr_arg = &copy_gr_arg_compat;
++              copy_acl_object_label = &copy_acl_object_label_compat;
++              copy_acl_subject_label = &copy_acl_subject_label_compat;
++              copy_acl_role_label = &copy_acl_role_label_compat;
++              copy_acl_ip_label = &copy_acl_ip_label_compat;
++              copy_role_allowed_ip = &copy_role_allowed_ip_compat;
++              copy_role_transition = &copy_role_transition_compat;
++              copy_sprole_pw = &copy_sprole_pw_compat;
++              copy_gr_hash_struct = &copy_gr_hash_struct_compat;
++              copy_pointer_from_array = &copy_pointer_from_array_compat;
++              get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
++      } else {
++              copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
++              copy_gr_arg = &copy_gr_arg_normal;
++              copy_acl_object_label = &copy_acl_object_label_normal;
++              copy_acl_subject_label = &copy_acl_subject_label_normal;
++              copy_acl_role_label = &copy_acl_role_label_normal;
++              copy_acl_ip_label = &copy_acl_ip_label_normal;
++              copy_role_allowed_ip = &copy_role_allowed_ip_normal;
++              copy_role_transition = &copy_role_transition_normal;
++              copy_sprole_pw = &copy_sprole_pw_normal;
++              copy_gr_hash_struct = &copy_gr_hash_struct_normal;
++              copy_pointer_from_array = &copy_pointer_from_array_normal;
++              get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
++      }
++      pax_close_kernel();
++#endif
++
++      req_count = get_gr_arg_wrapper_size();
++
++      if (count != req_count) {
++              gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
++              error = -EINVAL;
++              goto out;
++      }
++
++
++      if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
++              gr_auth_expires = 0;
++              gr_auth_attempts = 0;
++      }
++
++      error = copy_gr_arg_wrapper(buf, &uwrap);
++      if (error)
++              goto out;
++
++      error = copy_gr_arg(uwrap.arg, gr_usermode);
++      if (error)
++              goto out;
++
++      if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
++          gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
++          time_after(gr_auth_expires, get_seconds())) {
++              error = -EBUSY;
++              goto out;
++      }
++
++      /* if a non-root user is trying to do anything other than use a
++         special role, do not attempt authentication and do not count
++         the attempt towards authentication lockout
++       */
++
++      if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
++          gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
++          gr_is_global_nonroot(current_uid())) {
++              error = -EPERM;
++              goto out;
++      }
++
++      /* ensure pw and special role name are null terminated */
++
++      gr_usermode->pw[GR_PW_LEN - 1] = '\0';
++      gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
++
++      /* Okay.
++       * We now have enough of the argument structure (we have yet
++       * to copy_from_user the tables themselves).  Copy the tables
++       * only if we need them, i.e. for loading operations. */
++
++      switch (gr_usermode->mode) {
++      case GR_STATUS:
++              if (gr_status & GR_READY) {
++                      error = 1;
++                      if (!gr_check_secure_terminal(current))
++                              error = 3;
++              } else
++                      error = 2;
++              goto out;
++      case GR_SHUTDOWN:
++              if ((gr_status & GR_READY)
++                  && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
++                      stop_machine(gr_rbac_disable, NULL, NULL);
++                      free_variables();
++                      memset(gr_usermode, 0, sizeof (struct gr_arg));
++                      memset(gr_system_salt, 0, GR_SALT_LEN);
++                      memset(gr_system_sum, 0, GR_SHA_LEN);
++                      gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
++              } else if (gr_status & GR_READY) {
++                      gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
++                      error = -EPERM;
++              } else {
++                      gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
++                      error = -EAGAIN;
++              }
++              break;
++      case GR_ENABLE:
++              if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
++                      gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
++              else {
++                      if (gr_status & GR_READY)
++                              error = -EAGAIN;
++                      else
++                              error = error2;
++                      gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
++              }
++              break;
++      case GR_RELOAD:
++              if (!(gr_status & GR_READY)) {
++                      gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
++                      error = -EAGAIN;
++              } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
++                      stop_machine(gr_rbac_disable, NULL, NULL);
++                      free_variables();
++                      error2 = gracl_init(gr_usermode);
++                      if (!error2)
++                              gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
++                      else {
++                              gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
++                              error = error2;
++                      }
++              } else {
++                      gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
++                      error = -EPERM;
++              }
++              break;
++      case GR_SEGVMOD:
++              if (unlikely(!(gr_status & GR_READY))) {
++                      gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
++                      error = -EAGAIN;
++                      break;
++              }
++
++              if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
++                      gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
++                      if (gr_usermode->segv_device && gr_usermode->segv_inode) {
++                              struct acl_subject_label *segvacl;
++                              segvacl =
++                                  lookup_acl_subj_label(gr_usermode->segv_inode,
++                                                        gr_usermode->segv_device,
++                                                        current->role);
++                              if (segvacl) {
++                                      segvacl->crashes = 0;
++                                      segvacl->expires = 0;
++                              }
++                      } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
++                              gr_remove_uid(gr_usermode->segv_uid);
++                      }
++              } else {
++                      gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
++                      error = -EPERM;
++              }
++              break;
++      case GR_SPROLE:
++      case GR_SPROLEPAM:
++              if (unlikely(!(gr_status & GR_READY))) {
++                      gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
++                      error = -EAGAIN;
++                      break;
++              }
++
++              if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
++                      current->role->expires = 0;
++                      current->role->auth_attempts = 0;
++              }
++
++              if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
++                  time_after(current->role->expires, get_seconds())) {
++                      error = -EBUSY;
++                      goto out;
++              }
++
++              if (lookup_special_role_auth
++                  (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
++                  && ((!sprole_salt && !sprole_sum)
++                      || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
++                      char *p = "";
++                      assign_special_role(gr_usermode->sp_role);
++                      read_lock(&tasklist_lock);
++                      if (current->real_parent)
++                              p = current->real_parent->role->rolename;
++                      read_unlock(&tasklist_lock);
++                      gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
++                                      p, acl_sp_role_value);
++              } else {
++                      gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
++                      error = -EPERM;
++                      if(!(current->role->auth_attempts++))
++                              current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
++
++                      goto out;
++              }
++              break;
++      case GR_UNSPROLE:
++              if (unlikely(!(gr_status & GR_READY))) {
++                      gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
++                      error = -EAGAIN;
++                      break;
++              }
++
++              if (current->role->roletype & GR_ROLE_SPECIAL) {
++                      char *p = "";
++                      int i = 0;
++
++                      read_lock(&tasklist_lock);
++                      if (current->real_parent) {
++                              p = current->real_parent->role->rolename;
++                              i = current->real_parent->acl_role_id;
++                      }
++                      read_unlock(&tasklist_lock);
++
++                      gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
++                      gr_set_acls(1);
++              } else {
++                      error = -EPERM;
++                      goto out;
++              }
++              break;
++      default:
++              gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
++              error = -EINVAL;
++              break;
++      }
++
++      if (error != -EPERM)
++              goto out;
++
++      if(!(gr_auth_attempts++))
++              gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
++
++      out:
++      mutex_unlock(&gr_dev_mutex);
++
++      if (!error)
++              error = req_count;
++
++      return error;
++}
++
++/* must be called with
++      rcu_read_lock();
++      read_lock(&tasklist_lock);
++      read_lock(&grsec_exec_file_lock);
++*/
++int gr_apply_subject_to_task(struct task_struct *task)
++{
++      struct acl_object_label *obj;
++      char *tmpname;
++      struct acl_subject_label *tmpsubj;
++      struct file *filp;
++      struct name_entry *nmatch;
++
++      filp = task->exec_file;
++      if (filp == NULL)
++              return 0;
++
++      /* the following applies the correct subject to binaries that
++         were already running when the RBAC system was enabled and
++         that have been replaced or deleted since they were executed
++         -----
++         when the RBAC system starts, the inode/dev taken from
++         exec_file may be one the RBAC system is unaware of; it only
++         knows the inode/dev of the file currently on disk, or its
++         absence
++      */
++      preempt_disable();
++      tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
++
++      nmatch = lookup_name_entry(tmpname);
++      preempt_enable();
++      tmpsubj = NULL;
++      if (nmatch) {
++              if (nmatch->deleted)
++                      tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
++              else
++                      tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
++              if (tmpsubj != NULL)
++                      task->acl = tmpsubj;
++      }
++      if (tmpsubj == NULL)
++              task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
++                                         task->role);
++      if (task->acl) {
++              task->is_writable = 0;
++              /* ignore additional mmap checks for processes that are writable 
++                 by the default ACL */
++              obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
++              if (unlikely(obj->mode & GR_WRITE))
++                      task->is_writable = 1;
++              obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
++              if (unlikely(obj->mode & GR_WRITE))
++                      task->is_writable = 1;
++
++              gr_set_proc_res(task);
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++              printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
++#endif
++      } else {
++              return 1;
++      }
++
++      return 0;
++}
++
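++/* re-resolve the role and subject of every thread; with a non-zero type
++   (exit-handler path) only tasks that still carry the caller's role and
++   acl_role_id are updated */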
++int
++gr_set_acls(const int type)
++{
++      struct task_struct *task, *task2;
++      struct acl_role_label *role = current->role;
++      __u16 acl_role_id = current->acl_role_id;
++      const struct cred *cred;
++      int ret;
++
++      rcu_read_lock();
++      read_lock(&tasklist_lock);
++      read_lock(&grsec_exec_file_lock);
++      do_each_thread(task2, task) {
++              /* check to see if we're called from the exit handler,
++                 if so, only replace ACLs that have inherited the admin
++                 ACL */
++
++              if (type && (task->role != role ||
++                           task->acl_role_id != acl_role_id))
++                      continue;
++
++              task->acl_role_id = 0;
++              task->acl_sp_role = 0;
++
++              if (task->exec_file) {
++                      cred = __task_cred(task);
++                      task->role = lookup_acl_role_label(task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
++                      ret = gr_apply_subject_to_task(task);
++                      if (ret) {
++                              read_unlock(&grsec_exec_file_lock);
++                              read_unlock(&tasklist_lock);
++                              rcu_read_unlock();
++                              gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
++                              return ret;
++                      }
++              } else {
++                      // it's a kernel process
++                      task->role = kernel_role;
++                      task->acl = kernel_role->root_label;
++#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
++                      task->acl->mode &= ~GR_PROCFIND;
++#endif
++              }
++      } while_each_thread(task2, task);
++      read_unlock(&grsec_exec_file_lock);
++      read_unlock(&tasklist_lock);
++      rcu_read_unlock();
++
++      return 0;
++}
++
++#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
++static const unsigned long res_learn_bumps[GR_NLIMITS] = {
++      [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
++      [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
++      [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
++      [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
++      [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
++      [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
++      [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
++      [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
++      [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
++      [RLIMIT_AS] = GR_RLIM_AS_BUMP,
++      [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
++      [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
++      [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
++      [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
++      [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
++      [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
++};
++
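++/* in learning mode, raise the subject's limit for this resource to the
++   requested value plus a per-resource bump and emit a learn log entry;
++   otherwise hand the request to gr_log_resource() */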
++void
++gr_learn_resource(const struct task_struct *task,
++                const int res, const unsigned long wanted, const int gt)
++{
++      struct acl_subject_label *acl;
++      const struct cred *cred;
++
++      if (unlikely((gr_status & GR_READY) &&
++                   task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
++              goto skip_reslog;
++
++      gr_log_resource(task, res, wanted, gt);
++skip_reslog:
++
++      if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
++              return;
++
++      acl = task->acl;
++
++      if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
++                 !(acl->resmask & (1U << (unsigned short) res))))
++              return;
++
++      if (wanted >= acl->res[res].rlim_cur) {
++              unsigned long res_add;
++
++              res_add = wanted + res_learn_bumps[res];
++
++              acl->res[res].rlim_cur = res_add;
++
++              if (wanted > acl->res[res].rlim_max)
++                      acl->res[res].rlim_max = res_add;
++
++              /* only log the subject filename, since resource logging is supported for
++                 single-subject learning only */
++              rcu_read_lock();
++              cred = __task_cred(task);
++              security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
++                             task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
++                             acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
++                             "", (unsigned long) res, &task->signal->saved_ip);
++              rcu_read_unlock();
++      }
++
++      return;
++}
++EXPORT_SYMBOL(gr_learn_resource);
++#endif
++
++#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
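++/* apply the subject's PaX flag enable/disable overrides to the current
++   task when a new binary is executed */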
++void
++pax_set_initial_flags(struct linux_binprm *bprm)
++{
++      struct task_struct *task = current;
++      struct acl_subject_label *proc;
++      unsigned long flags;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return;
++
++      flags = pax_get_flags(task);
++
++      proc = task->acl;
++
++      if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
++              flags &= ~MF_PAX_PAGEEXEC;
++      if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
++              flags &= ~MF_PAX_SEGMEXEC;
++      if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
++              flags &= ~MF_PAX_RANDMMAP;
++      if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
++              flags &= ~MF_PAX_EMUTRAMP;
++      if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
++              flags &= ~MF_PAX_MPROTECT;
++
++      if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
++              flags |= MF_PAX_PAGEEXEC;
++      if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
++              flags |= MF_PAX_SEGMEXEC;
++      if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
++              flags |= MF_PAX_RANDMMAP;
++      if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
++              flags |= MF_PAX_EMUTRAMP;
++      if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
++              flags |= MF_PAX_MPROTECT;
++
++      pax_set_flags(task, flags);
++
++      return;
++}
++#endif
++
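++/* returns 1 if the current process must be denied ptrace-style access
++   to the target task; used to guard sensitive /proc interfaces */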
++int
++gr_handle_proc_ptrace(struct task_struct *task)
++{
++      struct file *filp;
++      struct task_struct *tmp = task;
++      struct task_struct *curtemp = current;
++      __u32 retmode;
++
++#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
++      if (unlikely(!(gr_status & GR_READY)))
++              return 0;
++#endif
++
++      read_lock(&tasklist_lock);
++      read_lock(&grsec_exec_file_lock);
++      filp = task->exec_file;
++
++      while (task_pid_nr(tmp) > 0) {
++              if (tmp == curtemp)
++                      break;
++              tmp = tmp->real_parent;
++      }
++
++      if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
++                              ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
++              read_unlock(&grsec_exec_file_lock);
++              read_unlock(&tasklist_lock);
++              return 1;
++      }
++
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++      if (!(gr_status & GR_READY)) {
++              read_unlock(&grsec_exec_file_lock);
++              read_unlock(&tasklist_lock);
++              return 0;
++      }
++#endif
++
++      retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
++      read_unlock(&grsec_exec_file_lock);
++      read_unlock(&tasklist_lock);
++
++      if (retmode & GR_NOPTRACE)
++              return 1;
++
++      if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
++          && (current->acl != task->acl || (current->acl != current->role->root_label
++          && task_pid_nr(current) != task_pid_nr(task))))
++              return 1;
++
++      return 0;
++}
++
++void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
++{
++      if (unlikely(!(gr_status & GR_READY)))
++              return;
++
++      if (!(current->role->roletype & GR_ROLE_GOD))
++              return;
++
++      seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
++                      p->role->rolename, gr_task_roletype_to_char(p),
++                      p->acl->filename);
++}
++
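++/* returns 1 (and logs) if the requested ptrace operation on the target
++   task is to be denied */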
++int
++gr_handle_ptrace(struct task_struct *task, const long request)
++{
++      struct task_struct *tmp = task;
++      struct task_struct *curtemp = current;
++      __u32 retmode;
++
++#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
++      if (unlikely(!(gr_status & GR_READY)))
++              return 0;
++#endif
++      if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
++              read_lock(&tasklist_lock);
++              while (task_pid_nr(tmp) > 0) {
++                      if (tmp == curtemp)
++                              break;
++                      tmp = tmp->real_parent;
++              }
++
++              if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
++                                      ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
++                      read_unlock(&tasklist_lock);
++                      gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
++                      return 1;
++              }
++              read_unlock(&tasklist_lock);
++      }
++
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++      if (!(gr_status & GR_READY))
++              return 0;
++#endif
++
++      read_lock(&grsec_exec_file_lock);
++      if (unlikely(!task->exec_file)) {
++              read_unlock(&grsec_exec_file_lock);
++              return 0;
++      }
++
++      retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
++      read_unlock(&grsec_exec_file_lock);
++
++      if (retmode & GR_NOPTRACE) {
++              gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
++              return 1;
++      }
++
++      if (retmode & GR_PTRACERD) {
++              switch (request) {
++              case PTRACE_SEIZE:
++              case PTRACE_POKETEXT:
++              case PTRACE_POKEDATA:
++              case PTRACE_POKEUSR:
++#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
++              case PTRACE_SETREGS:
++              case PTRACE_SETFPREGS:
++#endif
++#ifdef CONFIG_X86
++              case PTRACE_SETFPXREGS:
++#endif
++#ifdef CONFIG_ALTIVEC
++              case PTRACE_SETVRREGS:
++#endif
++                      return 1;
++              default:
++                      return 0;
++              }
++      } else if (!(current->acl->mode & GR_POVERRIDE) &&
++                 !(current->role->roletype & GR_ROLE_GOD) &&
++                 (current->acl != task->acl)) {
++              gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
++              return 1;
++      }
++
++      return 0;
++}
++
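++/* returns 1 (and logs a writable-library violation) if the file is
++   writable through the default or the task's role root object label,
++   in which case callers refuse to map it executable */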
++static int is_writable_mmap(const struct file *filp)
++{
++      struct task_struct *task = current;
++      struct acl_object_label *obj, *obj2;
++
++      if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
++          !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
++              obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
++              obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
++                                   task->role->root_label);
++              if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
++                      gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
++                      return 1;
++              }
++      }
++      return 0;
++}
++
++int
++gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
++{
++      __u32 mode;
++
++      if (unlikely(!file || !(prot & PROT_EXEC)))
++              return 1;
++
++      if (is_writable_mmap(file))
++              return 0;
++
++      mode =
++          gr_search_file(file->f_path.dentry,
++                         GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
++                         file->f_path.mnt);
++
++      if (!gr_tpe_allow(file))
++              return 0;
++
++      if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
++              gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
++              return 0;
++      } else if (unlikely(!(mode & GR_EXEC))) {
++              return 0;
++      } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
++              gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
++              return 1;
++      }
++
++      return 1;
++}
++
++int
++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
++{
++      __u32 mode;
++
++      if (unlikely(!file || !(prot & PROT_EXEC)))
++              return 1;
++
++      if (is_writable_mmap(file))
++              return 0;
++
++      mode =
++          gr_search_file(file->f_path.dentry,
++                         GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
++                         file->f_path.mnt);
++
++      if (!gr_tpe_allow(file))
++              return 0;
++
++      if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
++              gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
++              return 0;
++      } else if (unlikely(!(mode & GR_EXEC))) {
++              return 0;
++      } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
++              gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
++              return 1;
++      }
++
++      return 1;
++}
++
++void
++gr_acl_handle_psacct(struct task_struct *task, const long code)
++{
++      unsigned long runtime;
++      unsigned long cputime;
++      unsigned int wday, cday;
++      __u8 whr, chr;
++      __u8 wmin, cmin;
++      __u8 wsec, csec;
++      struct timespec timeval;
++
++      if (unlikely(!(gr_status & GR_READY) || !task->acl ||
++                   !(task->acl->mode & GR_PROCACCT)))
++              return;
++
++      do_posix_clock_monotonic_gettime(&timeval);
++      runtime = timeval.tv_sec - task->start_time.tv_sec;
++      wday = runtime / (3600 * 24);
++      runtime -= wday * (3600 * 24);
++      whr = runtime / 3600;
++      runtime -= whr * 3600;
++      wmin = runtime / 60;
++      runtime -= wmin * 60;
++      wsec = runtime;
++
++      cputime = (task->utime + task->stime) / HZ;
++      cday = cputime / (3600 * 24);
++      cputime -= cday * (3600 * 24);
++      chr = cputime / 3600;
++      cputime -= chr * 3600;
++      cmin = cputime / 60;
++      cputime -= cmin * 60;
++      csec = cputime;
++
++      gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
++
++      return;
++}
++
++void gr_set_kernel_label(struct task_struct *task)
++{
++      if (gr_status & GR_READY) {
++              task->role = kernel_role;
++              task->acl = kernel_role->root_label;
++      }
++      return;
++}
++
++#ifdef CONFIG_TASKSTATS
++int gr_is_taskstats_denied(int pid)
++{
++      struct task_struct *task;
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      const struct cred *cred;
++#endif
++      int ret = 0;
++
++      /* restrict taskstats viewing to un-chrooted root users
++         who have the 'view' subject flag if the RBAC system is enabled
++      */
++
++      rcu_read_lock();
++      read_lock(&tasklist_lock);
++      task = find_task_by_vpid(pid);
++      if (task) {
++#ifdef CONFIG_GRKERNSEC_CHROOT
++              if (proc_is_chrooted(task))
++                      ret = -EACCES;
++#endif
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++              cred = __task_cred(task);
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++              if (gr_is_global_nonroot(cred->uid))
++                      ret = -EACCES;
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++              if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
++                      ret = -EACCES;
++#endif
++#endif
++              if (gr_status & GR_READY) {
++                      if (!(task->acl->mode & GR_VIEW))
++                              ret = -EACCES;
++              }
++      } else
++              ret = -ENOENT;
++
++      read_unlock(&tasklist_lock);
++      rcu_read_unlock();
++
++      return ret;
++}
++#endif
++
++/* AUXV entries are filled via a descendant of search_binary_handler
++   after we've already applied the subject for the target
++*/
++int gr_acl_enable_at_secure(void)
++{
++      if (unlikely(!(gr_status & GR_READY)))
++              return 0;
++
++      if (current->acl->mode & GR_ATSECURE)
++              return 1;
++
++      return 0;
++}
++      
++int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
++{
++      struct task_struct *task = current;
++      struct dentry *dentry = file->f_path.dentry;
++      struct vfsmount *mnt = file->f_path.mnt;
++      struct acl_object_label *obj, *tmp;
++      struct acl_subject_label *subj;
++      unsigned int bufsize;
++      int is_not_root;
++      char *path;
++      dev_t dev = __get_dev(dentry);
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return 1;
++
++      if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
++              return 1;
++
++      /* ignore Eric Biederman */
++      if (IS_PRIVATE(dentry->d_inode))
++              return 1;
++
++      subj = task->acl;
++      read_lock(&gr_inode_lock);
++      do {
++              obj = lookup_acl_obj_label(ino, dev, subj);
++              if (obj != NULL) {
++                      read_unlock(&gr_inode_lock);
++                      return (obj->mode & GR_FIND) ? 1 : 0;
++              }
++      } while ((subj = subj->parent_subject));
++      read_unlock(&gr_inode_lock);
++      
++      /* this is purely an optimization, since we're looking up an object
++         for the directory we're doing a readdir on.  If it's possible for
++         any globbed object to match the entry we're filling into the
++         directory, the object we find here will be an anchor point with
++         the globbed objects attached to it
++      */
++      obj = chk_obj_label_noglob(dentry, mnt, task->acl);
++      if (obj->globbed == NULL)
++              return (obj->mode & GR_FIND) ? 1 : 0;
++
++      is_not_root = ((obj->filename[0] == '/') &&
++                 (obj->filename[1] == '\0')) ? 0 : 1;
++      bufsize = PAGE_SIZE - namelen - is_not_root;
++
++      /* check bufsize > PAGE_SIZE || bufsize == 0 */
++      if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
++              return 1;
++
++      preempt_disable();
++      path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
++                         bufsize);
++
++      bufsize = strlen(path);
++
++      /* if base is "/", don't append an additional slash */
++      if (is_not_root)
++              *(path + bufsize) = '/';
++      memcpy(path + bufsize + is_not_root, name, namelen);
++      *(path + bufsize + namelen + is_not_root) = '\0';
++
++      tmp = obj->globbed;
++      while (tmp) {
++              if (!glob_match(tmp->filename, path)) {
++                      preempt_enable();
++                      return (tmp->mode & GR_FIND) ? 1 : 0;
++              }
++              tmp = tmp->next;
++      }
++      preempt_enable();
++      return (obj->mode & GR_FIND) ? 1 : 0;
++}
++
++void gr_put_exec_file(struct task_struct *task)
++{
++      struct file *filp;  
++
++      write_lock(&grsec_exec_file_lock);
++      filp = task->exec_file;   
++      task->exec_file = NULL;
++      write_unlock(&grsec_exec_file_lock);
++
++      if (filp)
++              fput(filp);
++
++      return;
++}
++
++
++#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
++EXPORT_SYMBOL(gr_acl_is_enabled);
++#endif
++EXPORT_SYMBOL(gr_set_kernel_label);
++#ifdef CONFIG_SECURITY
++EXPORT_SYMBOL(gr_check_user_change);
++EXPORT_SYMBOL(gr_check_group_change);
++#endif
++
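
The gr_acl_handle_psacct() routine above reduces the task's wall-clock runtime and accumulated CPU time to days, hours, minutes and seconds before handing them to gr_log_procacct(). Below is a minimal userspace sketch of that decomposition, assuming nothing beyond standard C; split_time() is a hypothetical helper, not part of the patch.

#include <stdio.h>

/* break a duration in seconds into days / hours / minutes / seconds,
 * mirroring the arithmetic in gr_acl_handle_psacct() */
static void split_time(unsigned long secs, unsigned int *days,
                       unsigned int *hours, unsigned int *mins,
                       unsigned int *rem)
{
        *days  = secs / (3600 * 24);
        secs  -= (unsigned long)*days * (3600 * 24);
        *hours = secs / 3600;
        secs  -= (unsigned long)*hours * 3600;
        *mins  = secs / 60;
        *rem   = secs - *mins * 60;
}

int main(void)
{
        unsigned int d, h, m, s;

        split_time(93784UL, &d, &h, &m, &s);    /* 93784 s = 1 d 02:03:04 */
        printf("%u day(s) %02u:%02u:%02u\n", d, h, m, s);
        return 0;
}
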
+diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
+new file mode 100644
+index 0000000..34fefda
+--- /dev/null
++++ b/grsecurity/gracl_alloc.c
+@@ -0,0 +1,105 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++
++static unsigned long alloc_stack_next = 1;
++static unsigned long alloc_stack_size = 1;
++static void **alloc_stack;
++
++static __inline__ int
++alloc_pop(void)
++{
++      if (alloc_stack_next == 1)
++              return 0;
++
++      kfree(alloc_stack[alloc_stack_next - 2]);
++
++      alloc_stack_next--;
++
++      return 1;
++}
++
++static __inline__ int
++alloc_push(void *buf)
++{
++      if (alloc_stack_next >= alloc_stack_size)
++              return 1;
++
++      alloc_stack[alloc_stack_next - 1] = buf;
++
++      alloc_stack_next++;
++
++      return 0;
++}
++
++void *
++acl_alloc(unsigned long len)
++{
++      void *ret = NULL;
++
++      if (!len || len > PAGE_SIZE)
++              goto out;
++
++      ret = kmalloc(len, GFP_KERNEL);
++
++      if (ret) {
++              if (alloc_push(ret)) {
++                      kfree(ret);
++                      ret = NULL;
++              }
++      }
++
++out:
++      return ret;
++}
++
++void *
++acl_alloc_num(unsigned long num, unsigned long len)
++{
++      if (!len || (num > (PAGE_SIZE / len)))
++              return NULL;
++
++      return acl_alloc(num * len);
++}
++
++void
++acl_free_all(void)
++{
++      if (gr_acl_is_enabled() || !alloc_stack)
++              return;
++
++      while (alloc_pop()) ;
++
++      if (alloc_stack) {
++              if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
++                      kfree(alloc_stack);
++              else
++                      vfree(alloc_stack);
++      }
++
++      alloc_stack = NULL;
++      alloc_stack_size = 1;
++      alloc_stack_next = 1;
++
++      return;
++}
++
++int
++acl_alloc_stack_init(unsigned long size)
++{
++      if ((size * sizeof (void *)) <= PAGE_SIZE)
++              alloc_stack =
++                  (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
++      else
++              alloc_stack = (void **) vmalloc(size * sizeof (void *));
++
++      alloc_stack_size = size;
++
++      if (!alloc_stack)
++              return 0;
++      else
++              return 1;
++}
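
gracl_alloc.c above keeps every policy allocation on a single pointer stack so the whole policy can be released in one pass when the RBAC system is disabled or reloaded. A rough userspace sketch of the same pattern follows; the pool_* names are hypothetical stand-ins for acl_alloc()/acl_free_all(), not grsecurity APIs.

#include <stdlib.h>

static void **pool_stack;               /* analogous to alloc_stack        */
static unsigned long pool_size;
static unsigned long pool_next = 1;     /* 1-based, as in the patch        */

/* size the stack up front, like acl_alloc_stack_init() */
int pool_init(unsigned long size)
{
        pool_stack = calloc(size, sizeof(void *));
        pool_size  = size;
        pool_next  = 1;
        return pool_stack != NULL;
}

/* allocate and remember the pointer; fail the request if the stack is full */
void *pool_alloc(size_t len)
{
        void *p = malloc(len);

        if (p != NULL && pool_next <= pool_size) {
                pool_stack[pool_next - 1] = p;
                pool_next++;
        } else if (p != NULL) {
                free(p);
                p = NULL;
        }
        return p;
}

/* free everything that was ever handed out, newest first */
void pool_free_all(void)
{
        while (pool_next > 1)
                free(pool_stack[--pool_next - 1]);
        free(pool_stack);
        pool_stack = NULL;
        pool_size  = 0;
}

int main(void)
{
        if (!pool_init(16))
                return 1;
        pool_alloc(64);
        pool_alloc(128);
        pool_free_all();        /* releases both buffers and the stack     */
        return 0;
}
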
+diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
+new file mode 100644
+index 0000000..bdd51ea
+--- /dev/null
++++ b/grsecurity/gracl_cap.c
+@@ -0,0 +1,110 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++extern const char *captab_log[];
++extern int captab_log_entries;
++
++int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
++{
++      struct acl_subject_label *curracl;
++      kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
++      kernel_cap_t cap_audit = __cap_empty_set;
++
++      if (!gr_acl_is_enabled())
++              return 1;
++
++      curracl = task->acl;
++
++      cap_drop = curracl->cap_lower;
++      cap_mask = curracl->cap_mask;
++      cap_audit = curracl->cap_invert_audit;
++
++      while ((curracl = curracl->parent_subject)) {
++              /* if the cap isn't yet covered by the computed mask but is covered by this
++                 subject's mask, add it to the computed mask; if this subject also lowers
++                 the cap, add it to the set of dropped capabilities.  The closest subject
++                 in the chain with an opinion on a capability therefore decides its fate
++               */
++              if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
++                      cap_raise(cap_mask, cap);
++                      if (cap_raised(curracl->cap_lower, cap))
++                              cap_raise(cap_drop, cap);
++                      if (cap_raised(curracl->cap_invert_audit, cap))
++                              cap_raise(cap_audit, cap);
++              }
++      }
++
++      if (!cap_raised(cap_drop, cap)) {
++              if (cap_raised(cap_audit, cap))
++                      gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
++              return 1;
++      }
++
++      curracl = task->acl;
++
++      if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
++          && cap_raised(cred->cap_effective, cap)) {
++              security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
++                             task->role->roletype, GR_GLOBAL_UID(cred->uid),
++                             GR_GLOBAL_GID(cred->gid), task->exec_file ?
++                             gr_to_filename(task->exec_file->f_path.dentry,
++                             task->exec_file->f_path.mnt) : curracl->filename,
++                             curracl->filename, 0UL,
++                             0UL, "", (unsigned long) cap, &task->signal->saved_ip);
++              return 1;
++      }
++
++      if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
++              gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
++
++      return 0;
++}
++
++int
++gr_acl_is_capable(const int cap)
++{
++      return gr_task_acl_is_capable(current, current_cred(), cap);
++}
++
++int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
++{
++      struct acl_subject_label *curracl;
++      kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
++
++      if (!gr_acl_is_enabled())
++              return 1;
++
++      curracl = task->acl;
++
++      cap_drop = curracl->cap_lower;
++      cap_mask = curracl->cap_mask;
++
++      while ((curracl = curracl->parent_subject)) {
++              /* if the cap isn't specified in the current computed mask but is specified in the
++                 current level subject, and is lowered in the current level subject, then add
++                 it to the set of dropped capabilities
++                 otherwise, add the current level subject's mask to the current computed mask
++               */
++              if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
++                      cap_raise(cap_mask, cap);
++                      if (cap_raised(curracl->cap_lower, cap))
++                              cap_raise(cap_drop, cap);
++              }
++      }
++
++      if (!cap_raised(cap_drop, cap))
++              return 1;
++
++      return 0;
++}
++
++int
++gr_acl_is_capable_nolog(const int cap)
++{
++      return gr_task_acl_is_capable_nolog(current, cap);
++}
++
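
gr_task_acl_is_capable() above decides a capability by walking the subject's parent chain, and the closest subject whose cap_mask covers the capability decides whether it is dropped. A condensed userspace sketch of that walk follows; struct subj and its flat 64-bit masks are illustrative substitutes for the kernel's acl_subject_label and kernel_cap_t.

#include <stdbool.h>
#include <stdint.h>

struct subj {
        uint64_t cap_mask;      /* capabilities this subject has an opinion on */
        uint64_t cap_lower;     /* capabilities this subject drops             */
        struct subj *parent;
};

/* mirror the parent walk above: once a subject's mask covers the cap,
 * verdicts from subjects further up the chain are ignored for it */
static bool cap_allowed(const struct subj *s, unsigned int cap)
{
        uint64_t bit  = 1ULL << cap;
        uint64_t mask = s->cap_mask;
        uint64_t drop = s->cap_lower;

        for (s = s->parent; s != NULL; s = s->parent) {
                if (!(mask & bit) && (s->cap_mask & bit)) {
                        mask |= bit;
                        if (s->cap_lower & bit)
                                drop |= bit;
                }
        }
        return !(drop & bit);
}

int main(void)
{
        struct subj root  = { ~0ULL, 1ULL << 21, NULL };   /* drops cap 21 */
        struct subj child = { 0, 0, &root };

        /* the child has no opinion on cap 21, so the parent's drop wins */
        return cap_allowed(&child, 21) ? 1 : 0;            /* exits 0: denied */
}
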
+diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
+new file mode 100644
+index 0000000..a43dd06
+--- /dev/null
++++ b/grsecurity/gracl_compat.c
+@@ -0,0 +1,269 @@
++#include <linux/kernel.h>
++#include <linux/gracl.h>
++#include <linux/compat.h>
++#include <linux/gracl_compat.h>
++
++#include <asm/uaccess.h>
++
++int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
++{
++      struct gr_arg_wrapper_compat uwrapcompat;
++
++        if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
++                return -EFAULT;
++
++        if ((uwrapcompat.version != GRSECURITY_VERSION) ||
++          (uwrapcompat.size != sizeof(struct gr_arg_compat)))  
++                return -EINVAL;
++
++      uwrap->arg = compat_ptr(uwrapcompat.arg);
++      uwrap->version = uwrapcompat.version;
++      uwrap->size = sizeof(struct gr_arg);
++
++        return 0;
++}
++
++int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
++{
++      struct gr_arg_compat argcompat;
++
++        if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
++                return -EFAULT;
++
++      arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
++      arg->role_db.num_pointers = argcompat.role_db.num_pointers;
++      arg->role_db.num_roles = argcompat.role_db.num_roles;
++      arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
++      arg->role_db.num_subjects = argcompat.role_db.num_subjects;
++      arg->role_db.num_objects = argcompat.role_db.num_objects;
++
++      memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
++      memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
++      memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
++      memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
++      arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
++      arg->segv_device = argcompat.segv_device;
++      arg->segv_inode = argcompat.segv_inode;
++      arg->segv_uid = argcompat.segv_uid;
++      arg->num_sprole_pws = argcompat.num_sprole_pws;
++      arg->mode = argcompat.mode;
++
++      return 0;
++}
++
++int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
++{
++      struct acl_object_label_compat objcompat;
++
++      if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
++                return -EFAULT;
++
++      obj->filename = compat_ptr(objcompat.filename);
++      obj->inode = objcompat.inode;
++      obj->device = objcompat.device;
++      obj->mode = objcompat.mode;
++
++      obj->nested = compat_ptr(objcompat.nested);
++      obj->globbed = compat_ptr(objcompat.globbed);
++
++      obj->prev = compat_ptr(objcompat.prev);
++      obj->next = compat_ptr(objcompat.next);
++
++      return 0;
++}
++
++int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
++{
++      unsigned int i;
++      struct acl_subject_label_compat subjcompat;
++
++      if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
++                return -EFAULT;
++
++      subj->filename = compat_ptr(subjcompat.filename);
++      subj->inode = subjcompat.inode;
++      subj->device = subjcompat.device;
++      subj->mode = subjcompat.mode;
++      subj->cap_mask = subjcompat.cap_mask;
++      subj->cap_lower = subjcompat.cap_lower;
++      subj->cap_invert_audit = subjcompat.cap_invert_audit;
++
++      for (i = 0; i < GR_NLIMITS; i++) {
++              if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
++                      subj->res[i].rlim_cur = RLIM_INFINITY;
++              else
++                      subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
++              if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
++                      subj->res[i].rlim_max = RLIM_INFINITY;
++              else
++                      subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
++      }
++      subj->resmask = subjcompat.resmask;
++
++      subj->user_trans_type = subjcompat.user_trans_type;
++      subj->group_trans_type = subjcompat.group_trans_type;
++      subj->user_transitions = compat_ptr(subjcompat.user_transitions);
++      subj->group_transitions = compat_ptr(subjcompat.group_transitions);
++      subj->user_trans_num = subjcompat.user_trans_num;
++      subj->group_trans_num = subjcompat.group_trans_num;
++
++      memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
++      memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
++      subj->ip_type = subjcompat.ip_type;
++      subj->ips = compat_ptr(subjcompat.ips);
++      subj->ip_num = subjcompat.ip_num;
++      subj->inaddr_any_override = subjcompat.inaddr_any_override;
++
++      subj->crashes = subjcompat.crashes;
++      subj->expires = subjcompat.expires;
++
++      subj->parent_subject = compat_ptr(subjcompat.parent_subject);
++      subj->hash = compat_ptr(subjcompat.hash);
++      subj->prev = compat_ptr(subjcompat.prev);
++      subj->next = compat_ptr(subjcompat.next);
++
++      subj->obj_hash = compat_ptr(subjcompat.obj_hash);
++      subj->obj_hash_size = subjcompat.obj_hash_size;
++      subj->pax_flags = subjcompat.pax_flags;
++
++      return 0;
++}
++
++int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
++{
++      struct acl_role_label_compat rolecompat;
++
++      if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
++                return -EFAULT;
++
++      role->rolename = compat_ptr(rolecompat.rolename);
++      role->uidgid = rolecompat.uidgid;
++      role->roletype = rolecompat.roletype;
++
++      role->auth_attempts = rolecompat.auth_attempts;
++      role->expires = rolecompat.expires;
++
++      role->root_label = compat_ptr(rolecompat.root_label);
++      role->hash = compat_ptr(rolecompat.hash);
++
++      role->prev = compat_ptr(rolecompat.prev);
++      role->next = compat_ptr(rolecompat.next);
++
++      role->transitions = compat_ptr(rolecompat.transitions);
++      role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
++      role->domain_children = compat_ptr(rolecompat.domain_children);
++      role->domain_child_num = rolecompat.domain_child_num;
++
++      role->umask = rolecompat.umask;
++
++      role->subj_hash = compat_ptr(rolecompat.subj_hash);
++      role->subj_hash_size = rolecompat.subj_hash_size;
++
++      return 0;
++}
++
++int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
++{
++      struct role_allowed_ip_compat roleip_compat;
++
++      if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
++                return -EFAULT;
++
++      roleip->addr = roleip_compat.addr;
++      roleip->netmask = roleip_compat.netmask;
++
++      roleip->prev = compat_ptr(roleip_compat.prev);
++      roleip->next = compat_ptr(roleip_compat.next);
++
++      return 0;
++}
++
++int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
++{
++      struct role_transition_compat trans_compat;
++
++      if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
++                return -EFAULT;
++
++      trans->rolename = compat_ptr(trans_compat.rolename);
++
++      trans->prev = compat_ptr(trans_compat.prev);
++      trans->next = compat_ptr(trans_compat.next);
++
++      return 0;
++
++}
++
++int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
++{
++      struct gr_hash_struct_compat hash_compat;
++
++      if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
++                return -EFAULT;
++
++      hash->table = compat_ptr(hash_compat.table);
++      hash->nametable = compat_ptr(hash_compat.nametable);
++      hash->first = compat_ptr(hash_compat.first);
++
++      hash->table_size = hash_compat.table_size;
++      hash->used_size = hash_compat.used_size;
++
++      hash->type = hash_compat.type;
++
++      return 0;
++}
++
++int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
++{
++      compat_uptr_t ptrcompat;
++
++      if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
++                return -EFAULT;
++
++      *(void **)ptr = compat_ptr(ptrcompat);
++
++      return 0;
++}
++
++int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
++{
++      struct acl_ip_label_compat ip_compat;
++
++      if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
++                return -EFAULT;
++
++      ip->iface = compat_ptr(ip_compat.iface);
++      ip->addr = ip_compat.addr;
++      ip->netmask = ip_compat.netmask;
++      ip->low = ip_compat.low;
++      ip->high = ip_compat.high;
++      ip->mode = ip_compat.mode;
++      ip->type = ip_compat.type;
++
++      memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
++
++      ip->prev = compat_ptr(ip_compat.prev);
++      ip->next = compat_ptr(ip_compat.next);
++
++      return 0;
++}
++
++int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
++{
++      struct sprole_pw_compat pw_compat;
++
++      if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
++                return -EFAULT;
++
++      pw->rolename = compat_ptr(pw_compat.rolename);
++      memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
++      memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
++
++      return 0;
++}
++
++size_t get_gr_arg_wrapper_size_compat(void)
++{
++      return sizeof(struct gr_arg_wrapper_compat);
++}
++
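
The helpers in gracl_compat.c above all follow one pattern: copy in the 32-bit layout that a compat (32-bit) gradm passed down, then widen every pointer field with compat_ptr() while copying the scalar fields across. A small userspace sketch of that pattern follows; the node/node_wire32 structures are made up for illustration and widen_ptr() plays the role of compat_ptr().

#include <stdint.h>
#include <string.h>

struct node_wire32 {            /* layout as a 32-bit caller stores it     */
        uint32_t name;          /* pointer, stored in 32 bits              */
        uint32_t mode;
        uint32_t next;          /* pointer, stored in 32 bits              */
};

struct node {                   /* native layout                           */
        char *name;
        uint32_t mode;
        struct node *next;
};

static void *widen_ptr(uint32_t p)      /* role of compat_ptr()            */
{
        return (void *)(uintptr_t)p;
}

/* copy_from_user() in the patch becomes a plain memcpy() here */
static int node_from_wire32(struct node *dst, const void *buf)
{
        struct node_wire32 w;

        memcpy(&w, buf, sizeof(w));
        dst->name = widen_ptr(w.name);
        dst->mode = w.mode;
        dst->next = widen_ptr(w.next);
        return 0;
}

int main(void)
{
        struct node_wire32 w = { 0x1000, 0644, 0 };
        struct node n;

        node_from_wire32(&n, &w);
        return n.mode == 0644 ? 0 : 1;
}
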
+diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
+new file mode 100644
+index 0000000..a340c17
+--- /dev/null
++++ b/grsecurity/gracl_fs.c
+@@ -0,0 +1,431 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/stat.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/gracl.h>
++
++umode_t
++gr_acl_umask(void)
++{
++      if (unlikely(!gr_acl_is_enabled()))
++              return 0;
++
++      return current->role->umask;
++}
++
++__u32
++gr_acl_handle_hidden_file(const struct dentry * dentry,
++                        const struct vfsmount * mnt)
++{
++      __u32 mode;
++
++      if (unlikely(!dentry->d_inode))
++              return GR_FIND;
++
++      mode =
++          gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
++
++      if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
++              gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
++              return mode;
++      } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
++              gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
++              return 0;
++      } else if (unlikely(!(mode & GR_FIND)))
++              return 0;
++
++      return GR_FIND;
++}
++
++__u32
++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
++                 int acc_mode)
++{
++      __u32 reqmode = GR_FIND;
++      __u32 mode;
++
++      if (unlikely(!dentry->d_inode))
++              return reqmode;
++
++      if (acc_mode & MAY_APPEND)
++              reqmode |= GR_APPEND;
++      else if (acc_mode & MAY_WRITE)
++              reqmode |= GR_WRITE;
++      if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
++              reqmode |= GR_READ;
++
++      mode =
++          gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
++                         mnt);
++
++      if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++              gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
++                             reqmode & GR_READ ? " reading" : "",
++                             reqmode & GR_WRITE ? " writing" : reqmode &
++                             GR_APPEND ? " appending" : "");
++              return reqmode;
++      } else
++          if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++      {
++              gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
++                             reqmode & GR_READ ? " reading" : "",
++                             reqmode & GR_WRITE ? " writing" : reqmode &
++                             GR_APPEND ? " appending" : "");
++              return 0;
++      } else if (unlikely((mode & reqmode) != reqmode))
++              return 0;
++
++      return reqmode;
++}
++
++__u32
++gr_acl_handle_creat(const struct dentry * dentry,
++                  const struct dentry * p_dentry,
++                  const struct vfsmount * p_mnt, int open_flags, int acc_mode,
++                  const int imode)
++{
++      __u32 reqmode = GR_WRITE | GR_CREATE;
++      __u32 mode;
++
++      if (acc_mode & MAY_APPEND)
++              reqmode |= GR_APPEND;
++      // if a directory was required or the directory already exists, then
++      // don't count this open as a read
++      if ((acc_mode & MAY_READ) &&
++          !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
++              reqmode |= GR_READ;
++      if ((open_flags & O_CREAT) &&
++          ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
++              reqmode |= GR_SETID;
++
++      mode =
++          gr_check_create(dentry, p_dentry, p_mnt,
++                          reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
++
++      if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++              gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
++                             reqmode & GR_READ ? " reading" : "",
++                             reqmode & GR_WRITE ? " writing" : reqmode &
++                             GR_APPEND ? " appending" : "");
++              return reqmode;
++      } else
++          if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++      {
++              gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
++                             reqmode & GR_READ ? " reading" : "",
++                             reqmode & GR_WRITE ? " writing" : reqmode &
++                             GR_APPEND ? " appending" : "");
++              return 0;
++      } else if (unlikely((mode & reqmode) != reqmode))
++              return 0;
++
++      return reqmode;
++}
++
++__u32
++gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
++                   const int fmode)
++{
++      __u32 mode, reqmode = GR_FIND;
++
++      if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
++              reqmode |= GR_EXEC;
++      if (fmode & S_IWOTH)
++              reqmode |= GR_WRITE;
++      if (fmode & S_IROTH)
++              reqmode |= GR_READ;
++
++      mode =
++          gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
++                         mnt);
++
++      if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++              gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
++                             reqmode & GR_READ ? " reading" : "",
++                             reqmode & GR_WRITE ? " writing" : "",
++                             reqmode & GR_EXEC ? " executing" : "");
++              return reqmode;
++      } else
++          if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++      {
++              gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
++                             reqmode & GR_READ ? " reading" : "",
++                             reqmode & GR_WRITE ? " writing" : "",
++                             reqmode & GR_EXEC ? " executing" : "");
++              return 0;
++      } else if (unlikely((mode & reqmode) != reqmode))
++              return 0;
++
++      return reqmode;
++}
++
++static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
++{
++      __u32 mode;
++
++      mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
++
++      if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
++              gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
++              return mode;
++      } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
++              gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
++              return 0;
++      } else if (unlikely((mode & (reqmode)) != (reqmode)))
++              return 0;
++
++      return (reqmode);
++}
++
++__u32
++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++      return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
++                   umode_t *modeptr)
++{
++      umode_t mode;
++
++      *modeptr &= ~gr_acl_umask();
++      mode = *modeptr;
++
++      if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
++              return 1;
++
++      if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
++                   ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
++              return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
++                                 GR_CHMOD_ACL_MSG);
++      } else {
++              return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
++      }
++}
++
++__u32
++gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
++                         GR_UNIXCONNECT_ACL_MSG);
++}
++
++/* hardlinks require at minimum create and link permission;
++   any additional privilege required is based on the
++   privilege of the file being linked to
++*/
++__u32
++gr_acl_handle_link(const struct dentry * new_dentry,
++                 const struct dentry * parent_dentry,
++                 const struct vfsmount * parent_mnt,
++                 const struct dentry * old_dentry,
++                 const struct vfsmount * old_mnt, const struct filename *to)
++{
++      __u32 mode;
++      __u32 needmode = GR_CREATE | GR_LINK;
++      __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
++
++      mode =
++          gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
++                        old_mnt);
++
++      if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
++              gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
++              return mode;
++      } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
++              gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
++              return 0;
++      } else if (unlikely((mode & needmode) != needmode))
++              return 0;
++
++      return 1;
++}
++
++__u32
++gr_acl_handle_symlink(const struct dentry * new_dentry,
++                    const struct dentry * parent_dentry,
++                    const struct vfsmount * parent_mnt, const struct filename *from)
++{
++      __u32 needmode = GR_WRITE | GR_CREATE;
++      __u32 mode;
++
++      mode =
++          gr_check_create(new_dentry, parent_dentry, parent_mnt,
++                          GR_CREATE | GR_AUDIT_CREATE |
++                          GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
++
++      if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
++              gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
++              return mode;
++      } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
++              gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
++              return 0;
++      } else if (unlikely((mode & needmode) != needmode))
++              return 0;
++
++      return (GR_WRITE | GR_CREATE);
++}
++
++static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
++{
++      __u32 mode;
++
++      mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
++
++      if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
++              gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
++              return mode;
++      } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
++              gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
++              return 0;
++      } else if (unlikely((mode & (reqmode)) != (reqmode)))
++              return 0;
++
++      return (reqmode);
++}
++
++__u32
++gr_acl_handle_mknod(const struct dentry * new_dentry,
++                  const struct dentry * parent_dentry,
++                  const struct vfsmount * parent_mnt,
++                  const int mode)
++{
++      __u32 reqmode = GR_WRITE | GR_CREATE;
++      if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
++              reqmode |= GR_SETID;
++
++      return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
++                                reqmode, GR_MKNOD_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_mkdir(const struct dentry *new_dentry,
++                  const struct dentry *parent_dentry,
++                  const struct vfsmount *parent_mnt)
++{
++      return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
++                                GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
++}
++
++#define RENAME_CHECK_SUCCESS(old, new) \
++      (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
++       ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
++
++int
++gr_acl_handle_rename(struct dentry *new_dentry,
++                   struct dentry *parent_dentry,
++                   const struct vfsmount *parent_mnt,
++                   struct dentry *old_dentry,
++                   struct inode *old_parent_inode,
++                   struct vfsmount *old_mnt, const struct filename *newname)
++{
++      __u32 comp1, comp2;
++      int error = 0;
++
++      if (unlikely(!gr_acl_is_enabled()))
++              return 0;
++
++      if (!new_dentry->d_inode) {
++              comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
++                                      GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
++                                      GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
++              comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
++                                     GR_DELETE | GR_AUDIT_DELETE |
++                                     GR_AUDIT_READ | GR_AUDIT_WRITE |
++                                     GR_SUPPRESS, old_mnt);
++      } else {
++              comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
++                                     GR_CREATE | GR_DELETE |
++                                     GR_AUDIT_CREATE | GR_AUDIT_DELETE |
++                                     GR_AUDIT_READ | GR_AUDIT_WRITE |
++                                     GR_SUPPRESS, parent_mnt);
++              comp2 =
++                  gr_search_file(old_dentry,
++                                 GR_READ | GR_WRITE | GR_AUDIT_READ |
++                                 GR_DELETE | GR_AUDIT_DELETE |
++                                 GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
++      }
++
++      if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
++          ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
++              gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
++      else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
++               && !(comp2 & GR_SUPPRESS)) {
++              gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
++              error = -EACCES;
++      } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
++              error = -EACCES;
++
++      return error;
++}
++
++void
++gr_acl_handle_exit(void)
++{
++      u16 id;
++      char *rolename;
++
++      if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
++          !(current->role->roletype & GR_ROLE_PERSIST))) {
++              id = current->acl_role_id;
++              rolename = current->role->rolename;
++              gr_set_acls(1);
++              gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
++      }
++
++      gr_put_exec_file(current);
++      return;
++}
++
++int
++gr_acl_handle_procpidmem(const struct task_struct *task)
++{
++      if (unlikely(!gr_acl_is_enabled()))
++              return 0;
++
++      if (task != current && task->acl->mode & GR_PROTPROCFD)
++              return -EACCES;
++
++      return 0;
++}
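
Every handler in gracl_fs.c above funnels through the same three-way decision: grant when every required bit came back (audit-logging if asked), deny silently when GR_SUPPRESS is set, otherwise deny and log. A toy userspace version of that decision follows; the bit values are invented and printf() stands in for the gr_log_* helpers.

#include <stdint.h>
#include <stdio.h>

#define REQ_READ        0x1
#define REQ_WRITE       0x2
#define GRANT_AUDIT     0x4     /* log even though the access is allowed   */
#define GRANT_SUPPRESS  0x8     /* deny without logging                    */

/* returns the granted required bits, or 0 if the access is denied */
static uint32_t decide(uint32_t granted, uint32_t reqmode, const char *what)
{
        if ((granted & reqmode) == reqmode) {
                if (granted & GRANT_AUDIT)
                        printf("audit: allowed %s\n", what);
                return reqmode;
        }
        if (!(granted & GRANT_SUPPRESS))
                printf("denied %s\n", what);
        return 0;
}

int main(void)
{
        decide(REQ_READ | REQ_WRITE | GRANT_AUDIT, REQ_WRITE, "write /tmp/x");
        decide(REQ_READ, REQ_WRITE, "write /etc/passwd");
        decide(REQ_READ | GRANT_SUPPRESS, REQ_WRITE, "write /etc/shadow");
        return 0;
}
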
+diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
+new file mode 100644
+index 0000000..8132048
+--- /dev/null
++++ b/grsecurity/gracl_ip.c
+@@ -0,0 +1,387 @@
++#include <linux/kernel.h>
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <net/sock.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/skbuff.h>
++#include <linux/ip.h>
++#include <linux/udp.h>
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++#define GR_BIND                       0x01
++#define GR_CONNECT            0x02
++#define GR_INVERT             0x04
++#define GR_BINDOVERRIDE               0x08
++#define GR_CONNECTOVERRIDE    0x10
++#define GR_SOCK_FAMILY                0x20
++
++static const char * gr_protocols[IPPROTO_MAX] = {
++      "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
++      "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
++      "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
++      "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
++      "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
++      "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
++      "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
++      "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
++      "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
++      "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak", 
++      "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf", 
++      "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
++      "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
++      "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
++      "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
++      "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
++      "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
++      "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
++      "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
++      "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
++      "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
++      "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
++      "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
++      "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
++      "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
++      "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
++      "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
++      "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
++      "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
++      "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
++      "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
++      "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
++      };
++
++static const char * gr_socktypes[SOCK_MAX] = {
++      "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6", 
++      "unknown:7", "unknown:8", "unknown:9", "packet"
++      };
++
++static const char * gr_sockfamilies[AF_MAX+1] = {
++      "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
++      "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
++      "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
++      "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
++      };
++
++const char *
++gr_proto_to_name(unsigned char proto)
++{
++      return gr_protocols[proto];
++}
++
++const char *
++gr_socktype_to_name(unsigned char type)
++{
++      return gr_socktypes[type];
++}
++
++const char *
++gr_sockfamily_to_name(unsigned char family)
++{
++      return gr_sockfamilies[family];
++}
++
++int
++gr_search_socket(const int domain, const int type, const int protocol)
++{
++      struct acl_subject_label *curr;
++      const struct cred *cred = current_cred();
++
++      if (unlikely(!gr_acl_is_enabled()))
++              goto exit;
++
++      if ((domain < 0) || (type < 0) || (protocol < 0) ||
++          (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
++              goto exit;      // let the kernel handle it
++
++      curr = current->acl;
++
++      if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
++              /* the family is allowed, if this is PF_INET allow it only if
++                 the extra sock type/protocol checks pass */
++              if (domain == PF_INET)
++                      goto inet_check;
++              goto exit;
++      } else {
++              if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
++                      __u32 fakeip = 0;
++                      security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++                                     current->role->roletype, GR_GLOBAL_UID(cred->uid),
++                                     GR_GLOBAL_GID(cred->gid), current->exec_file ?
++                                     gr_to_filename(current->exec_file->f_path.dentry,
++                                     current->exec_file->f_path.mnt) :
++                                     curr->filename, curr->filename,
++                                     &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
++                                     &current->signal->saved_ip);
++                      goto exit;
++              }
++              goto exit_fail;
++      }
++
++inet_check:
++      /* the rest of this checking is for IPv4 only */
++      if (!curr->ips)
++              goto exit;
++
++      if ((curr->ip_type & (1U << type)) &&
++          (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
++              goto exit;
++
++      if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
++              /* we don't place acls on raw sockets, and sometimes
++                 dgram/ip sockets are opened for ioctl and not
++                 bind/connect, so we'll fake a bind learn log */
++              if (type == SOCK_RAW || type == SOCK_PACKET) {
++                      __u32 fakeip = 0;
++                      security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++                                     current->role->roletype, GR_GLOBAL_UID(cred->uid),
++                                     GR_GLOBAL_GID(cred->gid), current->exec_file ?
++                                     gr_to_filename(current->exec_file->f_path.dentry,
++                                     current->exec_file->f_path.mnt) :
++                                     curr->filename, curr->filename,
++                                     &fakeip, 0, type,
++                                     protocol, GR_CONNECT, &current->signal->saved_ip);
++              } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
++                      __u32 fakeip = 0;
++                      security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++                                     current->role->roletype, GR_GLOBAL_UID(cred->uid),
++                                     GR_GLOBAL_GID(cred->gid), current->exec_file ?
++                                     gr_to_filename(current->exec_file->f_path.dentry,
++                                     current->exec_file->f_path.mnt) :
++                                     curr->filename, curr->filename,
++                                     &fakeip, 0, type,
++                                     protocol, GR_BIND, &current->signal->saved_ip);
++              }
++              /* we'll log when they use connect or bind */
++              goto exit;
++      }
++
++exit_fail:
++      if (domain == PF_INET)
++              gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain), 
++                          gr_socktype_to_name(type), gr_proto_to_name(protocol));
++      else
++#ifndef CONFIG_IPV6
++              if (domain != PF_INET6)
++#endif
++              gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain), 
++                          gr_socktype_to_name(type), protocol);
++
++      return 0;
++exit:
++      return 1;
++}
++
++int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
++{
++      if ((ip->mode & mode) &&
++          (ip_port >= ip->low) &&
++          (ip_port <= ip->high) &&
++          ((ntohl(ip_addr) & our_netmask) ==
++           (ntohl(our_addr) & our_netmask))
++          && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
++          && (ip->type & (1U << type))) {
++              if (ip->mode & GR_INVERT)
++                      return 2; // specifically denied
++              else
++                      return 1; // allowed
++      }
++
++      return 0; // not specifically allowed, may continue parsing
++}
++
++static int
++gr_search_connectbind(const int full_mode, struct sock *sk,
++                    struct sockaddr_in *addr, const int type)
++{
++      char iface[IFNAMSIZ] = {0};
++      struct acl_subject_label *curr;
++      struct acl_ip_label *ip;
++      struct inet_sock *isk;
++      struct net_device *dev;
++      struct in_device *idev;
++      unsigned long i;
++      int ret;
++      int mode = full_mode & (GR_BIND | GR_CONNECT);
++      __u32 ip_addr = 0;
++      __u32 our_addr;
++      __u32 our_netmask;
++      char *p;
++      __u16 ip_port = 0;
++      const struct cred *cred = current_cred();
++
++      if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
++              return 0;
++
++      curr = current->acl;
++      isk = inet_sk(sk);
++
++      /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
++      if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
++              addr->sin_addr.s_addr = curr->inaddr_any_override;
++      if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
++              struct sockaddr_in saddr;
++              int err;
++
++              saddr.sin_family = AF_INET;
++              saddr.sin_addr.s_addr = curr->inaddr_any_override;
++              saddr.sin_port = isk->inet_sport;
++
++              err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
++              if (err)
++                      return err;
++
++              err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
++              if (err)
++                      return err;
++      }
++
++      if (!curr->ips)
++              return 0;
++
++      ip_addr = addr->sin_addr.s_addr;
++      ip_port = ntohs(addr->sin_port);
++
++      if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
++              security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++                             current->role->roletype, GR_GLOBAL_UID(cred->uid),
++                             GR_GLOBAL_GID(cred->gid), current->exec_file ?
++                             gr_to_filename(current->exec_file->f_path.dentry,
++                             current->exec_file->f_path.mnt) :
++              /* if the cap isn't yet covered by the computed mask but is covered by this
++                 subject's mask, add it to the computed mask; if this subject also lowers
++                 the cap, add it to the set of dropped capabilities.  The closest subject
++                 in the chain with an opinion on a capability therefore decides its fate
++               */
++
++      for (i = 0; i < curr->ip_num; i++) {
++              ip = *(curr->ips + i);
++              if (ip->iface != NULL) {
++                      strncpy(iface, ip->iface, IFNAMSIZ - 1);
++                      p = strchr(iface, ':');
++                      if (p != NULL)
++                              *p = '\0';
++                      dev = dev_get_by_name(sock_net(sk), iface);
++                      if (dev == NULL)
++                              continue;
++                      idev = in_dev_get(dev);
++                      if (idev == NULL) {
++                              dev_put(dev);
++                              continue;
++                      }
++                      rcu_read_lock();
++                      for_ifa(idev) {
++                              if (!strcmp(ip->iface, ifa->ifa_label)) {
++                                      our_addr = ifa->ifa_address;
++                                      our_netmask = 0xffffffff;
++                                      ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
++                                      if (ret == 1) {
++                                              rcu_read_unlock();
++                                              in_dev_put(idev);
++                                              dev_put(dev);
++                                              return 0;
++                                      } else if (ret == 2) {
++                                              rcu_read_unlock();
++                                              in_dev_put(idev);
++                                              dev_put(dev);
++                                              goto denied;
++                                      }
++                              }
++                      } endfor_ifa(idev);
++                      rcu_read_unlock();
++                      in_dev_put(idev);
++                      dev_put(dev);
++              } else {
++                      our_addr = ip->addr;
++                      our_netmask = ip->netmask;
++                      ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
++                      if (ret == 1)
++                              return 0;
++                      else if (ret == 2)
++                              goto denied;
++              }
++      }
++
++denied:
++      if (mode == GR_BIND)
++              gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
++      else if (mode == GR_CONNECT)
++              gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
++
++      return -EACCES;
++}
++
++int
++gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
++{
++      /* always allow disconnection of dgram sockets with connect */
++      if (addr->sin_family == AF_UNSPEC)
++              return 0;
++      return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
++}
++
++int
++gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
++{
++      return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
++}
++
++int gr_search_listen(struct socket *sock)
++{
++      struct sock *sk = sock->sk;
++      struct sockaddr_in addr;
++
++      addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
++      addr.sin_port = inet_sk(sk)->inet_sport;
++
++      return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
++}
++
++int gr_search_accept(struct socket *sock)
++{
++      struct sock *sk = sock->sk;
++      struct sockaddr_in addr;
++
++      addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
++      addr.sin_port = inet_sk(sk)->inet_sport;
++
++      return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
++}
++
++int
++gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
++{
++      if (addr)
++              return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
++      else {
++              struct sockaddr_in sin;
++              const struct inet_sock *inet = inet_sk(sk);
++
++              sin.sin_addr.s_addr = inet->inet_daddr;
++              sin.sin_port = inet->inet_dport;
++
++              return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
++      }
++}
++
++int
++gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
++{
++      struct sockaddr_in sin;
++
++      if (unlikely(skb->len < sizeof (struct udphdr)))
++              return 0;       // skip this packet
++
++      sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
++      sin.sin_port = udp_hdr(skb)->source;
++
++      return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
++}
+diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
+new file mode 100644
+index 0000000..25f54ef
+--- /dev/null
++++ b/grsecurity/gracl_learn.c
+@@ -0,0 +1,207 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/poll.h>
++#include <linux/string.h>
++#include <linux/file.h>
++#include <linux/types.h>
++#include <linux/vmalloc.h>
++#include <linux/grinternal.h>
++
++extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
++                                 size_t count, loff_t *ppos);
++extern int gr_acl_is_enabled(void);
++
++static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
++static int gr_learn_attached;
++
++/* use a 512k buffer */
++#define LEARN_BUFFER_SIZE (512 * 1024)
++
++static DEFINE_SPINLOCK(gr_learn_lock);
++static DEFINE_MUTEX(gr_learn_user_mutex);
++
++/* we need to maintain two buffers, so that the kernel context of grlearn
++   uses a semaphore around the userspace copying, and the other kernel contexts
++   use a spinlock when copying into the buffer, since they cannot sleep
++*/
++static char *learn_buffer;
++static char *learn_buffer_user;
++static int learn_buffer_len;
++static int learn_buffer_user_len;
++
++static ssize_t
++read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
++{
++      DECLARE_WAITQUEUE(wait, current);
++      ssize_t retval = 0;
++
++      add_wait_queue(&learn_wait, &wait);
++      set_current_state(TASK_INTERRUPTIBLE);
++      do {
++              mutex_lock(&gr_learn_user_mutex);
++              spin_lock(&gr_learn_lock);
++              if (learn_buffer_len)
++                      break;
++              spin_unlock(&gr_learn_lock);
++              mutex_unlock(&gr_learn_user_mutex);
++              if (file->f_flags & O_NONBLOCK) {
++                      retval = -EAGAIN;
++                      goto out;
++              }
++              if (signal_pending(current)) {
++                      retval = -ERESTARTSYS;
++                      goto out;
++              }
++
++              schedule();
++      } while (1);
++
++      memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
++      learn_buffer_user_len = learn_buffer_len;
++      retval = learn_buffer_len;
++      learn_buffer_len = 0;
++
++      spin_unlock(&gr_learn_lock);
++
++      if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
++              retval = -EFAULT;
++
++      mutex_unlock(&gr_learn_user_mutex);
++out:
++      set_current_state(TASK_RUNNING);
++      remove_wait_queue(&learn_wait, &wait);
++      return retval;
++}
++
++static unsigned int
++poll_learn(struct file * file, poll_table * wait)
++{
++      poll_wait(file, &learn_wait, wait);
++
++      if (learn_buffer_len)
++              return (POLLIN | POLLRDNORM);
++
++      return 0;
++}
++
++void
++gr_clear_learn_entries(void)
++{
++      char *tmp;
++
++      mutex_lock(&gr_learn_user_mutex);
++      spin_lock(&gr_learn_lock);
++      tmp = learn_buffer;
++      learn_buffer = NULL;
++      spin_unlock(&gr_learn_lock);
++      if (tmp)
++              vfree(tmp);
++      if (learn_buffer_user != NULL) {
++              vfree(learn_buffer_user);
++              learn_buffer_user = NULL;
++      }
++      learn_buffer_len = 0;
++      mutex_unlock(&gr_learn_user_mutex);
++
++      return;
++}
++
++void
++gr_add_learn_entry(const char *fmt, ...)
++{
++      va_list args;
++      unsigned int len;
++
++      if (!gr_learn_attached)
++              return;
++
++      spin_lock(&gr_learn_lock);
++
++      /* leave a gap at the end so we know when it's "full" but don't have to
++         compute the exact length of the string we're trying to append
++      */
++      if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
++              spin_unlock(&gr_learn_lock);
++              wake_up_interruptible(&learn_wait);
++              return;
++      }
++      if (learn_buffer == NULL) {
++              spin_unlock(&gr_learn_lock);
++              return;
++      }
++
++      va_start(args, fmt);
++      len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
++      va_end(args);
++
++      learn_buffer_len += len + 1;
++
++      spin_unlock(&gr_learn_lock);
++      wake_up_interruptible(&learn_wait);
++
++      return;
++}
++
++static int
++open_learn(struct inode *inode, struct file *file)
++{
++      if (file->f_mode & FMODE_READ && gr_learn_attached)
++              return -EBUSY;
++      if (file->f_mode & FMODE_READ) {
++              int retval = 0;
++              mutex_lock(&gr_learn_user_mutex);
++              if (learn_buffer == NULL)
++                      learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
++              if (learn_buffer_user == NULL)
++                      learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
++              if (learn_buffer == NULL) {
++                      retval = -ENOMEM;
++                      goto out_error;
++              }
++              if (learn_buffer_user == NULL) {
++                      retval = -ENOMEM;
++                      goto out_error;
++              }
++              learn_buffer_len = 0;
++              learn_buffer_user_len = 0;
++              gr_learn_attached = 1;
++out_error:
++              mutex_unlock(&gr_learn_user_mutex);
++              return retval;
++      }
++      return 0;
++}
++
++static int
++close_learn(struct inode *inode, struct file *file)
++{
++      if (file->f_mode & FMODE_READ) {
++              char *tmp = NULL;
++              mutex_lock(&gr_learn_user_mutex);
++              spin_lock(&gr_learn_lock);
++              tmp = learn_buffer;
++              learn_buffer = NULL;
++              spin_unlock(&gr_learn_lock);
++              if (tmp)
++                      vfree(tmp);
++              if (learn_buffer_user != NULL) {
++                      vfree(learn_buffer_user);
++                      learn_buffer_user = NULL;
++              }
++              learn_buffer_len = 0;
++              learn_buffer_user_len = 0;
++              gr_learn_attached = 0;
++              mutex_unlock(&gr_learn_user_mutex);
++      }
++
++      return 0;
++}
++              
++const struct file_operations grsec_fops = {
++      .read           = read_learn,
++      .write          = write_grsec_handler,
++      .open           = open_learn,
++      .release        = close_learn,
++      .poll           = poll_learn,
++};
+diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
+new file mode 100644
+index 0000000..39645c9
+--- /dev/null
++++ b/grsecurity/gracl_res.c
+@@ -0,0 +1,68 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/gracl.h>
++#include <linux/grinternal.h>
++
++static const char *restab_log[] = {
++      [RLIMIT_CPU] = "RLIMIT_CPU",
++      [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
++      [RLIMIT_DATA] = "RLIMIT_DATA",
++      [RLIMIT_STACK] = "RLIMIT_STACK",
++      [RLIMIT_CORE] = "RLIMIT_CORE",
++      [RLIMIT_RSS] = "RLIMIT_RSS",
++      [RLIMIT_NPROC] = "RLIMIT_NPROC",
++      [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
++      [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
++      [RLIMIT_AS] = "RLIMIT_AS",
++      [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
++      [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
++      [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
++      [RLIMIT_NICE] = "RLIMIT_NICE",
++      [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
++      [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
++      [GR_CRASH_RES] = "RLIMIT_CRASH"
++};
++
++void
++gr_log_resource(const struct task_struct *task,
++              const int res, const unsigned long wanted, const int gt)
++{
++      const struct cred *cred;
++      unsigned long rlim;
++
++      if (!gr_acl_is_enabled() && !grsec_resource_logging)
++              return;
++
++      // not yet supported resource
++      if (unlikely(!restab_log[res]))
++              return;
++
++      if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
++              rlim = task_rlimit_max(task, res);
++      else
++              rlim = task_rlimit(task, res);
++
++      if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
++              return;
++
++      rcu_read_lock();
++      cred = __task_cred(task);
++
++      if (res == RLIMIT_NPROC && 
++          (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) || 
++           cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
++              goto out_rcu_unlock;
++      else if (res == RLIMIT_MEMLOCK &&
++               cap_raised(cred->cap_effective, CAP_IPC_LOCK))
++              goto out_rcu_unlock;
++      else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
++              goto out_rcu_unlock;
++      rcu_read_unlock();
++
++      gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
++
++      return;
++out_rcu_unlock:
++      rcu_read_unlock();
++      return;
++}
+diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
+new file mode 100644
+index 0000000..3c38bfe
+--- /dev/null
++++ b/grsecurity/gracl_segv.c
+@@ -0,0 +1,305 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <asm/mman.h>
++#include <net/sock.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/timer.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
++#include <linux/magic.h>
++#include <linux/pagemap.h>
++#include "../fs/btrfs/async-thread.h"
++#include "../fs/btrfs/ctree.h"
++#include "../fs/btrfs/btrfs_inode.h"
++#endif
++
++static struct crash_uid *uid_set;
++static unsigned short uid_used;
++static DEFINE_SPINLOCK(gr_uid_lock);
++extern rwlock_t gr_inode_lock;
++extern struct acl_subject_label *
++      lookup_acl_subj_label(const ino_t inode, const dev_t dev,
++                            struct acl_role_label *role);
++
++static inline dev_t __get_dev(const struct dentry *dentry)
++{
++#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
++      if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
++              return BTRFS_I(dentry->d_inode)->root->anon_dev;
++      else
++#endif
++              return dentry->d_sb->s_dev;
++}
++
++int
++gr_init_uidset(void)
++{
++      uid_set =
++          kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
++      uid_used = 0;
++
++      return uid_set ? 1 : 0;
++}
++
++void
++gr_free_uidset(void)
++{
++      if (uid_set)
++              kfree(uid_set);
++
++      return;
++}
++
++int
++gr_find_uid(const uid_t uid)
++{
++      struct crash_uid *tmp = uid_set;
++      uid_t buid;
++      int low = 0, high = uid_used - 1, mid;
++
++      while (high >= low) {
++              mid = (low + high) >> 1;
++              buid = tmp[mid].uid;
++              if (buid == uid)
++                      return mid;
++              if (buid > uid)
++                      high = mid - 1;
++              if (buid < uid)
++                      low = mid + 1;
++      }
++
++      return -1;
++}
++
++static __inline__ void
++gr_insertsort(void)
++{
++      unsigned short i, j;
++      struct crash_uid index;
++
++      for (i = 1; i < uid_used; i++) {
++              index = uid_set[i];
++              j = i;
++              while ((j > 0) && uid_set[j - 1].uid > index.uid) {
++                      uid_set[j] = uid_set[j - 1];
++                      j--;
++              }
++              uid_set[j] = index;
++      }
++
++      return;
++}
++
++static __inline__ void
++gr_insert_uid(const kuid_t kuid, const unsigned long expires)
++{
++      int loc;
++      uid_t uid = GR_GLOBAL_UID(kuid);
++
++      if (uid_used == GR_UIDTABLE_MAX)
++              return;
++
++      loc = gr_find_uid(uid);
++
++      if (loc >= 0) {
++              uid_set[loc].expires = expires;
++              return;
++      }
++
++      uid_set[uid_used].uid = uid;
++      uid_set[uid_used].expires = expires;
++      uid_used++;
++
++      gr_insertsort();
++
++      return;
++}
++
++void
++gr_remove_uid(const unsigned short loc)
++{
++      unsigned short i;
++
++      for (i = loc + 1; i < uid_used; i++)
++              uid_set[i - 1] = uid_set[i];
++
++      uid_used--;
++
++      return;
++}
++
++int
++gr_check_crash_uid(const kuid_t kuid)
++{
++      int loc;
++      int ret = 0;
++      uid_t uid;
++
++      if (unlikely(!gr_acl_is_enabled()))
++              return 0;
++
++      uid = GR_GLOBAL_UID(kuid);
++
++      spin_lock(&gr_uid_lock);
++      loc = gr_find_uid(uid);
++
++      if (loc < 0)
++              goto out_unlock;
++
++      if (time_before_eq(uid_set[loc].expires, get_seconds()))
++              gr_remove_uid(loc);
++      else
++              ret = 1;
++
++out_unlock:
++      spin_unlock(&gr_uid_lock);
++      return ret;
++}
++
++static __inline__ int
++proc_is_setxid(const struct cred *cred)
++{
++      if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
++          !uid_eq(cred->uid, cred->fsuid))
++              return 1;
++      if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
++          !gid_eq(cred->gid, cred->fsgid))
++              return 1;
++
++      return 0;
++}
++
++extern int gr_fake_force_sig(int sig, struct task_struct *t);
++
++void
++gr_handle_crash(struct task_struct *task, const int sig)
++{
++      struct acl_subject_label *curr;
++      struct task_struct *tsk, *tsk2;
++      const struct cred *cred;
++      const struct cred *cred2;
++
++      if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
++              return;
++
++      if (unlikely(!gr_acl_is_enabled()))
++              return;
++
++      curr = task->acl;
++
++      if (!(curr->resmask & (1U << GR_CRASH_RES)))
++              return;
++
++      if (time_before_eq(curr->expires, get_seconds())) {
++              curr->expires = 0;
++              curr->crashes = 0;
++      }
++
++      curr->crashes++;
++
++      if (!curr->expires)
++              curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
++
++      if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
++          time_after(curr->expires, get_seconds())) {
++              rcu_read_lock();
++              cred = __task_cred(task);
++              if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
++                      gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
++                      spin_lock(&gr_uid_lock);
++                      gr_insert_uid(cred->uid, curr->expires);
++                      spin_unlock(&gr_uid_lock);
++                      curr->expires = 0;
++                      curr->crashes = 0;
++                      read_lock(&tasklist_lock);
++                      do_each_thread(tsk2, tsk) {
++                              cred2 = __task_cred(tsk);
++                              if (tsk != task && uid_eq(cred2->uid, cred->uid))
++                                      gr_fake_force_sig(SIGKILL, tsk);
++                      } while_each_thread(tsk2, tsk);
++                      read_unlock(&tasklist_lock);
++              } else {
++                      gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
++                      read_lock(&tasklist_lock);
++                      read_lock(&grsec_exec_file_lock);
++                      do_each_thread(tsk2, tsk) {
++                              if (likely(tsk != task)) {
++                                      // if this thread has the same subject as the one that triggered
++                                      // RES_CRASH and it's the same binary, kill it
++                                      if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
++                                              gr_fake_force_sig(SIGKILL, tsk);
++                              }
++                      } while_each_thread(tsk2, tsk);
++                      read_unlock(&grsec_exec_file_lock);
++                      read_unlock(&tasklist_lock);
++              }
++              rcu_read_unlock();
++      }
++
++      return;
++}
++
++int
++gr_check_crash_exec(const struct file *filp)
++{
++      struct acl_subject_label *curr;
++
++      if (unlikely(!gr_acl_is_enabled()))
++              return 0;
++
++      read_lock(&gr_inode_lock);
++      curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
++                                   __get_dev(filp->f_path.dentry),
++                                   current->role);
++      read_unlock(&gr_inode_lock);
++
++      if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
++          (!curr->crashes && !curr->expires))
++              return 0;
++
++      if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
++          time_after(curr->expires, get_seconds()))
++              return 1;
++      else if (time_before_eq(curr->expires, get_seconds())) {
++              curr->crashes = 0;
++              curr->expires = 0;
++      }
++
++      return 0;
++}
++
++void
++gr_handle_alertkill(struct task_struct *task)
++{
++      struct acl_subject_label *curracl;
++      __u32 curr_ip;
++      struct task_struct *p, *p2;
++
++      if (unlikely(!gr_acl_is_enabled()))
++              return;
++
++      curracl = task->acl;
++      curr_ip = task->signal->curr_ip;
++
++      if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
++              read_lock(&tasklist_lock);
++              do_each_thread(p2, p) {
++                      if (p->signal->curr_ip == curr_ip)
++                              gr_fake_force_sig(SIGKILL, p);
++              } while_each_thread(p2, p);
++              read_unlock(&tasklist_lock);
++      } else if (curracl->mode & GR_KILLPROC)
++              gr_fake_force_sig(SIGKILL, task);
++
++      return;
++}
+diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
+new file mode 100644
+index 0000000..98011b0
+--- /dev/null
++++ b/grsecurity/gracl_shm.c
+@@ -0,0 +1,40 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/ipc.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++              const time_t shm_createtime, const kuid_t cuid, const int shmid)
++{
++      struct task_struct *task;
++
++      if (!gr_acl_is_enabled())
++              return 1;
++
++      rcu_read_lock();
++      read_lock(&tasklist_lock);
++
++      task = find_task_by_vpid(shm_cprid);
++
++      if (unlikely(!task))
++              task = find_task_by_vpid(shm_lapid);
++
++      if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
++                            (task_pid_nr(task) == shm_lapid)) &&
++                   (task->acl->mode & GR_PROTSHM) &&
++                   (task->acl != current->acl))) {
++              read_unlock(&tasklist_lock);
++              rcu_read_unlock();
++              gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
++              return 0;
++      }
++      read_unlock(&tasklist_lock);
++      rcu_read_unlock();
++
++      return 1;
++}
+diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
+new file mode 100644
+index 0000000..bc0be01
+--- /dev/null
++++ b/grsecurity/grsec_chdir.c
+@@ -0,0 +1,19 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++      if ((grsec_enable_chdir && grsec_enable_group &&
++           in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
++                                            !grsec_enable_group)) {
++              gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
++      }
++#endif
++      return;
++}
+diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
+new file mode 100644
+index 0000000..bd6e105
+--- /dev/null
++++ b/grsecurity/grsec_chroot.c
+@@ -0,0 +1,370 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/mount.h>
++#include <linux/types.h>
++#include "../fs/mount.h"
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
++static int gr_init_ran;
++#endif
++
++void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
++{
++#ifdef CONFIG_GRKERNSEC
++      if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
++                           path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
++#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
++                           && gr_init_ran
++#endif
++         )
++              task->gr_is_chrooted = 1;
++      else {
++#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
++              if (task_pid_nr(task) == 1 && !gr_init_ran)
++                      gr_init_ran = 1;
++#endif
++              task->gr_is_chrooted = 0;
++      }
++
++      task->gr_chroot_dentry = path->dentry;
++#endif
++      return;
++}
++
++void gr_clear_chroot_entries(struct task_struct *task)
++{
++#ifdef CONFIG_GRKERNSEC
++      task->gr_is_chrooted = 0;
++      task->gr_chroot_dentry = NULL;
++#endif
++      return;
++}     
++
++int
++gr_handle_chroot_unix(const pid_t pid)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++      struct task_struct *p;
++
++      if (unlikely(!grsec_enable_chroot_unix))
++              return 1;
++
++      if (likely(!proc_is_chrooted(current)))
++              return 1;
++
++      rcu_read_lock();
++      read_lock(&tasklist_lock);
++      p = find_task_by_vpid_unrestricted(pid);
++      if (unlikely(p && !have_same_root(current, p))) {
++              read_unlock(&tasklist_lock);
++              rcu_read_unlock();
++              gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
++              return 0;
++      }
++      read_unlock(&tasklist_lock);
++      rcu_read_unlock();
++#endif
++      return 1;
++}
++
++int
++gr_handle_chroot_nice(void)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++      if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
++              gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
++              return -EPERM;
++      }
++#endif
++      return 0;
++}
++
++int
++gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++      if (grsec_enable_chroot_nice && (niceval < task_nice(p))
++                      && proc_is_chrooted(current)) {
++              gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
++              return -EACCES;
++      }
++#endif
++      return 0;
++}
++
++int
++gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++      struct task_struct *p;
++      int ret = 0;
++      if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
++              return ret;
++
++      read_lock(&tasklist_lock);
++      do_each_pid_task(pid, type, p) {
++              if (!have_same_root(current, p)) {
++                      ret = 1;
++                      goto out;
++              }
++      } while_each_pid_task(pid, type, p);
++out:
++      read_unlock(&tasklist_lock);
++      return ret;
++#endif
++      return 0;
++}
++
++int
++gr_pid_is_chrooted(struct task_struct *p)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++      if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
++              return 0;
++
++      if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
++          !have_same_root(current, p)) {
++              return 1;
++      }
++#endif
++      return 0;
++}
++
++EXPORT_SYMBOL(gr_pid_is_chrooted);
++
++#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
++int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
++{
++      struct path path, currentroot;
++      int ret = 0;
++
++      path.dentry = (struct dentry *)u_dentry;
++      path.mnt = (struct vfsmount *)u_mnt;
++      get_fs_root(current->fs, &currentroot);
++      if (path_is_under(&path, &currentroot))
++              ret = 1;
++      path_put(&currentroot);
++
++      return ret;
++}
++#endif
++
++int
++gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++      if (!grsec_enable_chroot_fchdir)
++              return 1;
++
++      if (!proc_is_chrooted(current))
++              return 1;
++      else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
++              gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
++              return 0;
++      }
++#endif
++      return 1;
++}
++
++int
++gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++              const time_t shm_createtime)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++      struct task_struct *p;
++      time_t starttime;
++
++      if (unlikely(!grsec_enable_chroot_shmat))
++              return 1;
++
++      if (likely(!proc_is_chrooted(current)))
++              return 1;
++
++      rcu_read_lock();
++      read_lock(&tasklist_lock);
++
++      if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
++              starttime = p->start_time.tv_sec;
++              if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
++                      if (have_same_root(current, p)) {
++                              goto allow;
++                      } else {
++                              read_unlock(&tasklist_lock);
++                              rcu_read_unlock();
++                              gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
++                              return 0;
++                      }
++              }
++              /* creator exited, pid reuse, fall through to next check */
++      }
++      if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
++              if (unlikely(!have_same_root(current, p))) {
++                      read_unlock(&tasklist_lock);
++                      rcu_read_unlock();
++                      gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
++                      return 0;
++              }
++      }
++
++allow:
++      read_unlock(&tasklist_lock);
++      rcu_read_unlock();
++#endif
++      return 1;
++}
++
++void
++gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++      if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
++              gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
++#endif
++      return;
++}
++
++int
++gr_handle_chroot_mknod(const struct dentry *dentry,
++                     const struct vfsmount *mnt, const int mode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++      if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) && 
++          proc_is_chrooted(current)) {
++              gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
++              return -EPERM;
++      }
++#endif
++      return 0;
++}
++
++int
++gr_handle_chroot_mount(const struct dentry *dentry,
++                     const struct vfsmount *mnt, const char *dev_name)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++      if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
++              gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
++              return -EPERM;
++      }
++#endif
++      return 0;
++}
++
++int
++gr_handle_chroot_pivot(void)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++      if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
++              gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
++              return -EPERM;
++      }
++#endif
++      return 0;
++}
++
++int
++gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++      if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
++          !gr_is_outside_chroot(dentry, mnt)) {
++              gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
++              return -EPERM;
++      }
++#endif
++      return 0;
++}
++
++extern const char *captab_log[];
++extern int captab_log_entries;
++
++int
++gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++      if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
++              kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
++              if (cap_raised(chroot_caps, cap)) {
++                      if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
++                              gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
++                      }
++                      return 0;
++              }
++      }
++#endif
++      return 1;
++}
++
++int
++gr_chroot_is_capable(const int cap)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++      return gr_task_chroot_is_capable(current, current_cred(), cap);
++#endif
++      return 1;
++}
++
++int
++gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++      if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
++              kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
++              if (cap_raised(chroot_caps, cap)) {
++                      return 0;
++              }
++      }
++#endif
++      return 1;
++}
++
++int
++gr_chroot_is_capable_nolog(const int cap)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++      return gr_task_chroot_is_capable_nolog(current, cap);
++#endif
++      return 1;
++}
++
++int
++gr_handle_chroot_sysctl(const int op)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++      if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
++          proc_is_chrooted(current))
++              return -EACCES;
++#endif
++      return 0;
++}
++
++void
++gr_handle_chroot_chdir(const struct path *path)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++      if (grsec_enable_chroot_chdir)
++              set_fs_pwd(current->fs, path);
++#endif
++      return;
++}
++
++int
++gr_handle_chroot_chmod(const struct dentry *dentry,
++                     const struct vfsmount *mnt, const int mode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++      /* allow chmod +s on directories, but not files */
++      if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
++          ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
++          proc_is_chrooted(current)) {
++              gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
++              return -EPERM;
++      }
++#endif
++      return 0;
++}
+diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
+new file mode 100644
+index 0000000..ce65ceb
+--- /dev/null
++++ b/grsecurity/grsec_disabled.c
+@@ -0,0 +1,434 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/kdev_t.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/sysctl.h>
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++void
++pax_set_initial_flags(struct linux_binprm *bprm)
++{
++      return;
++}
++#endif
++
++#ifdef CONFIG_SYSCTL
++__u32
++gr_handle_sysctl(const struct ctl_table * table, const int op)
++{
++      return 0;
++}
++#endif
++
++#ifdef CONFIG_TASKSTATS
++int gr_is_taskstats_denied(int pid)
++{
++      return 0;
++}
++#endif
++
++int
++gr_acl_is_enabled(void)
++{
++      return 0;
++}
++
++void
++gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode) 
++{
++      return;
++}
++
++int
++gr_handle_rawio(const struct inode *inode)
++{
++      return 0;
++}
++
++void
++gr_acl_handle_psacct(struct task_struct *task, const long code)
++{
++      return;
++}
++
++int
++gr_handle_ptrace(struct task_struct *task, const long request)
++{
++      return 0;
++}
++
++int
++gr_handle_proc_ptrace(struct task_struct *task)
++{
++      return 0;
++}
++
++int
++gr_set_acls(const int type)
++{
++      return 0;
++}
++
++int
++gr_check_hidden_task(const struct task_struct *tsk)
++{
++      return 0;
++}
++
++int
++gr_check_protected_task(const struct task_struct *task)
++{
++      return 0;
++}
++
++int
++gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
++{
++      return 0;
++}
++
++void
++gr_copy_label(struct task_struct *tsk)
++{
++      return;
++}
++
++void
++gr_set_pax_flags(struct task_struct *task)
++{
++      return;
++}
++
++int
++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
++                const int unsafe_share)
++{
++      return 0;
++}
++
++void
++gr_handle_delete(const ino_t ino, const dev_t dev)
++{
++      return;
++}
++
++void
++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return;
++}
++
++void
++gr_handle_crash(struct task_struct *task, const int sig)
++{
++      return;
++}
++
++int
++gr_check_crash_exec(const struct file *filp)
++{
++      return 0;
++}
++
++int
++gr_check_crash_uid(const kuid_t uid)
++{
++      return 0;
++}
++
++void
++gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++               struct dentry *old_dentry,
++               struct dentry *new_dentry,
++               struct vfsmount *mnt, const __u8 replace)
++{
++      return;
++}
++
++int
++gr_search_socket(const int family, const int type, const int protocol)
++{
++      return 1;
++}
++
++int
++gr_search_connectbind(const int mode, const struct socket *sock,
++                    const struct sockaddr_in *addr)
++{
++      return 0;
++}
++
++void
++gr_handle_alertkill(struct task_struct *task)
++{
++      return;
++}
++
++__u32
++gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_hidden_file(const struct dentry * dentry,
++                        const struct vfsmount * mnt)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
++                 int acc_mode)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++      return 1;
++}
++
++int
++gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
++                 unsigned int *vm_flags)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_truncate(const struct dentry * dentry,
++                     const struct vfsmount * mnt)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_access(const struct dentry * dentry,
++                   const struct vfsmount * mnt, const int fmode)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
++                  umode_t *mode)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++      return 1;
++}
++
++void
++grsecurity_init(void)
++{
++      return;
++}
++
++umode_t gr_acl_umask(void)
++{
++      return 0;
++}
++
++__u32
++gr_acl_handle_mknod(const struct dentry * new_dentry,
++                  const struct dentry * parent_dentry,
++                  const struct vfsmount * parent_mnt,
++                  const int mode)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_mkdir(const struct dentry * new_dentry,
++                  const struct dentry * parent_dentry,
++                  const struct vfsmount * parent_mnt)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_symlink(const struct dentry * new_dentry,
++                    const struct dentry * parent_dentry,
++                    const struct vfsmount * parent_mnt, const struct filename *from)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_link(const struct dentry * new_dentry,
++                 const struct dentry * parent_dentry,
++                 const struct vfsmount * parent_mnt,
++                 const struct dentry * old_dentry,
++                 const struct vfsmount * old_mnt, const struct filename *to)
++{
++      return 1;
++}
++
++int
++gr_acl_handle_rename(const struct dentry *new_dentry,
++                   const struct dentry *parent_dentry,
++                   const struct vfsmount *parent_mnt,
++                   const struct dentry *old_dentry,
++                   const struct inode *old_parent_inode,
++                   const struct vfsmount *old_mnt, const struct filename *newname)
++{
++      return 0;
++}
++
++int
++gr_acl_handle_filldir(const struct file *file, const char *name,
++                    const int namelen, const ino_t ino)
++{
++      return 1;
++}
++
++int
++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++              const time_t shm_createtime, const kuid_t cuid, const int shmid)
++{
++      return 1;
++}
++
++int
++gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
++{
++      return 0;
++}
++
++int
++gr_search_accept(const struct socket *sock)
++{
++      return 0;
++}
++
++int
++gr_search_listen(const struct socket *sock)
++{
++      return 0;
++}
++
++int
++gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
++{
++      return 0;
++}
++
++__u32
++gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_creat(const struct dentry * dentry,
++                  const struct dentry * p_dentry,
++                  const struct vfsmount * p_mnt, int open_flags, int acc_mode,
++                  const int imode)
++{
++      return 1;
++}
++
++void
++gr_acl_handle_exit(void)
++{
++      return;
++}
++
++int
++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
++{
++      return 1;
++}
++
++void
++gr_set_role_label(const kuid_t uid, const kgid_t gid)
++{
++      return;
++}
++
++int
++gr_acl_handle_procpidmem(const struct task_struct *task)
++{
++      return 0;
++}
++
++int
++gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
++{
++      return 0;
++}
++
++int
++gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
++{
++      return 0;
++}
++
++void
++gr_set_kernel_label(struct task_struct *task)
++{
++      return;
++}
++
++int
++gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
++{
++      return 0;
++}
++
++int
++gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
++{
++      return 0;
++}
++
++int gr_acl_enable_at_secure(void)
++{
++      return 0;
++}
++
++dev_t gr_get_dev_from_dentry(struct dentry *dentry)
++{
++      return dentry->d_sb->s_dev;
++}
++
++void gr_put_exec_file(struct task_struct *task)
++{
++      return;
++}
++
++EXPORT_SYMBOL(gr_set_kernel_label);
++#ifdef CONFIG_SECURITY
++EXPORT_SYMBOL(gr_check_user_change);
++EXPORT_SYMBOL(gr_check_group_change);
++#endif
+diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
+new file mode 100644
+index 0000000..387032b
+--- /dev/null
++++ b/grsecurity/grsec_exec.c
+@@ -0,0 +1,187 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/binfmts.h>
++#include <linux/fs.h>
++#include <linux/types.h>
++#include <linux/grdefs.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/capability.h>
++#include <linux/module.h>
++#include <linux/compat.h>
++
++#include <asm/uaccess.h>
++
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++static char gr_exec_arg_buf[132];
++static DEFINE_MUTEX(gr_exec_arg_mutex);
++#endif
++
++struct user_arg_ptr {
++#ifdef CONFIG_COMPAT
++      bool is_compat;
++#endif
++      union {
++              const char __user *const __user *native;
++#ifdef CONFIG_COMPAT
++              const compat_uptr_t __user *compat;
++#endif
++      } ptr;
++};
++
++extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
++
++void
++gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
++{
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++      char *grarg = gr_exec_arg_buf;
++      unsigned int i, x, execlen = 0;
++      char c;
++
++      if (!((grsec_enable_execlog && grsec_enable_group &&
++             in_group_p(grsec_audit_gid))
++            || (grsec_enable_execlog && !grsec_enable_group)))
++              return;
++
++      mutex_lock(&gr_exec_arg_mutex);
++      memset(grarg, 0, sizeof(gr_exec_arg_buf));
++
++      for (i = 0; i < bprm->argc && execlen < 128; i++) {
++              const char __user *p;
++              unsigned int len;
++
++              p = get_user_arg_ptr(argv, i);
++              if (IS_ERR(p))
++                      goto log;
++
++              len = strnlen_user(p, 128 - execlen);
++              if (len > 128 - execlen)
++                      len = 128 - execlen;
++              else if (len > 0)
++                      len--;
++              if (copy_from_user(grarg + execlen, p, len))
++                      goto log;
++
++              /* rewrite unprintable characters */
++              for (x = 0; x < len; x++) {
++                      c = *(grarg + execlen + x);
++                      if (c < 32 || c > 126)
++                              *(grarg + execlen + x) = ' ';
++              }
++
++              execlen += len;
++              *(grarg + execlen) = ' ';
++              *(grarg + execlen + 1) = '\0';
++              execlen++;
++      }
++
++      log:
++      gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
++                      bprm->file->f_path.mnt, grarg);
++      mutex_unlock(&gr_exec_arg_mutex);
++#endif
++      return;
++}
++
++#ifdef CONFIG_GRKERNSEC
++extern int gr_acl_is_capable(const int cap);
++extern int gr_acl_is_capable_nolog(const int cap);
++extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
++extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
++extern int gr_chroot_is_capable(const int cap);
++extern int gr_chroot_is_capable_nolog(const int cap);
++extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
++extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
++#endif
++
++const char *captab_log[] = {
++      "CAP_CHOWN",
++      "CAP_DAC_OVERRIDE",
++      "CAP_DAC_READ_SEARCH",
++      "CAP_FOWNER",
++      "CAP_FSETID",
++      "CAP_KILL",
++      "CAP_SETGID",
++      "CAP_SETUID",
++      "CAP_SETPCAP",
++      "CAP_LINUX_IMMUTABLE",
++      "CAP_NET_BIND_SERVICE",
++      "CAP_NET_BROADCAST",
++      "CAP_NET_ADMIN",
++      "CAP_NET_RAW",
++      "CAP_IPC_LOCK",
++      "CAP_IPC_OWNER",
++      "CAP_SYS_MODULE",
++      "CAP_SYS_RAWIO",
++      "CAP_SYS_CHROOT",
++      "CAP_SYS_PTRACE",
++      "CAP_SYS_PACCT",
++      "CAP_SYS_ADMIN",
++      "CAP_SYS_BOOT",
++      "CAP_SYS_NICE",
++      "CAP_SYS_RESOURCE",
++      "CAP_SYS_TIME",
++      "CAP_SYS_TTY_CONFIG",
++      "CAP_MKNOD",
++      "CAP_LEASE",
++      "CAP_AUDIT_WRITE",
++      "CAP_AUDIT_CONTROL",
++      "CAP_SETFCAP",
++      "CAP_MAC_OVERRIDE",
++      "CAP_MAC_ADMIN",
++      "CAP_SYSLOG",
++      "CAP_WAKE_ALARM"
++};
++
++int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
++
++int gr_is_capable(const int cap)
++{
++#ifdef CONFIG_GRKERNSEC
++      if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
++              return 1;
++      return 0;
++#else
++      return 1;
++#endif
++}
++
++int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
++{
++#ifdef CONFIG_GRKERNSEC
++      if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
++              return 1;
++      return 0;
++#else
++      return 1;
++#endif
++}
++
++int gr_is_capable_nolog(const int cap)
++{
++#ifdef CONFIG_GRKERNSEC
++      if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
++              return 1;
++      return 0;
++#else
++      return 1;
++#endif
++}
++
++int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
++{
++#ifdef CONFIG_GRKERNSEC
++      if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
++              return 1;
++      return 0;
++#else
++      return 1;
++#endif
++}
++
++EXPORT_SYMBOL(gr_is_capable);
++EXPORT_SYMBOL(gr_is_capable_nolog);
++EXPORT_SYMBOL(gr_task_is_capable);
++EXPORT_SYMBOL(gr_task_is_capable_nolog);
+diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
+new file mode 100644
+index 0000000..06cc6ea
+--- /dev/null
++++ b/grsecurity/grsec_fifo.c
+@@ -0,0 +1,24 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
++             const struct dentry *dir, const int flag, const int acc_mode)
++{
++#ifdef CONFIG_GRKERNSEC_FIFO
++      const struct cred *cred = current_cred();
++
++      if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
++          !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
++          !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
++          !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
++              if (!inode_permission(dentry->d_inode, acc_mode))
++                      gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
++              return -EACCES;
++      }
++#endif
++      return 0;
++}
+diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
+new file mode 100644
+index 0000000..8ca18bf
+--- /dev/null
++++ b/grsecurity/grsec_fork.c
+@@ -0,0 +1,23 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/errno.h>
++
++void
++gr_log_forkfail(const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++      if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
++              switch (retval) {
++                      case -EAGAIN:
++                              gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
++                              break;
++                      case -ENOMEM:
++                              gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
++                              break;
++              }
++      }
++#endif
++      return;
++}
+diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
+new file mode 100644
+index 0000000..ab2d875
+--- /dev/null
++++ b/grsecurity/grsec_init.c
+@@ -0,0 +1,279 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/gracl.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/percpu.h>
++#include <linux/module.h>
++
++int grsec_enable_ptrace_readexec;
++int grsec_enable_setxid;
++int grsec_enable_symlinkown;
++kgid_t grsec_symlinkown_gid;
++int grsec_enable_brute;
++int grsec_enable_link;
++int grsec_enable_dmesg;
++int grsec_enable_harden_ptrace;
++int grsec_enable_fifo;
++int grsec_enable_execlog;
++int grsec_enable_signal;
++int grsec_enable_forkfail;
++int grsec_enable_audit_ptrace;
++int grsec_enable_time;
++int grsec_enable_group;
++kgid_t grsec_audit_gid;
++int grsec_enable_chdir;
++int grsec_enable_mount;
++int grsec_enable_rofs;
++int grsec_enable_chroot_findtask;
++int grsec_enable_chroot_mount;
++int grsec_enable_chroot_shmat;
++int grsec_enable_chroot_fchdir;
++int grsec_enable_chroot_double;
++int grsec_enable_chroot_pivot;
++int grsec_enable_chroot_chdir;
++int grsec_enable_chroot_chmod;
++int grsec_enable_chroot_mknod;
++int grsec_enable_chroot_nice;
++int grsec_enable_chroot_execlog;
++int grsec_enable_chroot_caps;
++int grsec_enable_chroot_sysctl;
++int grsec_enable_chroot_unix;
++int grsec_enable_tpe;
++kgid_t grsec_tpe_gid;
++int grsec_enable_blackhole;
++#ifdef CONFIG_IPV6_MODULE
++EXPORT_SYMBOL(grsec_enable_blackhole);
++#endif
++int grsec_lastack_retries;
++int grsec_enable_tpe_all;
++int grsec_enable_tpe_invert;
++int grsec_enable_socket_all;
++kgid_t grsec_socket_all_gid;
++int grsec_enable_socket_client;
++kgid_t grsec_socket_client_gid;
++int grsec_enable_socket_server;
++kgid_t grsec_socket_server_gid;
++int grsec_resource_logging;
++int grsec_disable_privio;
++int grsec_enable_log_rwxmaps;
++int grsec_lock;
++
++DEFINE_SPINLOCK(grsec_alert_lock);
++unsigned long grsec_alert_wtime = 0;
++unsigned long grsec_alert_fyet = 0;
++
++DEFINE_SPINLOCK(grsec_audit_lock);
++
++DEFINE_RWLOCK(grsec_exec_file_lock);
++
++char *gr_shared_page[4];
++
++char *gr_alert_log_fmt;
++char *gr_audit_log_fmt;
++char *gr_alert_log_buf;
++char *gr_audit_log_buf;
++
++extern struct gr_arg *gr_usermode;
++extern unsigned char *gr_system_salt;
++extern unsigned char *gr_system_sum;
++
++void __init
++grsecurity_init(void)
++{
++      int j;
++      /* create the per-cpu shared pages */
++
++#ifdef CONFIG_X86
++      memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
++#endif
++
++      for (j = 0; j < 4; j++) {
++              gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
++              if (gr_shared_page[j] == NULL) {
++                      panic("Unable to allocate grsecurity shared page");
++                      return;
++              }
++      }
++
++      /* allocate log buffers */
++      gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
++      if (!gr_alert_log_fmt) {
++              panic("Unable to allocate grsecurity alert log format buffer");
++              return;
++      }
++      gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
++      if (!gr_audit_log_fmt) {
++              panic("Unable to allocate grsecurity audit log format buffer");
++              return;
++      }
++      gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
++      if (!gr_alert_log_buf) {
++              panic("Unable to allocate grsecurity alert log buffer");
++              return;
++      }
++      gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
++      if (!gr_audit_log_buf) {
++              panic("Unable to allocate grsecurity audit log buffer");
++              return;
++      }
++
++      /* allocate memory for authentication structure */
++      gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
++      gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
++      gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
++
++      if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
++              panic("Unable to allocate grsecurity authentication structure");
++              return;
++      }
++
++
++#ifdef CONFIG_GRKERNSEC_IO
++#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
++      grsec_disable_privio = 1;
++#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
++      grsec_disable_privio = 1;
++#else
++      grsec_disable_privio = 0;
++#endif
++#endif
++
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++      /* for backward compatibility, tpe_invert always defaults to on if
++         enabled in the kernel
++      */
++      grsec_enable_tpe_invert = 1;
++#endif
++
++#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
++#ifndef CONFIG_GRKERNSEC_SYSCTL
++      grsec_lock = 1;
++#endif
++
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++      grsec_enable_log_rwxmaps = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
++      grsec_enable_group = 1;
++      grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
++#endif
++#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
++      grsec_enable_ptrace_readexec = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++      grsec_enable_chdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++      grsec_enable_harden_ptrace = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++      grsec_enable_mount = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_LINK
++      grsec_enable_link = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_BRUTE
++      grsec_enable_brute = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_DMESG
++      grsec_enable_dmesg = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++      grsec_enable_blackhole = 1;
++      grsec_lastack_retries = 4;
++#endif
++#ifdef CONFIG_GRKERNSEC_FIFO
++      grsec_enable_fifo = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++      grsec_enable_execlog = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_SETXID
++      grsec_enable_setxid = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++      grsec_enable_signal = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++      grsec_enable_forkfail = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_TIME
++      grsec_enable_time = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_RESLOG
++      grsec_resource_logging = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++      grsec_enable_chroot_findtask = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++      grsec_enable_chroot_unix = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++      grsec_enable_chroot_mount = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++      grsec_enable_chroot_fchdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++      grsec_enable_chroot_shmat = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
++      grsec_enable_audit_ptrace = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++      grsec_enable_chroot_double = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++      grsec_enable_chroot_pivot = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++      grsec_enable_chroot_chdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++      grsec_enable_chroot_chmod = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++      grsec_enable_chroot_mknod = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++      grsec_enable_chroot_nice = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++      grsec_enable_chroot_execlog = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++      grsec_enable_chroot_caps = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++      grsec_enable_chroot_sysctl = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
++      grsec_enable_symlinkown = 1;
++      grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE
++      grsec_enable_tpe = 1;
++      grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++      grsec_enable_tpe_all = 1;
++#endif
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++      grsec_enable_socket_all = 1;
++      grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++      grsec_enable_socket_client = 1;
++      grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++      grsec_enable_socket_server = 1;
++      grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
++#endif
++#endif
++
++      return;
++}
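The tail of the init routine above simply seeds every grsecurity toggle with its Kconfig default. On kernels built with CONFIG_GRKERNSEC_SYSCTL these same integers are exported read/write through the sysctl table added further down in this patch (grsec_sysctl.c), so the effective default can be checked from userspace. A minimal sketch, not part of the patch; the /proc path is an assumption, since the patch only fixes the "grsecurity" directory name and the "dmesg" procname:

#include <stdio.h>

int main(void)
{
        /* assumed mount point for the grsecurity sysctl table */
        FILE *f = fopen("/proc/sys/kernel/grsecurity/dmesg", "r");
        int enabled = 0;

        if (!f) {
                perror("open");
                return 1;
        }
        if (fscanf(f, "%d", &enabled) == 1)
                printf("dmesg restriction: %s\n", enabled ? "on" : "off");
        fclose(f);
        return 0;
}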
+diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
+new file mode 100644
+index 0000000..5e05e20
+--- /dev/null
++++ b/grsecurity/grsec_link.c
+@@ -0,0 +1,58 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++
++int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
++{
++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
++      const struct inode *link_inode = link->dentry->d_inode;
++
++      if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
++         /* ignore root-owned links, e.g. /proc/self */
++          gr_is_global_nonroot(link_inode->i_uid) && target &&
++          !uid_eq(link_inode->i_uid, target->i_uid)) {
++              gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
++              return 1;
++      }
++#endif
++      return 0;
++}
++
++int
++gr_handle_follow_link(const struct inode *parent,
++                    const struct inode *inode,
++                    const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_LINK
++      const struct cred *cred = current_cred();
++
++      if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
++          (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
++          (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
++              gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
++              return -EACCES;
++      }
++#endif
++      return 0;
++}
++
++int
++gr_handle_hardlink(const struct dentry *dentry,
++                 const struct vfsmount *mnt,
++                 struct inode *inode, const int mode, const struct filename *to)
++{
++#ifdef CONFIG_GRKERNSEC_LINK
++      const struct cred *cred = current_cred();
++
++      if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
++          (!S_ISREG(mode) || is_privileged_binary(dentry) || 
++           (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
++          !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
++              gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
++              return -EPERM;
++      }
++#endif
++      return 0;
++}
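For readers skimming the hunk above: gr_handle_follow_link() refuses to follow a symlink that lives in a sticky, world-writable directory (e.g. /tmp) unless the link is owned by the directory owner or by the process following it, and gr_handle_hardlink() applies the analogous rule to hardlinks. A standalone restatement of the follow-link condition using plain POSIX types rather than kernel structs; the helper name deny_follow is mine, illustrative only:

#include <stdbool.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Deny following a symlink in a sticky, world-writable directory when the
 * link is owned by neither the directory owner nor the follower. */
static bool deny_follow(mode_t dir_mode, uid_t dir_uid,
                        mode_t link_mode, uid_t link_uid, uid_t fsuid)
{
        return S_ISLNK(link_mode) &&
               (dir_mode & S_ISVTX) && dir_uid != link_uid &&
               (dir_mode & S_IWOTH) && fsuid != link_uid;
}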
+diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
+new file mode 100644
+index 0000000..dbe0a6b
+--- /dev/null
++++ b/grsecurity/grsec_log.c
+@@ -0,0 +1,341 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/tty.h>
++#include <linux/fs.h>
++#include <linux/mm.h>
++#include <linux/grinternal.h>
++
++#ifdef CONFIG_TREE_PREEMPT_RCU
++#define DISABLE_PREEMPT() preempt_disable()
++#define ENABLE_PREEMPT() preempt_enable()
++#else
++#define DISABLE_PREEMPT()
++#define ENABLE_PREEMPT()
++#endif
++
++#define BEGIN_LOCKS(x) \
++      DISABLE_PREEMPT(); \
++      rcu_read_lock(); \
++      read_lock(&tasklist_lock); \
++      read_lock(&grsec_exec_file_lock); \
++      if (x != GR_DO_AUDIT) \
++              spin_lock(&grsec_alert_lock); \
++      else \
++              spin_lock(&grsec_audit_lock)
++
++#define END_LOCKS(x) \
++      if (x != GR_DO_AUDIT) \
++              spin_unlock(&grsec_alert_lock); \
++      else \
++              spin_unlock(&grsec_audit_lock); \
++      read_unlock(&grsec_exec_file_lock); \
++      read_unlock(&tasklist_lock); \
++      rcu_read_unlock(); \
++      ENABLE_PREEMPT(); \
++      if (x == GR_DONT_AUDIT) \
++              gr_handle_alertkill(current)
++
++enum {
++      FLOODING,
++      NO_FLOODING
++};
++
++extern char *gr_alert_log_fmt;
++extern char *gr_audit_log_fmt;
++extern char *gr_alert_log_buf;
++extern char *gr_audit_log_buf;
++
++static int gr_log_start(int audit)
++{
++      char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
++      char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
++      char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
++      unsigned long curr_secs = get_seconds();
++
++      if (audit == GR_DO_AUDIT)
++              goto set_fmt;
++
++      if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
++              grsec_alert_wtime = curr_secs;
++              grsec_alert_fyet = 0;
++      } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
++                  && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
++              grsec_alert_fyet++;
++      } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
++              grsec_alert_wtime = curr_secs;
++              grsec_alert_fyet++;
++              printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
++              return FLOODING;
++      }
++      else return FLOODING;
++
++set_fmt:
++#endif
++      memset(buf, 0, PAGE_SIZE);
++      if (current->signal->curr_ip && gr_acl_is_enabled()) {
++              sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
++              snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
++      } else if (current->signal->curr_ip) {
++              sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
++              snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
++      } else if (gr_acl_is_enabled()) {
++              sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
++              snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
++      } else {
++              sprintf(fmt, "%s%s", loglevel, "grsec: ");
++              strcpy(buf, fmt);
++      }
++
++      return NO_FLOODING;
++}
++
++static void gr_log_middle(int audit, const char *msg, va_list ap)
++      __attribute__ ((format (printf, 2, 0)));
++
++static void gr_log_middle(int audit, const char *msg, va_list ap)
++{
++      char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++      unsigned int len = strlen(buf);
++
++      vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
++
++      return;
++}
++
++static void gr_log_middle_varargs(int audit, const char *msg, ...)
++      __attribute__ ((format (printf, 2, 3)));
++
++static void gr_log_middle_varargs(int audit, const char *msg, ...)
++{
++      char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++      unsigned int len = strlen(buf);
++      va_list ap;
++
++      va_start(ap, msg);
++      vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
++      va_end(ap);
++
++      return;
++}
++
++static void gr_log_end(int audit, int append_default)
++{
++      char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++      if (append_default) {
++              struct task_struct *task = current;
++              struct task_struct *parent = task->real_parent;
++              const struct cred *cred = __task_cred(task);
++              const struct cred *pcred = __task_cred(parent);
++              unsigned int len = strlen(buf);
++
++              snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
++      }
++
++      printk("%s\n", buf);
++
++      return;
++}
++
++void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
++{
++      int logtype;
++      char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
++      char *str1 = NULL, *str2 = NULL, *str3 = NULL;
++      void *voidptr = NULL;
++      int num1 = 0, num2 = 0;
++      unsigned long ulong1 = 0, ulong2 = 0;
++      struct dentry *dentry = NULL;
++      struct vfsmount *mnt = NULL;
++      struct file *file = NULL;
++      struct task_struct *task = NULL;
++      struct vm_area_struct *vma = NULL;
++      const struct cred *cred, *pcred;
++      va_list ap;
++
++      BEGIN_LOCKS(audit);
++      logtype = gr_log_start(audit);
++      if (logtype == FLOODING) {
++              END_LOCKS(audit);
++              return;
++      }
++      va_start(ap, argtypes);
++      switch (argtypes) {
++      case GR_TTYSNIFF:
++              task = va_arg(ap, struct task_struct *);
++              gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
++              break;
++      case GR_SYSCTL_HIDDEN:
++              str1 = va_arg(ap, char *);
++              gr_log_middle_varargs(audit, msg, result, str1);
++              break;
++      case GR_RBAC:
++              dentry = va_arg(ap, struct dentry *);
++              mnt = va_arg(ap, struct vfsmount *);
++              gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
++              break;
++      case GR_RBAC_STR:
++              dentry = va_arg(ap, struct dentry *);
++              mnt = va_arg(ap, struct vfsmount *);
++              str1 = va_arg(ap, char *);
++              gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
++              break;
++      case GR_STR_RBAC:
++              str1 = va_arg(ap, char *);
++              dentry = va_arg(ap, struct dentry *);
++              mnt = va_arg(ap, struct vfsmount *);
++              gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
++              break;
++      case GR_RBAC_MODE2:
++              dentry = va_arg(ap, struct dentry *);
++              mnt = va_arg(ap, struct vfsmount *);
++              str1 = va_arg(ap, char *);
++              str2 = va_arg(ap, char *);
++              gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
++              break;
++      case GR_RBAC_MODE3:
++              dentry = va_arg(ap, struct dentry *);
++              mnt = va_arg(ap, struct vfsmount *);
++              str1 = va_arg(ap, char *);
++              str2 = va_arg(ap, char *);
++              str3 = va_arg(ap, char *);
++              gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
++              break;
++      case GR_FILENAME:
++              dentry = va_arg(ap, struct dentry *);
++              mnt = va_arg(ap, struct vfsmount *);
++              gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
++              break;
++      case GR_STR_FILENAME:
++              str1 = va_arg(ap, char *);
++              dentry = va_arg(ap, struct dentry *);
++              mnt = va_arg(ap, struct vfsmount *);
++              gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
++              break;
++      case GR_FILENAME_STR:
++              dentry = va_arg(ap, struct dentry *);
++              mnt = va_arg(ap, struct vfsmount *);
++              str1 = va_arg(ap, char *);
++              gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
++              break;
++      case GR_FILENAME_TWO_INT:
++              dentry = va_arg(ap, struct dentry *);
++              mnt = va_arg(ap, struct vfsmount *);
++              num1 = va_arg(ap, int);
++              num2 = va_arg(ap, int);
++              gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
++              break;
++      case GR_FILENAME_TWO_INT_STR:
++              dentry = va_arg(ap, struct dentry *);
++              mnt = va_arg(ap, struct vfsmount *);
++              num1 = va_arg(ap, int);
++              num2 = va_arg(ap, int);
++              str1 = va_arg(ap, char *);
++              gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
++              break;
++      case GR_TEXTREL:
++              file = va_arg(ap, struct file *);
++              ulong1 = va_arg(ap, unsigned long);
++              ulong2 = va_arg(ap, unsigned long);
++              gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
++              break;
++      case GR_PTRACE:
++              task = va_arg(ap, struct task_struct *);
++              gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
++              break;
++      case GR_RESOURCE:
++              task = va_arg(ap, struct task_struct *);
++              cred = __task_cred(task);
++              pcred = __task_cred(task->real_parent);
++              ulong1 = va_arg(ap, unsigned long);
++              str1 = va_arg(ap, char *);
++              ulong2 = va_arg(ap, unsigned long);
++              gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
++              break;
++      case GR_CAP:
++              task = va_arg(ap, struct task_struct *);
++              cred = __task_cred(task);
++              pcred = __task_cred(task->real_parent);
++              str1 = va_arg(ap, char *);
++              gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
++              break;
++      case GR_SIG:
++              str1 = va_arg(ap, char *);
++              voidptr = va_arg(ap, void *);
++              gr_log_middle_varargs(audit, msg, str1, voidptr);
++              break;
++      case GR_SIG2:
++              task = va_arg(ap, struct task_struct *);
++              cred = __task_cred(task);
++              pcred = __task_cred(task->real_parent);
++              num1 = va_arg(ap, int);
++              gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
++              break;
++      case GR_CRASH1:
++              task = va_arg(ap, struct task_struct *);
++              cred = __task_cred(task);
++              pcred = __task_cred(task->real_parent);
++              ulong1 = va_arg(ap, unsigned long);
++              gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
++              break;
++      case GR_CRASH2:
++              task = va_arg(ap, struct task_struct *);
++              cred = __task_cred(task);
++              pcred = __task_cred(task->real_parent);
++              ulong1 = va_arg(ap, unsigned long);
++              gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
++              break;
++      case GR_RWXMAP:
++              file = va_arg(ap, struct file *);
++              gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
++              break;
++      case GR_RWXMAPVMA:
++              vma = va_arg(ap, struct vm_area_struct *);
++              if (vma->vm_file)
++                      str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
++              else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
++                      str1 = "<stack>";
++              else if (vma->vm_start <= current->mm->brk &&
++                       vma->vm_end >= current->mm->start_brk)
++                      str1 = "<heap>";
++              else
++                      str1 = "<anonymous mapping>";
++              gr_log_middle_varargs(audit, msg, str1);
++              break;
++      case GR_PSACCT:
++              {
++                      unsigned int wday, cday;
++                      __u8 whr, chr;
++                      __u8 wmin, cmin;
++                      __u8 wsec, csec;
++                      char cur_tty[64] = { 0 };
++                      char parent_tty[64] = { 0 };
++
++                      task = va_arg(ap, struct task_struct *);
++                      wday = va_arg(ap, unsigned int);
++                      cday = va_arg(ap, unsigned int);
++                      whr = va_arg(ap, int);
++                      chr = va_arg(ap, int);
++                      wmin = va_arg(ap, int);
++                      cmin = va_arg(ap, int);
++                      wsec = va_arg(ap, int);
++                      csec = va_arg(ap, int);
++                      ulong1 = va_arg(ap, unsigned long);
++                      cred = __task_cred(task);
++                      pcred = __task_cred(task->real_parent);
++
++                      gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
++              }
++              break;
++      default:
++              gr_log_middle(audit, msg, ap);
++      }
++      va_end(ap);
++      // these don't need DEFAULTSECARGS printed on the end
++      if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
++              gr_log_end(audit, 0);
++      else
++              gr_log_end(audit, 1);
++      END_LOCKS(audit);
++}
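gr_log_start() above implements a simple burst limiter for alert messages: up to CONFIG_GRKERNSEC_FLOODBURST alerts are accepted per CONFIG_GRKERNSEC_FLOODTIME-second window, after which logging is muted until a new window opens. A minimal userspace sketch of the same idea; the constants are placeholders and the bookkeeping is simplified relative to the kernel code:

#include <stdbool.h>
#include <time.h>

#define WINDOW 10       /* placeholder for CONFIG_GRKERNSEC_FLOODTIME */
#define BURST   6       /* placeholder for CONFIG_GRKERNSEC_FLOODBURST */

static time_t window_start;
static unsigned int count;

static bool allow_alert(void)
{
        time_t now = time(NULL);

        if (!window_start || now > window_start + WINDOW) {
                window_start = now;     /* new window: reset the counter */
                count = 0;
                return true;
        }
        if (count < BURST) {
                count++;
                return true;
        }
        return false;                   /* flooding: drop the alert */
}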
+diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
+new file mode 100644
+index 0000000..f536303
+--- /dev/null
++++ b/grsecurity/grsec_mem.c
+@@ -0,0 +1,40 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/grinternal.h>
++
++void
++gr_handle_ioperm(void)
++{
++      gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
++      return;
++}
++
++void
++gr_handle_iopl(void)
++{
++      gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
++      return;
++}
++
++void
++gr_handle_mem_readwrite(u64 from, u64 to)
++{
++      gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
++      return;
++}
++
++void
++gr_handle_vm86(void)
++{
++      gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
++      return;
++}
++
++void
++gr_log_badprocpid(const char *entry)
++{
++      gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
++      return;
++}
+diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
+new file mode 100644
+index 0000000..2131422
+--- /dev/null
++++ b/grsecurity/grsec_mount.c
+@@ -0,0 +1,62 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mount.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_remount(const char *devname, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++      if (grsec_enable_mount && (retval >= 0))
++              gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
++#endif
++      return;
++}
++
++void
++gr_log_unmount(const char *devname, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++      if (grsec_enable_mount && (retval >= 0))
++              gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
++#endif
++      return;
++}
++
++void
++gr_log_mount(const char *from, const char *to, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++      if (grsec_enable_mount && (retval >= 0))
++              gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
++#endif
++      return;
++}
++
++int
++gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
++{
++#ifdef CONFIG_GRKERNSEC_ROFS
++      if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
++              gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
++              return -EPERM;
++      } else
++              return 0;
++#endif
++      return 0;
++}
++
++int
++gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
++{
++#ifdef CONFIG_GRKERNSEC_ROFS
++      if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
++          dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
++              gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
++              return -EPERM;
++      } else
++              return 0;
++#endif
++      return 0;
++}
+diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
+new file mode 100644
+index 0000000..6ee9d50
+--- /dev/null
++++ b/grsecurity/grsec_pax.c
+@@ -0,0 +1,45 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++#include <linux/grsecurity.h>
++
++void
++gr_log_textrel(struct vm_area_struct * vma)
++{
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++      if (grsec_enable_log_rwxmaps)
++              gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
++#endif
++      return;
++}
++
++void gr_log_ptgnustack(struct file *file)
++{
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++      if (grsec_enable_log_rwxmaps)
++              gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
++#endif
++      return;
++}
++
++void
++gr_log_rwxmmap(struct file *file)
++{
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++      if (grsec_enable_log_rwxmaps)
++              gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
++#endif
++      return;
++}
++
++void
++gr_log_rwxmprotect(struct vm_area_struct *vma)
++{
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++      if (grsec_enable_log_rwxmaps)
++              gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
++#endif
++      return;
++}
+diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
+new file mode 100644
+index 0000000..f7f29aa
+--- /dev/null
++++ b/grsecurity/grsec_ptrace.c
+@@ -0,0 +1,30 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grinternal.h>
++#include <linux/security.h>
++
++void
++gr_audit_ptrace(struct task_struct *task)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
++      if (grsec_enable_audit_ptrace)
++              gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
++#endif
++      return;
++}
++
++int
++gr_ptrace_readexec(struct file *file, int unsafe_flags)
++{
++#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
++      const struct dentry *dentry = file->f_path.dentry;
++      const struct vfsmount *mnt = file->f_path.mnt;
++
++      if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) && 
++          (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
++              gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
++              return -EACCES;
++      }
++#endif
++      return 0;
++}
+diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
+new file mode 100644
+index 0000000..4e29cc7
+--- /dev/null
++++ b/grsecurity/grsec_sig.c
+@@ -0,0 +1,246 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/delay.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/hardirq.h>
++
++char *signames[] = {
++      [SIGSEGV] = "Segmentation fault",
++      [SIGILL] = "Illegal instruction",
++      [SIGABRT] = "Abort",
++      [SIGBUS] = "Invalid alignment/Bus error"
++};
++
++void
++gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
++{
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++      if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
++                                  (sig == SIGABRT) || (sig == SIGBUS))) {
++              if (task_pid_nr(t) == task_pid_nr(current)) {
++                      gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
++              } else {
++                      gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
++              }
++      }
++#endif
++      return;
++}
++
++int
++gr_handle_signal(const struct task_struct *p, const int sig)
++{
++#ifdef CONFIG_GRKERNSEC
++      /* ignore the 0 signal for protected task checks */
++      if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
++              gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
++              return -EPERM;
++      } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
++              return -EPERM;
++      }
++#endif
++      return 0;
++}
++
++#ifdef CONFIG_GRKERNSEC
++extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
++
++int gr_fake_force_sig(int sig, struct task_struct *t)
++{
++      unsigned long int flags;
++      int ret, blocked, ignored;
++      struct k_sigaction *action;
++
++      spin_lock_irqsave(&t->sighand->siglock, flags);
++      action = &t->sighand->action[sig-1];
++      ignored = action->sa.sa_handler == SIG_IGN;
++      blocked = sigismember(&t->blocked, sig);
++      if (blocked || ignored) {
++              action->sa.sa_handler = SIG_DFL;
++              if (blocked) {
++                      sigdelset(&t->blocked, sig);
++                      recalc_sigpending_and_wake(t);
++              }
++      }
++      if (action->sa.sa_handler == SIG_DFL)
++              t->signal->flags &= ~SIGNAL_UNKILLABLE;
++      ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
++
++      spin_unlock_irqrestore(&t->sighand->siglock, flags);
++
++      return ret;
++}
++#endif
++
++#ifdef CONFIG_GRKERNSEC_BRUTE
++#define GR_USER_BAN_TIME (15 * 60)
++#define GR_DAEMON_BRUTE_TIME (30 * 60)
++
++static int __get_dumpable(unsigned long mm_flags)
++{
++      int ret;
++
++      ret = mm_flags & MMF_DUMPABLE_MASK;
++      return (ret >= 2) ? 2 : ret;
++}
++#endif
++
++void gr_handle_brute_attach(unsigned long mm_flags)
++{
++#ifdef CONFIG_GRKERNSEC_BRUTE
++      struct task_struct *p = current;
++      kuid_t uid = GLOBAL_ROOT_UID;
++      int daemon = 0;
++
++      if (!grsec_enable_brute)
++              return;
++
++      rcu_read_lock();
++      read_lock(&tasklist_lock);
++      read_lock(&grsec_exec_file_lock);
++      if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
++              p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
++              p->real_parent->brute = 1;
++              daemon = 1;
++      } else {
++              const struct cred *cred = __task_cred(p), *cred2;
++              struct task_struct *tsk, *tsk2;
++
++              if (!__get_dumpable(mm_flags) && gr_is_global_nonroot(cred->uid)) {
++                      struct user_struct *user;
++
++                      uid = cred->uid;
++
++                      /* this is put upon execution past expiration */
++                      user = find_user(uid);
++                      if (user == NULL)
++                              goto unlock;
++                      user->suid_banned = 1;
++                      user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
++                      if (user->suid_ban_expires == ~0UL)
++                              user->suid_ban_expires--;
++
++                      /* only kill other threads of the same binary, from the same user */
++                      do_each_thread(tsk2, tsk) {
++                              cred2 = __task_cred(tsk);
++                              if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
++                                      gr_fake_force_sig(SIGKILL, tsk);
++                      } while_each_thread(tsk2, tsk);
++              }
++      }
++unlock:
++      read_unlock(&grsec_exec_file_lock);
++      read_unlock(&tasklist_lock);
++      rcu_read_unlock();
++
++      if (gr_is_global_nonroot(uid))
++              gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
++      else if (daemon)
++              gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
++
++#endif
++      return;
++}
++
++void gr_handle_brute_check(void)
++{
++#ifdef CONFIG_GRKERNSEC_BRUTE
++      struct task_struct *p = current;
++
++      if (unlikely(p->brute)) {
++              if (!grsec_enable_brute)
++                      p->brute = 0;
++              else if (time_before(get_seconds(), p->brute_expires))
++                      msleep(30 * 1000);
++      }
++#endif
++      return;
++}
++
++void gr_handle_kernel_exploit(void)
++{
++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
++      const struct cred *cred;
++      struct task_struct *tsk, *tsk2;
++      struct user_struct *user;
++      kuid_t uid;
++
++      if (in_irq() || in_serving_softirq() || in_nmi())
++              panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
++
++      uid = current_uid();
++
++      if (gr_is_global_root(uid))
++              panic("grsec: halting the system due to suspicious kernel crash caused by root");
++      else {
++              /* kill all the processes of this user, hold a reference
++                 to their creds struct, and prevent them from creating
++                 another process until system reset
++              */
++              printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
++                      GR_GLOBAL_UID(uid));
++              /* we intentionally leak this ref */
++              user = get_uid(current->cred->user);
++              if (user)
++                      user->kernel_banned = 1;
++
++              /* kill all processes of this user */
++              read_lock(&tasklist_lock);
++              do_each_thread(tsk2, tsk) {
++                      cred = __task_cred(tsk);
++                      if (uid_eq(cred->uid, uid))
++                              gr_fake_force_sig(SIGKILL, tsk);
++              } while_each_thread(tsk2, tsk);
++              read_unlock(&tasklist_lock); 
++      }
++#endif
++}
++
++#ifdef CONFIG_GRKERNSEC_BRUTE
++static bool suid_ban_expired(struct user_struct *user)
++{
++      if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
++              user->suid_banned = 0;
++              user->suid_ban_expires = 0;
++              free_uid(user);
++              return true;
++      }
++
++      return false;
++}
++#endif
++
++int gr_process_kernel_exec_ban(void)
++{
++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
++      if (unlikely(current->cred->user->kernel_banned))
++              return -EPERM;
++#endif
++      return 0;
++}
++
++int gr_process_kernel_setuid_ban(struct user_struct *user)
++{
++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
++      if (unlikely(user->kernel_banned))
++              gr_fake_force_sig(SIGKILL, current);
++#endif
++      return 0;
++}
++
++int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
++{
++#ifdef CONFIG_GRKERNSEC_BRUTE
++      struct user_struct *user = current->cred->user;
++      if (unlikely(user->suid_banned)) {
++              if (suid_ban_expired(user))
++                      return 0;
++              /* disallow execution of suid binaries only */
++              else if (!uid_eq(bprm->cred->euid, current->cred->uid))
++                      return -EPERM;
++      }
++#endif
++      return 0;
++}
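gr_handle_brute_attach() and gr_process_suid_exec_ban() above carry the brute-force deterrence policy: when a non-dumpable process owned by a non-root user crashes, that user is barred from executing suid binaries for GR_USER_BAN_TIME (15 minutes), while a crashing daemon respawned by the same parent is slowed down for GR_DAEMON_BRUTE_TIME instead; the suid ban is cleared lazily on the first suid exec after it expires. A plain-C sketch of that bookkeeping, wall-clock based and illustrative only; the struct and function names are mine:

#include <stdbool.h>
#include <time.h>

#define USER_BAN_TIME (15 * 60)         /* GR_USER_BAN_TIME in the patch */

struct user_ban {
        bool   banned;
        time_t expires;
};

static void ban_user(struct user_ban *u)
{
        u->banned  = true;
        u->expires = time(NULL) + USER_BAN_TIME;
}

static bool suid_exec_allowed(struct user_ban *u)
{
        if (!u->banned)
                return true;
        if (time(NULL) >= u->expires) {         /* ban expired: clear it */
                u->banned  = false;
                u->expires = 0;
                return true;
        }
        return false;                           /* still banned */
}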
+diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
+new file mode 100644
+index 0000000..4030d57
+--- /dev/null
++++ b/grsecurity/grsec_sock.c
+@@ -0,0 +1,244 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <net/sock.h>
++#include <net/inet_sock.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/gracl.h>
++
++extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
++extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
++
++EXPORT_SYMBOL(gr_search_udp_recvmsg);
++EXPORT_SYMBOL(gr_search_udp_sendmsg);
++
++#ifdef CONFIG_UNIX_MODULE
++EXPORT_SYMBOL(gr_acl_handle_unix);
++EXPORT_SYMBOL(gr_acl_handle_mknod);
++EXPORT_SYMBOL(gr_handle_chroot_unix);
++EXPORT_SYMBOL(gr_handle_create);
++#endif
++
++#ifdef CONFIG_GRKERNSEC
++#define gr_conn_table_size 32749
++struct conn_table_entry {
++      struct conn_table_entry *next;
++      struct signal_struct *sig;
++};
++
++struct conn_table_entry *gr_conn_table[gr_conn_table_size];
++DEFINE_SPINLOCK(gr_conn_table_lock);
++
++extern const char * gr_socktype_to_name(unsigned char type);
++extern const char * gr_proto_to_name(unsigned char proto);
++extern const char * gr_sockfamily_to_name(unsigned char family);
++
++static __inline__ int 
++conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
++{
++      return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
++}
++
++static __inline__ int
++conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr, 
++         __u16 sport, __u16 dport)
++{
++      if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
++                   sig->gr_sport == sport && sig->gr_dport == dport))
++              return 1;
++      else
++              return 0;
++}
++
++static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
++{
++      struct conn_table_entry **match;
++      unsigned int index;
++
++      index = conn_hash(sig->gr_saddr, sig->gr_daddr, 
++                        sig->gr_sport, sig->gr_dport, 
++                        gr_conn_table_size);
++
++      newent->sig = sig;
++      
++      match = &gr_conn_table[index];
++      newent->next = *match;
++      *match = newent;
++
++      return;
++}
++
++static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
++{
++      struct conn_table_entry *match, *last = NULL;
++      unsigned int index;
++
++      index = conn_hash(sig->gr_saddr, sig->gr_daddr, 
++                        sig->gr_sport, sig->gr_dport, 
++                        gr_conn_table_size);
++
++      match = gr_conn_table[index];
++      while (match && !conn_match(match->sig, 
++              sig->gr_saddr, sig->gr_daddr, sig->gr_sport, 
++              sig->gr_dport)) {
++              last = match;
++              match = match->next;
++      }
++
++      if (match) {
++              if (last)
++                      last->next = match->next;
++              else
++                      gr_conn_table[index] = NULL;
++              kfree(match);
++      }
++
++      return;
++}
++
++static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
++                                           __u16 sport, __u16 dport)
++{
++      struct conn_table_entry *match;
++      unsigned int index;
++
++      index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
++
++      match = gr_conn_table[index];
++      while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
++              match = match->next;
++
++      if (match)
++              return match->sig;
++      else
++              return NULL;
++}
++
++#endif
++
++void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
++{
++#ifdef CONFIG_GRKERNSEC
++      struct signal_struct *sig = task->signal;
++      struct conn_table_entry *newent;
++
++      newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
++      if (newent == NULL)
++              return;
++      /* no bh lock needed since we are called with bh disabled */
++      spin_lock(&gr_conn_table_lock);
++      gr_del_task_from_ip_table_nolock(sig);
++      sig->gr_saddr = inet->inet_rcv_saddr;
++      sig->gr_daddr = inet->inet_daddr;
++      sig->gr_sport = inet->inet_sport;
++      sig->gr_dport = inet->inet_dport;
++      gr_add_to_task_ip_table_nolock(sig, newent);
++      spin_unlock(&gr_conn_table_lock);
++#endif
++      return;
++}
++
++void gr_del_task_from_ip_table(struct task_struct *task)
++{
++#ifdef CONFIG_GRKERNSEC
++      spin_lock_bh(&gr_conn_table_lock);
++      gr_del_task_from_ip_table_nolock(task->signal);
++      spin_unlock_bh(&gr_conn_table_lock);
++#endif
++      return;
++}
++
++void
++gr_attach_curr_ip(const struct sock *sk)
++{
++#ifdef CONFIG_GRKERNSEC
++      struct signal_struct *p, *set;
++      const struct inet_sock *inet = inet_sk(sk);     
++
++      if (unlikely(sk->sk_protocol != IPPROTO_TCP))
++              return;
++
++      set = current->signal;
++
++      spin_lock_bh(&gr_conn_table_lock);
++      p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
++                                  inet->inet_dport, inet->inet_sport);
++      if (unlikely(p != NULL)) {
++              set->curr_ip = p->curr_ip;
++              set->used_accept = 1;
++              gr_del_task_from_ip_table_nolock(p);
++              spin_unlock_bh(&gr_conn_table_lock);
++              return;
++      }
++      spin_unlock_bh(&gr_conn_table_lock);
++
++      set->curr_ip = inet->inet_daddr;
++      set->used_accept = 1;
++#endif
++      return;
++}
++
++int
++gr_handle_sock_all(const int family, const int type, const int protocol)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++      if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
++          (family != AF_UNIX)) {
++              if (family == AF_INET)
++                      gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
++              else
++                      gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
++              return -EACCES;
++      }
++#endif
++      return 0;
++}
++
++int
++gr_handle_sock_server(const struct sockaddr *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++      if (grsec_enable_socket_server &&
++          in_group_p(grsec_socket_server_gid) &&
++          sck && (sck->sa_family != AF_UNIX) &&
++          (sck->sa_family != AF_LOCAL)) {
++              gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
++              return -EACCES;
++      }
++#endif
++      return 0;
++}
++
++int
++gr_handle_sock_server_other(const struct sock *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++      if (grsec_enable_socket_server &&
++          in_group_p(grsec_socket_server_gid) &&
++          sck && (sck->sk_family != AF_UNIX) &&
++          (sck->sk_family != AF_LOCAL)) {
++              gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
++              return -EACCES;
++      }
++#endif
++      return 0;
++}
++
++int
++gr_handle_sock_client(const struct sockaddr *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++      if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
++          sck && (sck->sa_family != AF_UNIX) &&
++          (sck->sa_family != AF_LOCAL)) {
++              gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
++              return -EACCES;
++      }
++#endif
++      return 0;
++}
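The connection table above maps a TCP 4-tuple to the signal_struct of the task that last used it, so gr_attach_curr_ip() can copy the originating IP onto the accepting task for later "From %pI4" log lines. The hash folds the two addresses and ports into one bucket index, shifting the ports so connections that differ only in port numbers spread across buckets; restated here outside the kernel purely for illustration:

#include <stdint.h>

/* Fold saddr/daddr/sport/dport into a bucket index, as in conn_hash() above. */
static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
                              uint16_t sport, uint16_t dport, unsigned int size)
{
        return (saddr + daddr + ((uint32_t)sport << 8) +
                ((uint32_t)dport << 16)) % size;
}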
+diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
+new file mode 100644
+index 0000000..7624d1c
+--- /dev/null
++++ b/grsecurity/grsec_sysctl.c
+@@ -0,0 +1,460 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/sysctl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
++{
++#ifdef CONFIG_GRKERNSEC_SYSCTL
++      if (dirname == NULL || name == NULL)
++              return 0;
++      if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
++              gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
++              return -EACCES;
++      }
++#endif
++      return 0;
++}
++
++#ifdef CONFIG_GRKERNSEC_ROFS
++static int __maybe_unused one = 1;
++#endif
++
++#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
++struct ctl_table grsecurity_table[] = {
++#ifdef CONFIG_GRKERNSEC_SYSCTL
++#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
++#ifdef CONFIG_GRKERNSEC_IO
++      {
++              .procname       = "disable_priv_io",
++              .data           = &grsec_disable_privio,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#endif
++#ifdef CONFIG_GRKERNSEC_LINK
++      {
++              .procname       = "linking_restrictions",
++              .data           = &grsec_enable_link,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
++      {
++              .procname       = "enforce_symlinksifowner",
++              .data           = &grsec_enable_symlinkown,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++      {
++              .procname       = "symlinkown_gid",
++              .data           = &grsec_symlinkown_gid,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_BRUTE
++      {
++              .procname       = "deter_bruteforce",
++              .data           = &grsec_enable_brute,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_FIFO
++      {
++              .procname       = "fifo_restrictions",
++              .data           = &grsec_enable_fifo,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
++      {
++              .procname       = "ptrace_readexec",
++              .data           = &grsec_enable_ptrace_readexec,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_SETXID
++      {
++              .procname       = "consistent_setxid",
++              .data           = &grsec_enable_setxid,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++      {
++              .procname       = "ip_blackhole",
++              .data           = &grsec_enable_blackhole,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++      {
++              .procname       = "lastack_retries",
++              .data           = &grsec_lastack_retries,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++      {
++              .procname       = "exec_logging",
++              .data           = &grsec_enable_execlog,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++      {
++              .procname       = "rwxmap_logging",
++              .data           = &grsec_enable_log_rwxmaps,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++      {
++              .procname       = "signal_logging",
++              .data           = &grsec_enable_signal,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++      {
++              .procname       = "forkfail_logging",
++              .data           = &grsec_enable_forkfail,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_TIME
++      {
++              .procname       = "timechange_logging",
++              .data           = &grsec_enable_time,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++      {
++              .procname       = "chroot_deny_shmat",
++              .data           = &grsec_enable_chroot_shmat,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++      {
++              .procname       = "chroot_deny_unix",
++              .data           = &grsec_enable_chroot_unix,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++      {
++              .procname       = "chroot_deny_mount",
++              .data           = &grsec_enable_chroot_mount,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++      {
++              .procname       = "chroot_deny_fchdir",
++              .data           = &grsec_enable_chroot_fchdir,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++      {
++              .procname       = "chroot_deny_chroot",
++              .data           = &grsec_enable_chroot_double,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++      {
++              .procname       = "chroot_deny_pivot",
++              .data           = &grsec_enable_chroot_pivot,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++      {
++              .procname       = "chroot_enforce_chdir",
++              .data           = &grsec_enable_chroot_chdir,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++      {
++              .procname       = "chroot_deny_chmod",
++              .data           = &grsec_enable_chroot_chmod,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++      {
++              .procname       = "chroot_deny_mknod",
++              .data           = &grsec_enable_chroot_mknod,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++      {
++              .procname       = "chroot_restrict_nice",
++              .data           = &grsec_enable_chroot_nice,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++      {
++              .procname       = "chroot_execlog",
++              .data           = &grsec_enable_chroot_execlog,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++      {
++              .procname       = "chroot_caps",
++              .data           = &grsec_enable_chroot_caps,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++      {
++              .procname       = "chroot_deny_sysctl",
++              .data           = &grsec_enable_chroot_sysctl,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE
++      {
++              .procname       = "tpe",
++              .data           = &grsec_enable_tpe,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++      {
++              .procname       = "tpe_gid",
++              .data           = &grsec_tpe_gid,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++      {
++              .procname       = "tpe_invert",
++              .data           = &grsec_enable_tpe_invert,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++      {
++              .procname       = "tpe_restrict_all",
++              .data           = &grsec_enable_tpe_all,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++      {
++              .procname       = "socket_all",
++              .data           = &grsec_enable_socket_all,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++      {
++              .procname       = "socket_all_gid",
++              .data           = &grsec_socket_all_gid,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++      {
++              .procname       = "socket_client",
++              .data           = &grsec_enable_socket_client,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++      {
++              .procname       = "socket_client_gid",
++              .data           = &grsec_socket_client_gid,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++      {
++              .procname       = "socket_server",
++              .data           = &grsec_enable_socket_server,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++      {
++              .procname       = "socket_server_gid",
++              .data           = &grsec_socket_server_gid,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
++      {
++              .procname       = "audit_group",
++              .data           = &grsec_enable_group,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++      {
++              .procname       = "audit_gid",
++              .data           = &grsec_audit_gid,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++      {
++              .procname       = "audit_chdir",
++              .data           = &grsec_enable_chdir,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++      {
++              .procname       = "audit_mount",
++              .data           = &grsec_enable_mount,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_DMESG
++      {
++              .procname       = "dmesg",
++              .data           = &grsec_enable_dmesg,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++      {
++              .procname       = "chroot_findtask",
++              .data           = &grsec_enable_chroot_findtask,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_RESLOG
++      {
++              .procname       = "resource_logging",
++              .data           = &grsec_resource_logging,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
++      {
++              .procname       = "audit_ptrace",
++              .data           = &grsec_enable_audit_ptrace,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++      {
++              .procname       = "harden_ptrace",
++              .data           = &grsec_enable_harden_ptrace,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++      {
++              .procname       = "grsec_lock",
++              .data           = &grsec_lock,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_ROFS
++      {
++              .procname       = "romount_protect",
++              .data           = &grsec_enable_rofs,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_minmax,
++              .extra1         = &one,
++              .extra2         = &one,
++      },
++#endif
++      { }
++};
++#endif
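
The romount_protect entry above is the only one wired to proc_dointvec_minmax, with both .extra1 and .extra2 pointing at one; that handler rejects any written value outside the [min, max] range, so the tunable can be switched on but never back off through sysctl. Below is a minimal userspace sketch of that clamp-to-range behaviour; the names (clamp_write, romount_protect) are illustrative and not part of the patch.

#include <stdio.h>

/* Rough userspace analogue of proc_dointvec_minmax: a write is accepted
 * only if it lies inside [min, max]. With min == max == 1, as in the
 * romount_protect entry above, the knob can only ever be set to 1. */
static int clamp_write(int *knob, int value, int min, int max)
{
        if (value < min || value > max)
                return -1;              /* rejected, like -EINVAL */
        *knob = value;
        return 0;
}

int main(void)
{
        int romount_protect = 0;

        printf("write 1 -> %d\n", clamp_write(&romount_protect, 1, 1, 1));
        printf("write 0 -> %d\n", clamp_write(&romount_protect, 0, 1, 1));
        printf("value   -> %d\n", romount_protect);
        return 0;
}
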
+diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
+new file mode 100644
+index 0000000..0dc13c3
+--- /dev/null
++++ b/grsecurity/grsec_time.c
+@@ -0,0 +1,16 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grinternal.h>
++#include <linux/module.h>
++
++void
++gr_log_timechange(void)
++{
++#ifdef CONFIG_GRKERNSEC_TIME
++      if (grsec_enable_time)
++              gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
++#endif
++      return;
++}
++
++EXPORT_SYMBOL(gr_log_timechange);
+diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
+new file mode 100644
+index 0000000..ee57dcf
+--- /dev/null
++++ b/grsecurity/grsec_tpe.c
+@@ -0,0 +1,73 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/grinternal.h>
++
++extern int gr_acl_tpe_check(void);
++
++int
++gr_tpe_allow(const struct file *file)
++{
++#ifdef CONFIG_GRKERNSEC
++      struct inode *inode = file->f_path.dentry->d_parent->d_inode;
++      const struct cred *cred = current_cred();
++      char *msg = NULL;
++      char *msg2 = NULL;
++
++      // never restrict root
++      if (gr_is_global_root(cred->uid))
++              return 1;
++
++      if (grsec_enable_tpe) {
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++              if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
++                      msg = "not being in trusted group";
++              else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
++                      msg = "being in untrusted group";
++#else
++              if (in_group_p(grsec_tpe_gid))
++                      msg = "being in untrusted group";
++#endif
++      }
++      if (!msg && gr_acl_tpe_check())
++              msg = "being in untrusted role";
++
++      // not in any affected group/role
++      if (!msg)
++              goto next_check;
++
++      if (gr_is_global_nonroot(inode->i_uid))
++              msg2 = "file in non-root-owned directory";
++      else if (inode->i_mode & S_IWOTH)
++              msg2 = "file in world-writable directory";
++      else if (inode->i_mode & S_IWGRP)
++              msg2 = "file in group-writable directory";
++
++      if (msg && msg2) {
++              char fullmsg[70] = {0};
++              snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
++              gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
++              return 0;
++      }
++      msg = NULL;
++next_check:
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++      if (!grsec_enable_tpe || !grsec_enable_tpe_all)
++              return 1;
++
++      if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
++              msg = "directory not owned by user";
++      else if (inode->i_mode & S_IWOTH)
++              msg = "file in world-writable directory";
++      else if (inode->i_mode & S_IWGRP)
++              msg = "file in group-writable directory";
++
++      if (msg) {
++              gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
++              return 0;
++      }
++#endif
++#endif
++      return 1;
++}
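
gr_tpe_allow() above combines two tests: whether the caller is in an untrusted group or role, and whether the parent directory of the file being executed is untrustworthy (not root-owned, or group-/world-writable). The sketch below is a rough userspace analogue of the directory half of that test, using stat(2) on a directory path; the helper name and messages are illustrative, only the permission checks mirror the patch.

#include <stdio.h>
#include <sys/stat.h>

/* Userspace analogue of the directory checks in gr_tpe_allow(): a
 * directory is considered untrusted if it is not owned by root or if
 * it is group- or world-writable. */
static const char *dir_untrusted_reason(const char *dir)
{
        struct stat st;

        if (stat(dir, &st) != 0)
                return "cannot stat directory";
        if (st.st_uid != 0)
                return "non-root-owned directory";
        if (st.st_mode & S_IWOTH)
                return "world-writable directory";
        if (st.st_mode & S_IWGRP)
                return "group-writable directory";
        return NULL;                    /* looks trusted */
}

int main(int argc, char **argv)
{
        const char *dir = argc > 1 ? argv[1] : "/tmp";
        const char *why = dir_untrusted_reason(dir);

        printf("%s: %s\n", dir, why ? why : "trusted");
        return 0;
}
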
+diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
+new file mode 100644
+index 0000000..9f7b1ac
+--- /dev/null
++++ b/grsecurity/grsum.c
+@@ -0,0 +1,61 @@
++#include <linux/err.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/scatterlist.h>
++#include <linux/crypto.h>
++#include <linux/gracl.h>
++
++
++#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
++#error "crypto and sha256 must be built into the kernel"
++#endif
++
++int
++chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
++{
++      char *p;
++      struct crypto_hash *tfm;
++      struct hash_desc desc;
++      struct scatterlist sg;
++      unsigned char temp_sum[GR_SHA_LEN];
++      volatile int retval = 0;
++      volatile int dummy = 0;
++      unsigned int i;
++
++      sg_init_table(&sg, 1);
++
++      tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
++      if (IS_ERR(tfm)) {
++              /* should never happen, since sha256 should be built in */
++              return 1;
++      }
++
++      desc.tfm = tfm;
++      desc.flags = 0;
++
++      crypto_hash_init(&desc);
++
++      p = salt;
++      sg_set_buf(&sg, p, GR_SALT_LEN);
++      crypto_hash_update(&desc, &sg, sg.length);
++
++      p = entry->pw;
++      sg_set_buf(&sg, p, strlen(p));
++      
++      crypto_hash_update(&desc, &sg, sg.length);
++
++      crypto_hash_final(&desc, temp_sum);
++
++      memset(entry->pw, 0, GR_PW_LEN);
++
++      for (i = 0; i < GR_SHA_LEN; i++)
++              if (sum[i] != temp_sum[i])
++                      retval = 1;
++              else
++                      dummy = 1;      // waste a cycle
++
++      crypto_free_hash(tfm);
++
++      return retval;
++}
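
Note how chkpw() above compares the freshly computed SHA-256 against the stored sum: the loop always touches all GR_SHA_LEN bytes, and the volatile retval/dummy pair keeps the compiler from turning it into an early-exit memcmp, so the check takes roughly the same time whether the password matches or not. A standalone sketch of that constant-time comparison, with an illustrative helper name, looks like this:

#include <stddef.h>
#include <stdio.h>

/* Constant-time byte comparison in the spirit of the loop in chkpw():
 * every byte is visited and mismatches are accumulated instead of
 * returning at the first difference. */
static int ct_memcmp(const unsigned char *a, const unsigned char *b, size_t n)
{
        volatile unsigned char acc = 0;
        size_t i;

        for (i = 0; i < n; i++)
                acc |= a[i] ^ b[i];

        return acc != 0;                /* 0 on match, 1 on mismatch */
}

int main(void)
{
        unsigned char stored[4] = { 0xde, 0xad, 0xbe, 0xef };
        unsigned char guess[4]  = { 0xde, 0xad, 0x00, 0xef };

        printf("same:      %d\n", ct_memcmp(stored, stored, sizeof(stored)));
        printf("different: %d\n", ct_memcmp(stored, guess, sizeof(stored)));
        return 0;
}
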
+diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
+index 77ff547..181834f 100644
+--- a/include/asm-generic/4level-fixup.h
++++ b/include/asm-generic/4level-fixup.h
+@@ -13,8 +13,10 @@
+ #define pmd_alloc(mm, pud, address) \
+       ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
+               NULL: pmd_offset(pud, address))
++#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
+ #define pud_alloc(mm, pgd, address)   (pgd)
++#define pud_alloc_kernel(mm, pgd, address)    pud_alloc((mm), (pgd), (address))
+ #define pud_offset(pgd, start)                (pgd)
+ #define pud_none(pud)                 0
+ #define pud_bad(pud)                  0
+diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
+index b7babf0..04ad282 100644
+--- a/include/asm-generic/atomic-long.h
++++ b/include/asm-generic/atomic-long.h
+@@ -22,6 +22,12 @@
+ typedef atomic64_t atomic_long_t;
++#ifdef CONFIG_PAX_REFCOUNT
++typedef atomic64_unchecked_t atomic_long_unchecked_t;
++#else
++typedef atomic64_t atomic_long_unchecked_t;
++#endif
++
+ #define ATOMIC_LONG_INIT(i)   ATOMIC64_INIT(i)
+ static inline long atomic_long_read(atomic_long_t *l)
+@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
+       return (long)atomic64_read(v);
+ }
++#ifdef CONFIG_PAX_REFCOUNT
++static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
++{
++      atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++      return (long)atomic64_read_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_set(atomic_long_t *l, long i)
+ {
+       atomic64_t *v = (atomic64_t *)l;
+@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
+       atomic64_set(v, i);
+ }
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
++{
++      atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++      atomic64_set_unchecked(v, i);
++}
++#endif
++
+ static inline void atomic_long_inc(atomic_long_t *l)
+ {
+       atomic64_t *v = (atomic64_t *)l;
+@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
+       atomic64_inc(v);
+ }
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
++{
++      atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++      atomic64_inc_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_dec(atomic_long_t *l)
+ {
+       atomic64_t *v = (atomic64_t *)l;
+@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
+       atomic64_dec(v);
+ }
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
++{
++      atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++      atomic64_dec_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_add(long i, atomic_long_t *l)
+ {
+       atomic64_t *v = (atomic64_t *)l;
+@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
+       atomic64_add(i, v);
+ }
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
++{
++      atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++      atomic64_add_unchecked(i, v);
++}
++#endif
++
+ static inline void atomic_long_sub(long i, atomic_long_t *l)
+ {
+       atomic64_t *v = (atomic64_t *)l;
+@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
+       atomic64_sub(i, v);
+ }
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
++{
++      atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++      atomic64_sub_unchecked(i, v);
++}
++#endif
++
+ static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+ {
+       atomic64_t *v = (atomic64_t *)l;
+@@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
+       return (long)atomic64_add_return(i, v);
+ }
++#ifdef CONFIG_PAX_REFCOUNT
++static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
++{
++      atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++      return (long)atomic64_add_return_unchecked(i, v);
++}
++#endif
++
+ static inline long atomic_long_sub_return(long i, atomic_long_t *l)
+ {
+       atomic64_t *v = (atomic64_t *)l;
+@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
+       return (long)atomic64_inc_return(v);
+ }
++#ifdef CONFIG_PAX_REFCOUNT
++static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
++{
++      atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++      return (long)atomic64_inc_return_unchecked(v);
++}
++#endif
++
+ static inline long atomic_long_dec_return(atomic_long_t *l)
+ {
+       atomic64_t *v = (atomic64_t *)l;
+@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+ typedef atomic_t atomic_long_t;
++#ifdef CONFIG_PAX_REFCOUNT
++typedef atomic_unchecked_t atomic_long_unchecked_t;
++#else
++typedef atomic_t atomic_long_unchecked_t;
++#endif
++
+ #define ATOMIC_LONG_INIT(i)   ATOMIC_INIT(i)
+ static inline long atomic_long_read(atomic_long_t *l)
+ {
+@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
+       return (long)atomic_read(v);
+ }
++#ifdef CONFIG_PAX_REFCOUNT
++static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
++{
++      atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++      return (long)atomic_read_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_set(atomic_long_t *l, long i)
+ {
+       atomic_t *v = (atomic_t *)l;
+@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
+       atomic_set(v, i);
+ }
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
++{
++      atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++      atomic_set_unchecked(v, i);
++}
++#endif
++
+ static inline void atomic_long_inc(atomic_long_t *l)
+ {
+       atomic_t *v = (atomic_t *)l;
+@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
+       atomic_inc(v);
+ }
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
++{
++      atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++      atomic_inc_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_dec(atomic_long_t *l)
+ {
+       atomic_t *v = (atomic_t *)l;
+@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
+       atomic_dec(v);
+ }
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
++{
++      atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++      atomic_dec_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_add(long i, atomic_long_t *l)
+ {
+       atomic_t *v = (atomic_t *)l;
+@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
+       atomic_add(i, v);
+ }
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
++{
++      atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++      atomic_add_unchecked(i, v);
++}
++#endif
++
+ static inline void atomic_long_sub(long i, atomic_long_t *l)
+ {
+       atomic_t *v = (atomic_t *)l;
+@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
+       atomic_sub(i, v);
+ }
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
++{
++      atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++      atomic_sub_unchecked(i, v);
++}
++#endif
++
+ static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+ {
+       atomic_t *v = (atomic_t *)l;
+@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
+       return (long)atomic_add_return(i, v);
+ }
++#ifdef CONFIG_PAX_REFCOUNT
++static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
++{
++      atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++      return (long)atomic_add_return_unchecked(i, v);
++}
++
++#endif
++
+ static inline long atomic_long_sub_return(long i, atomic_long_t *l)
+ {
+       atomic_t *v = (atomic_t *)l;
+@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
+       return (long)atomic_inc_return(v);
+ }
++#ifdef CONFIG_PAX_REFCOUNT
++static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
++{
++      atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++      return (long)atomic_inc_return_unchecked(v);
++}
++#endif
++
+ static inline long atomic_long_dec_return(atomic_long_t *l)
+ {
+       atomic_t *v = (atomic_t *)l;
+@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+ #endif  /*  BITS_PER_LONG == 64  */
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void pax_refcount_needs_these_functions(void)
++{
++      atomic_read_unchecked((atomic_unchecked_t *)NULL);
++      atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
++      atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
++      atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
++      atomic_inc_unchecked((atomic_unchecked_t *)NULL);
++      (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
++      atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
++      atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
++      atomic_dec_unchecked((atomic_unchecked_t *)NULL);
++      atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
++      (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
++#ifdef CONFIG_X86
++      atomic_clear_mask_unchecked(0, NULL);
++      atomic_set_mask_unchecked(0, NULL);
++#endif
++
++      atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
++      atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
++      atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
++      atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
++      atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
++      atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
++      atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
++      atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
++}
++#else
++#define atomic_read_unchecked(v) atomic_read(v)
++#define atomic_set_unchecked(v, i) atomic_set((v), (i))
++#define atomic_add_unchecked(i, v) atomic_add((i), (v))
++#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
++#define atomic_inc_unchecked(v) atomic_inc(v)
++#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
++#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
++#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
++#define atomic_dec_unchecked(v) atomic_dec(v)
++#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
++#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
++#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
++#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
++
++#define atomic_long_read_unchecked(v) atomic_long_read(v)
++#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
++#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
++#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
++#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
++#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
++#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
++#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
++#endif
++
+ #endif  /*  _ASM_GENERIC_ATOMIC_LONG_H  */
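
The *_unchecked types and operations added above give code a way to opt out of PaX's REFCOUNT overflow checking: reference counts stay atomic_long_t and are trapped on overflow, while counters that may legitimately wrap (pure statistics, such as the drm counts[] array converted later in this patch) are switched to the unchecked variants; with CONFIG_PAX_REFCOUNT disabled both map to the same plain atomics. A simplified userspace illustration of that checked/unchecked split, with made-up helper names and no atomicity, is:

#include <limits.h>
#include <stdio.h>

/* "Checked" increment: refuse to wrap, roughly what a REFCOUNT trap
 * prevents for reference counts. */
static int checked_inc(unsigned long *v)
{
        if (*v == ULONG_MAX)
                return -1;              /* would overflow */
        (*v)++;
        return 0;
}

/* "Unchecked" increment: wrapping is acceptable, e.g. for statistics. */
static void unchecked_inc(unsigned long *v)
{
        (*v)++;
}

int main(void)
{
        unsigned long refs = ULONG_MAX, stats = ULONG_MAX;

        printf("checked_inc at max -> %d, refs still %lu\n",
               checked_inc(&refs), refs);
        unchecked_inc(&stats);
        printf("stats wrapped to %lu\n", stats);
        return 0;
}
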
+diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
+index 33bd2de..f31bff97 100644
+--- a/include/asm-generic/atomic.h
++++ b/include/asm-generic/atomic.h
+@@ -153,7 +153,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+  * Atomically clears the bits set in @mask from @v
+  */
+ #ifndef atomic_clear_mask
+-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
++static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+ {
+       unsigned long flags;
+diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
+index b18ce4f..2ee2843 100644
+--- a/include/asm-generic/atomic64.h
++++ b/include/asm-generic/atomic64.h
+@@ -16,6 +16,8 @@ typedef struct {
+       long long counter;
+ } atomic64_t;
++typedef atomic64_t atomic64_unchecked_t;
++
+ #define ATOMIC64_INIT(i)      { (i) }
+ extern long long atomic64_read(const atomic64_t *v);
+@@ -39,4 +41,14 @@ extern int   atomic64_add_unless(atomic64_t *v, long long a, long long u);
+ #define atomic64_dec_and_test(v)      (atomic64_dec_return((v)) == 0)
+ #define atomic64_inc_not_zero(v)      atomic64_add_unless((v), 1LL, 0LL)
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif  /*  _ASM_GENERIC_ATOMIC64_H  */
+diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
+index 1bfcfe5..e04c5c9 100644
+--- a/include/asm-generic/cache.h
++++ b/include/asm-generic/cache.h
+@@ -6,7 +6,7 @@
+  * cache lines need to provide their own cache.h.
+  */
+-#define L1_CACHE_SHIFT                5
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_SHIFT                5UL
++#define L1_CACHE_BYTES                (1UL << L1_CACHE_SHIFT)
+ #endif /* __ASM_GENERIC_CACHE_H */
+diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
+index 0d68a1e..b74a761 100644
+--- a/include/asm-generic/emergency-restart.h
++++ b/include/asm-generic/emergency-restart.h
+@@ -1,7 +1,7 @@
+ #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
+ #define _ASM_GENERIC_EMERGENCY_RESTART_H
+-static inline void machine_emergency_restart(void)
++static inline __noreturn void machine_emergency_restart(void)
+ {
+       machine_restart(NULL);
+ }
+diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
+index 90f99c7..00ce236 100644
+--- a/include/asm-generic/kmap_types.h
++++ b/include/asm-generic/kmap_types.h
+@@ -2,9 +2,9 @@
+ #define _ASM_GENERIC_KMAP_TYPES_H
+ #ifdef __WITH_KM_FENCE
+-# define KM_TYPE_NR 41
++# define KM_TYPE_NR 42
+ #else
+-# define KM_TYPE_NR 20
++# define KM_TYPE_NR 21
+ #endif
+ #endif
+diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
+index 9ceb03b..62b0b8f 100644
+--- a/include/asm-generic/local.h
++++ b/include/asm-generic/local.h
+@@ -23,24 +23,37 @@ typedef struct
+       atomic_long_t a;
+ } local_t;
++typedef struct {
++      atomic_long_unchecked_t a;
++} local_unchecked_t;
++
+ #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+ #define local_read(l) atomic_long_read(&(l)->a)
++#define local_read_unchecked(l)       atomic_long_read_unchecked(&(l)->a)
+ #define local_set(l,i)        atomic_long_set((&(l)->a),(i))
++#define local_set_unchecked(l,i)      atomic_long_set_unchecked((&(l)->a),(i))
+ #define local_inc(l)  atomic_long_inc(&(l)->a)
++#define local_inc_unchecked(l)        atomic_long_inc_unchecked(&(l)->a)
+ #define local_dec(l)  atomic_long_dec(&(l)->a)
++#define local_dec_unchecked(l)        atomic_long_dec_unchecked(&(l)->a)
+ #define local_add(i,l)        atomic_long_add((i),(&(l)->a))
++#define local_add_unchecked(i,l)      atomic_long_add_unchecked((i),(&(l)->a))
+ #define local_sub(i,l)        atomic_long_sub((i),(&(l)->a))
++#define local_sub_unchecked(i,l)      atomic_long_sub_unchecked((i),(&(l)->a))
+ #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
+ #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
+ #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
+ #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
+ #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
++#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
+ #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
+ #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
++#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
+ #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
++#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
+ #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
+ #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
+ #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
+diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
+index 725612b..9cc513a 100644
+--- a/include/asm-generic/pgtable-nopmd.h
++++ b/include/asm-generic/pgtable-nopmd.h
+@@ -1,14 +1,19 @@
+ #ifndef _PGTABLE_NOPMD_H
+ #define _PGTABLE_NOPMD_H
+-#ifndef __ASSEMBLY__
+-
+ #include <asm-generic/pgtable-nopud.h>
+-struct mm_struct;
+-
+ #define __PAGETABLE_PMD_FOLDED
++#define PMD_SHIFT     PUD_SHIFT
++#define PTRS_PER_PMD  1
++#define PMD_SIZE      (_AC(1,UL) << PMD_SHIFT)
++#define PMD_MASK      (~(PMD_SIZE-1))
++
++#ifndef __ASSEMBLY__
++
++struct mm_struct;
++
+ /*
+  * Having the pmd type consist of a pud gets the size right, and allows
+  * us to conceptually access the pud entry that this pmd is folded into
+@@ -16,11 +21,6 @@ struct mm_struct;
+  */
+ typedef struct { pud_t pud; } pmd_t;
+-#define PMD_SHIFT     PUD_SHIFT
+-#define PTRS_PER_PMD  1
+-#define PMD_SIZE      (1UL << PMD_SHIFT)
+-#define PMD_MASK      (~(PMD_SIZE-1))
+-
+ /*
+  * The "pud_xxx()" functions here are trivial for a folded two-level
+  * setup: the pmd is never bad, and a pmd always exists (as it's folded
+diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
+index 810431d..0ec4804f 100644
+--- a/include/asm-generic/pgtable-nopud.h
++++ b/include/asm-generic/pgtable-nopud.h
+@@ -1,10 +1,15 @@
+ #ifndef _PGTABLE_NOPUD_H
+ #define _PGTABLE_NOPUD_H
+-#ifndef __ASSEMBLY__
+-
+ #define __PAGETABLE_PUD_FOLDED
++#define PUD_SHIFT     PGDIR_SHIFT
++#define PTRS_PER_PUD  1
++#define PUD_SIZE      (_AC(1,UL) << PUD_SHIFT)
++#define PUD_MASK      (~(PUD_SIZE-1))
++
++#ifndef __ASSEMBLY__
++
+ /*
+  * Having the pud type consist of a pgd gets the size right, and allows
+  * us to conceptually access the pgd entry that this pud is folded into
+@@ -12,11 +17,6 @@
+  */
+ typedef struct { pgd_t pgd; } pud_t;
+-#define PUD_SHIFT     PGDIR_SHIFT
+-#define PTRS_PER_PUD  1
+-#define PUD_SIZE      (1UL << PUD_SHIFT)
+-#define PUD_MASK      (~(PUD_SIZE-1))
+-
+ /*
+  * The "pgd_xxx()" functions here are trivial for a folded two-level
+  * setup: the pud is never bad, and a pud always exists (as it's folded
+@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd)     { }
+ #define pud_ERROR(pud)                                (pgd_ERROR((pud).pgd))
+ #define pgd_populate(mm, pgd, pud)            do { } while (0)
++#define pgd_populate_kernel(mm, pgd, pud)     do { } while (0)
+ /*
+  * (puds are folded into pgds so this doesn't get actually called,
+  * but the define is needed for a generic inline function.)
+diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
+index a59ff51..2594a70 100644
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -688,6 +688,14 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
+ }
+ #endif /* CONFIG_NUMA_BALANCING */
++#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
++static inline unsigned long pax_open_kernel(void) { return 0; }
++#endif
++
++#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++
+ #endif /* CONFIG_MMU */
+ #endif /* !__ASSEMBLY__ */
+diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
+index c184aa8..d049942 100644
+--- a/include/asm-generic/uaccess.h
++++ b/include/asm-generic/uaccess.h
+@@ -343,4 +343,12 @@ clear_user(void __user *to, unsigned long n)
+       return __clear_user(to, n);
+ }
++#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
++//static inline unsigned long pax_open_userland(void) { return 0; }
++#endif
++
++#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
++//static inline unsigned long pax_close_userland(void) { return 0; }
++#endif
++
+ #endif /* __ASM_GENERIC_UACCESS_H */
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index eb58d2d..df131bf 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -239,6 +239,7 @@
+       .rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {           \
+               VMLINUX_SYMBOL(__start_rodata) = .;                     \
+               *(.rodata) *(.rodata.*)                                 \
++              *(.data..read_only)                                     \
+               *(__vermagic)           /* Kernel version magic */      \
+               . = ALIGN(8);                                           \
+               VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;         \
+@@ -749,17 +750,18 @@
+  * section in the linker script will go there too.  @phdr should have
+  * a leading colon.
+  *
+- * Note that this macros defines __per_cpu_load as an absolute symbol.
++ * Note that this macros defines per_cpu_load as an absolute symbol.
+  * If there is no need to put the percpu section at a predetermined
+  * address, use PERCPU_SECTION.
+  */
+ #define PERCPU_VADDR(cacheline, vaddr, phdr)                          \
+-      VMLINUX_SYMBOL(__per_cpu_load) = .;                             \
+-      .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)         \
++      per_cpu_load = .;                                               \
++      .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load)           \
+                               - LOAD_OFFSET) {                        \
++              VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load;      \
+               PERCPU_INPUT(cacheline)                                 \
+       } phdr                                                          \
+-      . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
++      . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
+ /**
+  * PERCPU_SECTION - define output section for percpu area, simple version
+diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
+index 418d270..bfd2794 100644
+--- a/include/crypto/algapi.h
++++ b/include/crypto/algapi.h
+@@ -34,7 +34,7 @@ struct crypto_type {
+       unsigned int maskclear;
+       unsigned int maskset;
+       unsigned int tfmsize;
+-};
++} __do_const;
+ struct crypto_instance {
+       struct crypto_alg alg;
+diff --git a/include/drm/drmP.h b/include/drm/drmP.h
+index 63d17ee..716de2b 100644
+--- a/include/drm/drmP.h
++++ b/include/drm/drmP.h
+@@ -72,6 +72,7 @@
+ #include <linux/workqueue.h>
+ #include <linux/poll.h>
+ #include <asm/pgalloc.h>
++#include <asm/local.h>
+ #include <drm/drm.h>
+ #include <drm/drm_sarea.h>
+@@ -296,10 +297,12 @@ do {                                                                             \
+  * \param cmd command.
+  * \param arg argument.
+  */
+-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
++typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
++typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
+                              unsigned long arg);
+ #define DRM_IOCTL_NR(n)                _IOC_NR(n)
+@@ -314,10 +317,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
+ struct drm_ioctl_desc {
+       unsigned int cmd;
+       int flags;
+-      drm_ioctl_t *func;
++      drm_ioctl_t func;
+       unsigned int cmd_drv;
+       const char *name;
+-};
++} __do_const;
+ /**
+  * Creates a driver or general drm_ioctl_desc array entry for the given
+@@ -1015,7 +1018,7 @@ struct drm_info_list {
+       int (*show)(struct seq_file*, void*); /** show callback */
+       u32 driver_features; /**< Required driver features for this entry */
+       void *data;
+-};
++} __do_const;
+ /**
+  * debugfs node structure. This structure represents a debugfs file.
+@@ -1088,7 +1091,7 @@ struct drm_device {
+       /** \name Usage Counters */
+       /*@{ */
+-      int open_count;                 /**< Outstanding files open */
++      local_t open_count;             /**< Outstanding files open */
+       atomic_t ioctl_count;           /**< Outstanding IOCTLs pending */
+       atomic_t vma_count;             /**< Outstanding vma areas open */
+       int buf_use;                    /**< Buffers in use -- cannot alloc */
+@@ -1099,7 +1102,7 @@ struct drm_device {
+       /*@{ */
+       unsigned long counters;
+       enum drm_stat_type types[15];
+-      atomic_t counts[15];
++      atomic_unchecked_t counts[15];
+       /*@} */
+       struct list_head filelist;
+diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
+index f43d556..94d9343 100644
+--- a/include/drm/drm_crtc_helper.h
++++ b/include/drm/drm_crtc_helper.h
+@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
+                                           struct drm_connector *connector);
+       /* disable encoder when not in use - more explicit than dpms off */
+       void (*disable)(struct drm_encoder *encoder);
+-};
++} __no_const;
+ /**
+  * drm_connector_helper_funcs - helper operations for connectors
+diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
+index 72dcbe8..8db58d7 100644
+--- a/include/drm/ttm/ttm_memory.h
++++ b/include/drm/ttm/ttm_memory.h
+@@ -48,7 +48,7 @@
+ struct ttm_mem_shrink {
+       int (*do_shrink) (struct ttm_mem_shrink *);
+-};
++} __no_const;
+ /**
+  * struct ttm_mem_global - Global memory accounting structure.
+diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
+index 4b840e8..155d235 100644
+--- a/include/keys/asymmetric-subtype.h
++++ b/include/keys/asymmetric-subtype.h
+@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
+       /* Verify the signature on a key of this subtype (optional) */
+       int (*verify_signature)(const struct key *key,
+                               const struct public_key_signature *sig);
+-};
++} __do_const;
+ /**
+  * asymmetric_key_subtype - Get the subtype from an asymmetric key
+diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
+index c1da539..1dcec55 100644
+--- a/include/linux/atmdev.h
++++ b/include/linux/atmdev.h
+@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
+ #endif
+ struct k_atm_aal_stats {
+-#define __HANDLE_ITEM(i) atomic_t i
++#define __HANDLE_ITEM(i) atomic_unchecked_t i
+       __AAL_STAT_ITEMS
+ #undef __HANDLE_ITEM
+ };
+@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
+       int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
+       int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
+       struct module *owner;
+-};
++} __do_const ;
+ struct atmphy_ops {
+       int (*start)(struct atm_dev *dev);
+diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
+index 70cf138..0418ee2 100644
+--- a/include/linux/binfmts.h
++++ b/include/linux/binfmts.h
+@@ -73,8 +73,10 @@ struct linux_binfmt {
+       int (*load_binary)(struct linux_binprm *);
+       int (*load_shlib)(struct file *);
+       int (*core_dump)(struct coredump_params *cprm);
++      void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
++      void (*handle_mmap)(struct file *);
+       unsigned long min_coredump;     /* minimal dump size */
+-};
++} __do_const;
+ extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 2fdb4a4..54aad7e 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -1526,7 +1526,7 @@ struct block_device_operations {
+       /* this callback is with swap_lock and sometimes page table lock held */
+       void (*swap_slot_free_notify) (struct block_device *, unsigned long);
+       struct module *owner;
+-};
++} __do_const;
+ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
+                                unsigned long);
+diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
+index 7c2e030..b72475d 100644
+--- a/include/linux/blktrace_api.h
++++ b/include/linux/blktrace_api.h
+@@ -23,7 +23,7 @@ struct blk_trace {
+       struct dentry *dir;
+       struct dentry *dropped_file;
+       struct dentry *msg_file;
+-      atomic_t dropped;
++      atomic_unchecked_t dropped;
+ };
+ extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
+diff --git a/include/linux/cache.h b/include/linux/cache.h
+index 4c57065..4307975 100644
+--- a/include/linux/cache.h
++++ b/include/linux/cache.h
+@@ -16,6 +16,10 @@
+ #define __read_mostly
+ #endif
++#ifndef __read_only
++#define __read_only __read_mostly
++#endif
++
+ #ifndef ____cacheline_aligned
+ #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
+ #endif
+diff --git a/include/linux/capability.h b/include/linux/capability.h
+index d9a4f7f4..19f77d6 100644
+--- a/include/linux/capability.h
++++ b/include/linux/capability.h
+@@ -213,8 +213,13 @@ extern bool ns_capable(struct user_namespace *ns, int cap);
+ extern bool nsown_capable(int cap);
+ extern bool inode_capable(const struct inode *inode, int cap);
+ extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
++extern bool capable_nolog(int cap);
++extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
++extern bool inode_capable_nolog(const struct inode *inode, int cap);
+ /* audit system wants to get cap info from files as well */
+ extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
++extern int is_privileged_binary(const struct dentry *dentry);
++
+ #endif /* !_LINUX_CAPABILITY_H */
+diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
+index 8609d57..86e4d79 100644
+--- a/include/linux/cdrom.h
++++ b/include/linux/cdrom.h
+@@ -87,7 +87,6 @@ struct cdrom_device_ops {
+ /* driver specifications */
+       const int capability;   /* capability flags */
+-      int n_minors;           /* number of active minor devices */
+       /* handle uniform packets for scsi type devices (scsi,atapi) */
+       int (*generic_packet) (struct cdrom_device_info *,
+                              struct packet_command *);
+diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
+index 4ce9056..86caac6 100644
+--- a/include/linux/cleancache.h
++++ b/include/linux/cleancache.h
+@@ -31,7 +31,7 @@ struct cleancache_ops {
+       void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
+       void (*invalidate_inode)(int, struct cleancache_filekey);
+       void (*invalidate_fs)(int);
+-};
++} __no_const;
+ extern struct cleancache_ops *
+       cleancache_register_ops(struct cleancache_ops *ops);
+diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
+index 1186098..f87e53d 100644
+--- a/include/linux/clk-provider.h
++++ b/include/linux/clk-provider.h
+@@ -132,6 +132,7 @@ struct clk_ops {
+                                   unsigned long);
+       void            (*init)(struct clk_hw *hw);
+ };
++typedef struct clk_ops __no_const clk_ops_no_const;
+ /**
+  * struct clk_init_data - holds init data that's common to all clocks and is
+diff --git a/include/linux/compat.h b/include/linux/compat.h
+index 7f0c1dd..206ac34 100644
+--- a/include/linux/compat.h
++++ b/include/linux/compat.h
+@@ -312,7 +312,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
+                          compat_size_t __user *len_ptr);
+ asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
+-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
++asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
+ asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
+ asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
+               compat_ssize_t msgsz, int msgflg);
+@@ -419,7 +419,7 @@ extern int compat_ptrace_request(struct task_struct *child,
+ extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+                              compat_ulong_t addr, compat_ulong_t data);
+ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+-                                compat_long_t addr, compat_long_t data);
++                                compat_ulong_t addr, compat_ulong_t data);
+ asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, size_t);
+ /*
+@@ -669,6 +669,7 @@ asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
+ int compat_restore_altstack(const compat_stack_t __user *uss);
+ int __compat_save_altstack(compat_stack_t __user *, unsigned long);
++void __compat_save_altstack_ex(compat_stack_t __user *, unsigned long);
+ asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
+                                                struct compat_timespec __user *interval);
+diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
+index 842de22..7f3a41f 100644
+--- a/include/linux/compiler-gcc4.h
++++ b/include/linux/compiler-gcc4.h
+@@ -39,9 +39,29 @@
+ # define __compiletime_warning(message) __attribute__((warning(message)))
+ # define __compiletime_error(message) __attribute__((error(message)))
+ #endif /* __CHECKER__ */
++
++#define __alloc_size(...)     __attribute((alloc_size(__VA_ARGS__)))
++#define __bos(ptr, arg)               __builtin_object_size((ptr), (arg))
++#define __bos0(ptr)           __bos((ptr), 0)
++#define __bos1(ptr)           __bos((ptr), 1)
+ #endif /* GCC_VERSION >= 40300 */
+ #if GCC_VERSION >= 40500
++
++#ifdef CONSTIFY_PLUGIN
++#define __no_const __attribute__((no_const))
++#define __do_const __attribute__((do_const))
++#endif
++
++#ifdef SIZE_OVERFLOW_PLUGIN
++#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
++#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
++#endif
++
++#ifdef LATENT_ENTROPY_PLUGIN
++#define __latent_entropy __attribute__((latent_entropy))
++#endif
++
+ /*
+  * Mark a position in code as unreachable.  This can be used to
+  * suppress control flow warnings after asm blocks that transfer
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 92669cd..1771a15 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -5,11 +5,14 @@
+ #ifdef __CHECKER__
+ # define __user               __attribute__((noderef, address_space(1)))
++# define __force_user __force __user
+ # define __kernel     __attribute__((address_space(0)))
++# define __force_kernel       __force __kernel
+ # define __safe               __attribute__((safe))
+ # define __force      __attribute__((force))
+ # define __nocast     __attribute__((nocast))
+ # define __iomem      __attribute__((noderef, address_space(2)))
++# define __force_iomem        __force __iomem
+ # define __must_hold(x)       __attribute__((context(x,1,1)))
+ # define __acquires(x)        __attribute__((context(x,0,1)))
+ # define __releases(x)        __attribute__((context(x,1,0)))
+@@ -17,20 +20,37 @@
+ # define __release(x) __context__(x,-1)
+ # define __cond_lock(x,c)     ((c) ? ({ __acquire(x); 1; }) : 0)
+ # define __percpu     __attribute__((noderef, address_space(3)))
++# define __force_percpu       __force __percpu
+ #ifdef CONFIG_SPARSE_RCU_POINTER
+ # define __rcu                __attribute__((noderef, address_space(4)))
++# define __force_rcu  __force __rcu
+ #else
+ # define __rcu
++# define __force_rcu
+ #endif
+ extern void __chk_user_ptr(const volatile void __user *);
+ extern void __chk_io_ptr(const volatile void __iomem *);
+ #else
+-# define __user
+-# define __kernel
++# ifdef CHECKER_PLUGIN
++//#  define __user
++//#  define __force_user
++//#  define __kernel
++//#  define __force_kernel
++# else
++#  ifdef STRUCTLEAK_PLUGIN
++#   define __user __attribute__((user))
++#  else
++#   define __user
++#  endif
++#  define __force_user
++#  define __kernel
++#  define __force_kernel
++# endif
+ # define __safe
+ # define __force
+ # define __nocast
+ # define __iomem
++# define __force_iomem
+ # define __chk_user_ptr(x) (void)0
+ # define __chk_io_ptr(x) (void)0
+ # define __builtin_warning(x, y...) (1)
+@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
+ # define __release(x) (void)0
+ # define __cond_lock(x,c) (c)
+ # define __percpu
++# define __force_percpu
+ # define __rcu
++# define __force_rcu
+ #endif
+ /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
+@@ -275,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+ # define __attribute_const__  /* unimplemented */
+ #endif
++#ifndef __no_const
++# define __no_const
++#endif
++
++#ifndef __do_const
++# define __do_const
++#endif
++
++#ifndef __size_overflow
++# define __size_overflow(...)
++#endif
++
++#ifndef __intentional_overflow
++# define __intentional_overflow(...)
++#endif
++
++#ifndef __latent_entropy
++# define __latent_entropy
++#endif
++
+ /*
+  * Tell gcc if a function is cold. The compiler will assume any path
+  * directly leading to the call is unlikely.
+@@ -284,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+ #define __cold
+ #endif
++#ifndef __alloc_size
++#define __alloc_size(...)
++#endif
++
++#ifndef __bos
++#define __bos(ptr, arg)
++#endif
++
++#ifndef __bos0
++#define __bos0(ptr)
++#endif
++
++#ifndef __bos1
++#define __bos1(ptr)
++#endif
++
+ /* Simple shorthand for a section definition */
+ #ifndef __section
+ # define __section(S) __attribute__ ((__section__(#S)))
+@@ -349,7 +407,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+  * use is to mediate communication between process-level code and irq/NMI
+  * handlers, all running on the same CPU.
+  */
+-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
++#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
++#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
+ /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
+ #ifdef CONFIG_KPROBES
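
With the change above, ACCESS_ONCE() reads through a volatile const lvalue, so it can no longer appear on the left-hand side of an assignment; code that really needs a forced volatile store uses the new ACCESS_ONCE_RW(). The snippet below copies the two macros into ordinary userspace C (assuming GCC or Clang for __typeof__) to show the difference; it is illustrative only.

#include <stdio.h>

/* Userspace copies of the two macros from the hunk above. */
#define ACCESS_ONCE(x)    (*(volatile const __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
        int flag = 0;

        ACCESS_ONCE_RW(flag) = 1;                  /* forced single store */
        printf("flag = %d\n", ACCESS_ONCE(flag));  /* forced single load  */
        /* ACCESS_ONCE(flag) = 2;  -- rejected: the lvalue is const */
        return 0;
}
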
+diff --git a/include/linux/completion.h b/include/linux/completion.h
+index 33f0280..35c6568 100644
+--- a/include/linux/completion.h
++++ b/include/linux/completion.h
+@@ -79,15 +79,15 @@ static inline void init_completion(struct completion *x)
+ extern void wait_for_completion(struct completion *);
+ extern void wait_for_completion_io(struct completion *);
+ extern int wait_for_completion_interruptible(struct completion *x);
+-extern int wait_for_completion_killable(struct completion *x);
++extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
+ extern unsigned long wait_for_completion_timeout(struct completion *x,
+                                                  unsigned long timeout);
+ extern unsigned long wait_for_completion_io_timeout(struct completion *x,
+                                                   unsigned long timeout);
+ extern long wait_for_completion_interruptible_timeout(
+-      struct completion *x, unsigned long timeout);
++      struct completion *x, unsigned long timeout) __intentional_overflow(-1);
+ extern long wait_for_completion_killable_timeout(
+-      struct completion *x, unsigned long timeout);
++      struct completion *x, unsigned long timeout) __intentional_overflow(-1);
+ extern bool try_wait_for_completion(struct completion *x);
+ extern bool completion_done(struct completion *x);
+diff --git a/include/linux/configfs.h b/include/linux/configfs.h
+index 34025df..d94bbbc 100644
+--- a/include/linux/configfs.h
++++ b/include/linux/configfs.h
+@@ -125,7 +125,7 @@ struct configfs_attribute {
+       const char              *ca_name;
+       struct module           *ca_owner;
+       umode_t                 ca_mode;
+-};
++} __do_const;
+ /*
+  * Users often need to create attribute structures for their configurable
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index 9f3c7e8..a18c7b6 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -115,7 +115,7 @@ enum {
+ /* Need to know about CPUs going up/down? */
+ #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
+ #define cpu_notifier(fn, pri) {                                       \
+-      static struct notifier_block fn##_nb __cpuinitdata =    \
++      static struct notifier_block fn##_nb =                  \
+               { .notifier_call = fn, .priority = pri };       \
+       register_cpu_notifier(&fn##_nb);                        \
+ }
+diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
+index 037d36a..ca5fe6e 100644
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -262,7 +262,7 @@ struct cpufreq_driver {
+       int     (*suspend)      (struct cpufreq_policy *policy);
+       int     (*resume)       (struct cpufreq_policy *policy);
+       struct freq_attr        **attr;
+-};
++} __do_const;
+ /* flags */
+@@ -321,6 +321,7 @@ struct global_attr {
+       ssize_t (*store)(struct kobject *a, struct attribute *b,
+                        const char *c, size_t count);
+ };
++typedef struct global_attr __no_const global_attr_no_const;
+ #define define_one_global_ro(_name)           \
+ static struct global_attr _name =             \
+diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
+index 8f04062..900239a 100644
+--- a/include/linux/cpuidle.h
++++ b/include/linux/cpuidle.h
+@@ -52,7 +52,8 @@ struct cpuidle_state {
+                       int index);
+       int (*enter_dead) (struct cpuidle_device *dev, int index);
+-};
++} __do_const;
++typedef struct cpuidle_state __no_const cpuidle_state_no_const;
+ /* Idle State Flags */
+ #define CPUIDLE_FLAG_TIME_VALID       (0x01) /* is residency time measurable? */
+@@ -191,7 +192,7 @@ struct cpuidle_governor {
+       void (*reflect)         (struct cpuidle_device *dev, int index);
+       struct module           *owner;
+-};
++} __do_const;
+ #ifdef CONFIG_CPU_IDLE
+diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
+index d08e4d2..95fad61 100644
+--- a/include/linux/cpumask.h
++++ b/include/linux/cpumask.h
+@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
+ }
+ /* Valid inputs for n are -1 and 0. */
+-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
++static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
+ {
+       return n+1;
+ }
+-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
++static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
+ {
+       return n+1;
+ }
+-static inline unsigned int cpumask_next_and(int n,
++static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
+                                           const struct cpumask *srcp,
+                                           const struct cpumask *andp)
+ {
+@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
+  *
+  * Returns >= nr_cpu_ids if no further cpus set.
+  */
+-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
++static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
+ {
+       /* -1 is a legal arg here. */
+       if (n != -1)
+@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+  *
+  * Returns >= nr_cpu_ids if no further cpus unset.
+  */
+-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
++static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
+ {
+       /* -1 is a legal arg here. */
+       if (n != -1)
+@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+       return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
+ }
+-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
++int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
+ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
+ /**
+diff --git a/include/linux/cred.h b/include/linux/cred.h
+index 04421e8..6bce4ef 100644
+--- a/include/linux/cred.h
++++ b/include/linux/cred.h
+@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
+ static inline void validate_process_creds(void)
+ {
+ }
++static inline void validate_task_creds(struct task_struct *task)
++{
++}
+ #endif
+ /**
+diff --git a/include/linux/crypto.h b/include/linux/crypto.h
+index b92eadf..b4ecdc1 100644
+--- a/include/linux/crypto.h
++++ b/include/linux/crypto.h
+@@ -373,7 +373,7 @@ struct cipher_tfm {
+                         const u8 *key, unsigned int keylen);
+       void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+       void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+-};
++} __no_const;
+ struct hash_tfm {
+       int (*init)(struct hash_desc *desc);
+@@ -394,13 +394,13 @@ struct compress_tfm {
+       int (*cot_decompress)(struct crypto_tfm *tfm,
+                             const u8 *src, unsigned int slen,
+                             u8 *dst, unsigned int *dlen);
+-};
++} __no_const;
+ struct rng_tfm {
+       int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
+                             unsigned int dlen);
+       int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
+-};
++} __no_const;
+ #define crt_ablkcipher        crt_u.ablkcipher
+ #define crt_aead      crt_u.aead
+diff --git a/include/linux/ctype.h b/include/linux/ctype.h
+index 653589e..4ef254a 100644
+--- a/include/linux/ctype.h
++++ b/include/linux/ctype.h
+@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
+  * Fast implementation of tolower() for internal usage. Do not use in your
+  * code.
+  */
+-static inline char _tolower(const char c)
++static inline unsigned char _tolower(const unsigned char c)
+ {
+       return c | 0x20;
+ }
+diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
+index 7925bf0..d5143d2 100644
+--- a/include/linux/decompress/mm.h
++++ b/include/linux/decompress/mm.h
+@@ -77,7 +77,7 @@ static void free(void *where)
+  * warnings when not needed (indeed large_malloc / large_free are not
+  * needed by inflate */
+-#define malloc(a) kmalloc(a, GFP_KERNEL)
++#define malloc(a) kmalloc((a), GFP_KERNEL)
+ #define free(a) kfree(a)
+ #define large_malloc(a) vmalloc(a)
+diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
+index fe8c447..bdc1f33 100644
+--- a/include/linux/devfreq.h
++++ b/include/linux/devfreq.h
+@@ -114,7 +114,7 @@ struct devfreq_governor {
+       int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+       int (*event_handler)(struct devfreq *devfreq,
+                               unsigned int event, void *data);
+-};
++} __do_const;
+ /**
+  * struct devfreq - Device devfreq structure
+diff --git a/include/linux/device.h b/include/linux/device.h
+index c0a1261..dba7569 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -290,7 +290,7 @@ struct subsys_interface {
+       struct list_head node;
+       int (*add_dev)(struct device *dev, struct subsys_interface *sif);
+       int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
+-};
++} __do_const;
+ int subsys_interface_register(struct subsys_interface *sif);
+ void subsys_interface_unregister(struct subsys_interface *sif);
+@@ -473,7 +473,7 @@ struct device_type {
+       void (*release)(struct device *dev);
+       const struct dev_pm_ops *pm;
+-};
++} __do_const;
+ /* interface for exporting device attributes */
+ struct device_attribute {
+@@ -483,11 +483,12 @@ struct device_attribute {
+       ssize_t (*store)(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count);
+ };
++typedef struct device_attribute __no_const device_attribute_no_const;
+ struct dev_ext_attribute {
+       struct device_attribute attr;
+       void *var;
+-};
++} __do_const;
+ ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
+                         char *buf);
+diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
+index 94af418..b1ca7a2 100644
+--- a/include/linux/dma-mapping.h
++++ b/include/linux/dma-mapping.h
+@@ -54,7 +54,7 @@ struct dma_map_ops {
+       u64 (*get_required_mask)(struct device *dev);
+ #endif
+       int is_phys;
+-};
++} __do_const;
+ #define DMA_BIT_MASK(n)       (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
+index 96d3e4a..dc36433 100644
+--- a/include/linux/dmaengine.h
++++ b/include/linux/dmaengine.h
+@@ -1035,9 +1035,9 @@ struct dma_pinned_list {
+ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
+ void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
+-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
++dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
+       struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
+-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
++dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
+       struct dma_pinned_list *pinned_list, struct page *page,
+       unsigned int offset, size_t len);
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 2bc0ad7..3f7b006 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -745,6 +745,7 @@ struct efivar_operations {
+       efi_set_variable_t *set_variable;
+       efi_query_variable_store_t *query_variable_store;
+ };
++typedef struct efivar_operations __no_const efivar_operations_no_const;
+ struct efivars {
+       /*
+diff --git a/include/linux/elf.h b/include/linux/elf.h
+index 40a3c0e..4c45a38 100644
+--- a/include/linux/elf.h
++++ b/include/linux/elf.h
+@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
+ #define elf_note      elf32_note
+ #define elf_addr_t    Elf32_Off
+ #define Elf_Half      Elf32_Half
++#define elf_dyn               Elf32_Dyn
+ #else
+@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
+ #define elf_note      elf64_note
+ #define elf_addr_t    Elf64_Off
+ #define Elf_Half      Elf64_Half
++#define elf_dyn               Elf64_Dyn
+ #endif
+diff --git a/include/linux/err.h b/include/linux/err.h
+index f2edce2..cc2082c 100644
+--- a/include/linux/err.h
++++ b/include/linux/err.h
+@@ -19,12 +19,12 @@
+ #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
+-static inline void * __must_check ERR_PTR(long error)
++static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
+ {
+       return (void *) error;
+ }
+-static inline long __must_check PTR_ERR(const void *ptr)
++static inline long __must_check __intentional_overflow(-1) PTR_ERR(const void *ptr)
+ {
+       return (long) ptr;
+ }
+diff --git a/include/linux/extcon.h b/include/linux/extcon.h
+index fcb51c8..bdafcf6 100644
+--- a/include/linux/extcon.h
++++ b/include/linux/extcon.h
+@@ -134,7 +134,7 @@ struct extcon_dev {
+       /* /sys/class/extcon/.../mutually_exclusive/... */
+       struct attribute_group attr_g_muex;
+       struct attribute **attrs_muex;
+-      struct device_attribute *d_attrs_muex;
++      device_attribute_no_const *d_attrs_muex;
+ };
+ /**
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index d49c60f..2834fbe 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -304,7 +304,7 @@ struct fb_ops {
+       /* called at KDB enter and leave time to prepare the console */
+       int (*fb_debug_enter)(struct fb_info *info);
+       int (*fb_debug_leave)(struct fb_info *info);
+-};
++} __do_const;
+ #ifdef CONFIG_FB_TILEBLITTING
+ #define FB_TILE_CURSOR_NONE        0
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index f65f5a6..2f4f93a 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -20,6 +20,7 @@ struct compat_sock_fprog {
+ struct sk_buff;
+ struct sock;
++struct bpf_jit_work;
+ struct sk_filter
+ {
+@@ -27,6 +28,9 @@ struct sk_filter
+       unsigned int            len;    /* Number of filter blocks */
+       unsigned int            (*bpf_func)(const struct sk_buff *skb,
+                                           const struct sock_filter *filter);
++#ifdef CONFIG_BPF_JIT
++      struct bpf_jit_work     *work;
++#endif
+       struct rcu_head         rcu;
+       struct sock_filter      insns[0];
+ };
+diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
+index 8293262..2b3b8bd 100644
+--- a/include/linux/frontswap.h
++++ b/include/linux/frontswap.h
+@@ -11,7 +11,7 @@ struct frontswap_ops {
+       int (*load)(unsigned, pgoff_t, struct page *);
+       void (*invalidate_page)(unsigned, pgoff_t);
+       void (*invalidate_area)(unsigned);
+-};
++} __no_const;
+ extern bool frontswap_enabled;
+ extern struct frontswap_ops *
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 65c2be2..4c53f6e 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1543,7 +1543,8 @@ struct file_operations {
+       long (*fallocate)(struct file *file, int mode, loff_t offset,
+                         loff_t len);
+       int (*show_fdinfo)(struct seq_file *m, struct file *f);
+-};
++} __do_const;
++typedef struct file_operations __no_const file_operations_no_const;
+ struct inode_operations {
+       struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
+@@ -2688,4 +2689,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
+               inode->i_flags |= S_NOSEC;
+ }
++static inline bool is_sidechannel_device(const struct inode *inode)
++{
++#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
++      umode_t mode = inode->i_mode;
++      return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
++#else
++      return false;
++#endif
++}
++
+ #endif /* _LINUX_FS_H */
+diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
+index 2b93a9a..855d94a 100644
+--- a/include/linux/fs_struct.h
++++ b/include/linux/fs_struct.h
+@@ -6,7 +6,7 @@
+ #include <linux/seqlock.h>
+ struct fs_struct {
+-      int users;
++      atomic_t users;
+       spinlock_t lock;
+       seqcount_t seq;
+       int umask;
+diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
+index 5dfa0aa..6acf322 100644
+--- a/include/linux/fscache-cache.h
++++ b/include/linux/fscache-cache.h
+@@ -112,7 +112,7 @@ struct fscache_operation {
+       fscache_operation_release_t release;
+ };
+-extern atomic_t fscache_op_debug_id;
++extern atomic_unchecked_t fscache_op_debug_id;
+ extern void fscache_op_work_func(struct work_struct *work);
+ extern void fscache_enqueue_operation(struct fscache_operation *);
+@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
+       INIT_WORK(&op->work, fscache_op_work_func);
+       atomic_set(&op->usage, 1);
+       op->state = FSCACHE_OP_ST_INITIALISED;
+-      op->debug_id = atomic_inc_return(&fscache_op_debug_id);
++      op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
+       op->processor = processor;
+       op->release = release;
+       INIT_LIST_HEAD(&op->pend_link);
+diff --git a/include/linux/fscache.h b/include/linux/fscache.h
+index 7a08623..4c07b0f 100644
+--- a/include/linux/fscache.h
++++ b/include/linux/fscache.h
+@@ -152,7 +152,7 @@ struct fscache_cookie_def {
+        * - this is mandatory for any object that may have data
+        */
+       void (*now_uncached)(void *cookie_netfs_data);
+-};
++} __do_const;
+ /*
+  * fscache cached network filesystem type
+diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
+index a78680a..87bd73e 100644
+--- a/include/linux/fsnotify.h
++++ b/include/linux/fsnotify.h
+@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
+       struct inode *inode = path->dentry->d_inode;
+       __u32 mask = FS_ACCESS;
++      if (is_sidechannel_device(inode))
++              return;
++
+       if (S_ISDIR(inode->i_mode))
+               mask |= FS_ISDIR;
+@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
+       struct inode *inode = path->dentry->d_inode;
+       __u32 mask = FS_MODIFY;
++      if (is_sidechannel_device(inode))
++              return;
++
+       if (S_ISDIR(inode->i_mode))
+               mask |= FS_ISDIR;
+@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
+  */
+ static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
+ {
+-      return kstrdup(name, GFP_KERNEL);
++      return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
+ }
+ /*
+diff --git a/include/linux/genhd.h b/include/linux/genhd.h
+index 9f3c275..911b591 100644
+--- a/include/linux/genhd.h
++++ b/include/linux/genhd.h
+@@ -194,7 +194,7 @@ struct gendisk {
+       struct kobject *slave_dir;
+       struct timer_rand_state *random;
+-      atomic_t sync_io;               /* RAID */
++      atomic_unchecked_t sync_io;     /* RAID */
+       struct disk_events *ev;
+ #ifdef  CONFIG_BLK_DEV_INTEGRITY
+       struct blk_integrity *integrity;
+diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
+index 023bc34..b02b46a 100644
+--- a/include/linux/genl_magic_func.h
++++ b/include/linux/genl_magic_func.h
+@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
+ },
+ #define ZZZ_genl_ops          CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
+-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
++static struct genl_ops ZZZ_genl_ops[] = {
+ #include GENL_MAGIC_INCLUDE_FILE
+ };
+diff --git a/include/linux/gfp.h b/include/linux/gfp.h
+index 0f615eb..5c3832f 100644
+--- a/include/linux/gfp.h
++++ b/include/linux/gfp.h
+@@ -35,6 +35,13 @@ struct vm_area_struct;
+ #define ___GFP_NO_KSWAPD      0x400000u
+ #define ___GFP_OTHER_NODE     0x800000u
+ #define ___GFP_WRITE          0x1000000u
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++#define ___GFP_USERCOPY               0x2000000u
++#else
++#define ___GFP_USERCOPY               0
++#endif
++
+ /* If the above are modified, __GFP_BITS_SHIFT may need updating */
+ /*
+@@ -92,6 +99,7 @@ struct vm_area_struct;
+ #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
+ #define __GFP_KMEMCG  ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
+ #define __GFP_WRITE   ((__force gfp_t)___GFP_WRITE)   /* Allocator intends to dirty page */
++#define __GFP_USERCOPY        ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
+ /*
+  * This may seem redundant, but it's a way of annotating false positives vs.
+@@ -99,7 +107,7 @@ struct vm_area_struct;
+  */
+ #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
+-#define __GFP_BITS_SHIFT 25   /* Room for N __GFP_FOO bits */
++#define __GFP_BITS_SHIFT 26   /* Room for N __GFP_FOO bits */
+ #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
+ /* This equals 0, but use constants in case they ever change */
+@@ -153,6 +161,8 @@ struct vm_area_struct;
+ /* 4GB DMA on some platforms */
+ #define GFP_DMA32     __GFP_DMA32
++#define GFP_USERCOPY  __GFP_USERCOPY
++
+ /* Convert GFP flags to their corresponding migrate type */
+ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
+ {
+diff --git a/include/linux/gracl.h b/include/linux/gracl.h
+new file mode 100644
+index 0000000..ebe6d72
+--- /dev/null
++++ b/include/linux/gracl.h
+@@ -0,0 +1,319 @@
++#ifndef GR_ACL_H
++#define GR_ACL_H
++
++#include <linux/grdefs.h>
++#include <linux/resource.h>
++#include <linux/capability.h>
++#include <linux/dcache.h>
++#include <asm/resource.h>
++
++/* Major status information */
++
++#define GR_VERSION  "grsecurity 2.9.1"
++#define GRSECURITY_VERSION 0x2901
++
++enum {
++      GR_SHUTDOWN = 0,
++      GR_ENABLE = 1,
++      GR_SPROLE = 2,
++      GR_RELOAD = 3,
++      GR_SEGVMOD = 4,
++      GR_STATUS = 5,
++      GR_UNSPROLE = 6,
++      GR_PASSSET = 7,
++      GR_SPROLEPAM = 8,
++};
++
++/* Password setup definitions
++ * kernel/grhash.c */
++enum {
++      GR_PW_LEN = 128,
++      GR_SALT_LEN = 16,
++      GR_SHA_LEN = 32,
++};
++
++enum {
++      GR_SPROLE_LEN = 64,
++};
++
++enum {
++      GR_NO_GLOB = 0,
++      GR_REG_GLOB,
++      GR_CREATE_GLOB
++};
++
++#define GR_NLIMITS 32
++
++/* Begin Data Structures */
++
++struct sprole_pw {
++      unsigned char *rolename;
++      unsigned char salt[GR_SALT_LEN];
++      unsigned char sum[GR_SHA_LEN];  /* 256-bit SHA hash of the password */
++};
++
++struct name_entry {
++      __u32 key;
++      ino_t inode;
++      dev_t device;
++      char *name;
++      __u16 len;
++      __u8 deleted;
++      struct name_entry *prev;
++      struct name_entry *next;
++};
++
++struct inodev_entry {
++      struct name_entry *nentry;
++      struct inodev_entry *prev;
++      struct inodev_entry *next;
++};
++
++struct acl_role_db {
++      struct acl_role_label **r_hash;
++      __u32 r_size;
++};
++
++struct inodev_db {
++      struct inodev_entry **i_hash;
++      __u32 i_size;
++};
++
++struct name_db {
++      struct name_entry **n_hash;
++      __u32 n_size;
++};
++
++struct crash_uid {
++      uid_t uid;
++      unsigned long expires;
++};
++
++struct gr_hash_struct {
++      void **table;
++      void **nametable;
++      void *first;
++      __u32 table_size;
++      __u32 used_size;
++      int type;
++};
++
++/* Userspace Grsecurity ACL data structures */
++
++struct acl_subject_label {
++      char *filename;
++      ino_t inode;
++      dev_t device;
++      __u32 mode;
++      kernel_cap_t cap_mask;
++      kernel_cap_t cap_lower;
++      kernel_cap_t cap_invert_audit;
++
++      struct rlimit res[GR_NLIMITS];
++      __u32 resmask;
++
++      __u8 user_trans_type;
++      __u8 group_trans_type;
++      uid_t *user_transitions;
++      gid_t *group_transitions;
++      __u16 user_trans_num;
++      __u16 group_trans_num;
++
++      __u32 sock_families[2];
++      __u32 ip_proto[8];
++      __u32 ip_type;
++      struct acl_ip_label **ips;
++      __u32 ip_num;
++      __u32 inaddr_any_override;
++
++      __u32 crashes;
++      unsigned long expires;
++
++      struct acl_subject_label *parent_subject;
++      struct gr_hash_struct *hash;
++      struct acl_subject_label *prev;
++      struct acl_subject_label *next;
++
++      struct acl_object_label **obj_hash;
++      __u32 obj_hash_size;
++      __u16 pax_flags;
++};
++
++struct role_allowed_ip {
++      __u32 addr;
++      __u32 netmask;
++
++      struct role_allowed_ip *prev;
++      struct role_allowed_ip *next;
++};
++
++struct role_transition {
++      char *rolename;
++
++      struct role_transition *prev;
++      struct role_transition *next;
++};
++
++struct acl_role_label {
++      char *rolename;
++      uid_t uidgid;
++      __u16 roletype;
++
++      __u16 auth_attempts;
++      unsigned long expires;
++
++      struct acl_subject_label *root_label;
++      struct gr_hash_struct *hash;
++
++      struct acl_role_label *prev;
++      struct acl_role_label *next;
++
++      struct role_transition *transitions;
++      struct role_allowed_ip *allowed_ips;
++      uid_t *domain_children;
++      __u16 domain_child_num;
++
++      umode_t umask;
++
++      struct acl_subject_label **subj_hash;
++      __u32 subj_hash_size;
++};
++
++struct user_acl_role_db {
++      struct acl_role_label **r_table;
++      __u32 num_pointers;             /* Number of allocations to track */
++      __u32 num_roles;                /* Number of roles */
++      __u32 num_domain_children;      /* Number of domain children */
++      __u32 num_subjects;             /* Number of subjects */
++      __u32 num_objects;              /* Number of objects */
++};
++
++struct acl_object_label {
++      char *filename;
++      ino_t inode;
++      dev_t device;
++      __u32 mode;
++
++      struct acl_subject_label *nested;
++      struct acl_object_label *globbed;
++
++      /* next two structures not used */
++
++      struct acl_object_label *prev;
++      struct acl_object_label *next;
++};
++
++struct acl_ip_label {
++      char *iface;
++      __u32 addr;
++      __u32 netmask;
++      __u16 low, high;
++      __u8 mode;
++      __u32 type;
++      __u32 proto[8];
++
++      /* next two structures not used */
++
++      struct acl_ip_label *prev;
++      struct acl_ip_label *next;
++};
++
++struct gr_arg {
++      struct user_acl_role_db role_db;
++      unsigned char pw[GR_PW_LEN];
++      unsigned char salt[GR_SALT_LEN];
++      unsigned char sum[GR_SHA_LEN];
++      unsigned char sp_role[GR_SPROLE_LEN];
++      struct sprole_pw *sprole_pws;
++      dev_t segv_device;
++      ino_t segv_inode;
++      uid_t segv_uid;
++      __u16 num_sprole_pws;
++      __u16 mode;
++};
++
++struct gr_arg_wrapper {
++      struct gr_arg *arg;
++      __u32 version;
++      __u32 size;
++};
++
++struct subject_map {
++      struct acl_subject_label *user;
++      struct acl_subject_label *kernel;
++      struct subject_map *prev;
++      struct subject_map *next;
++};
++
++struct acl_subj_map_db {
++      struct subject_map **s_hash;
++      __u32 s_size;
++};
++
++/* End Data Structures Section */
++
++/* Hash functions generated by empirical testing by Brad Spengler
++   Makes good use of the low bits of the inode.  Generally 0-1 times
++   in loop for successful match.  0-3 for unsuccessful match.
++   Shift/add algorithm with modulus of table size and an XOR*/
++
++static __inline__ unsigned int
++gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
++{
++      return ((((uid + type) << (16 + type)) ^ uid) % sz);
++}
++
++ static __inline__ unsigned int
++gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
++{
++      return ((const unsigned long)userp % sz);
++}
++
++static __inline__ unsigned int
++gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
++{
++      return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
++}
++
++static __inline__ unsigned int
++gr_nhash(const char *name, const __u16 len, const unsigned int sz)
++{
++      return full_name_hash((const unsigned char *)name, len) % sz;
++}
++
++#define FOR_EACH_ROLE_START(role) \
++      role = role_list; \
++      while (role) {
++
++#define FOR_EACH_ROLE_END(role) \
++              role = role->prev; \
++      }
++
++#define FOR_EACH_SUBJECT_START(role,subj,iter) \
++      subj = NULL; \
++      iter = 0; \
++      while (iter < role->subj_hash_size) { \
++              if (subj == NULL) \
++                      subj = role->subj_hash[iter]; \
++              if (subj == NULL) { \
++                      iter++; \
++                      continue; \
++              }
++
++#define FOR_EACH_SUBJECT_END(subj,iter) \
++              subj = subj->next; \
++              if (subj == NULL) \
++                      iter++; \
++      }
++
++
++#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
++      subj = role->hash->first; \
++      while (subj != NULL) {
++
++#define FOR_EACH_NESTED_SUBJECT_END(subj) \
++              subj = subj->next; \
++      }
++
++#endif
++
+diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
+new file mode 100644
+index 0000000..33ebd1f
+--- /dev/null
++++ b/include/linux/gracl_compat.h
+@@ -0,0 +1,156 @@
++#ifndef GR_ACL_COMPAT_H
++#define GR_ACL_COMPAT_H
++
++#include <linux/resource.h>
++#include <asm/resource.h>
++
++struct sprole_pw_compat {
++      compat_uptr_t rolename;
++      unsigned char salt[GR_SALT_LEN];
++      unsigned char sum[GR_SHA_LEN];
++};
++
++struct gr_hash_struct_compat {
++      compat_uptr_t table;
++      compat_uptr_t nametable;
++      compat_uptr_t first;
++      __u32 table_size;
++      __u32 used_size;
++      int type;
++};
++
++struct acl_subject_label_compat {
++      compat_uptr_t filename;
++      compat_ino_t inode;
++      __u32 device;
++      __u32 mode;
++      kernel_cap_t cap_mask;
++      kernel_cap_t cap_lower;
++      kernel_cap_t cap_invert_audit;
++
++      struct compat_rlimit res[GR_NLIMITS];
++      __u32 resmask;
++
++      __u8 user_trans_type;
++      __u8 group_trans_type;
++      compat_uptr_t user_transitions;
++      compat_uptr_t group_transitions;
++      __u16 user_trans_num;
++      __u16 group_trans_num;
++
++      __u32 sock_families[2];
++      __u32 ip_proto[8];
++      __u32 ip_type;
++      compat_uptr_t ips;
++      __u32 ip_num;
++      __u32 inaddr_any_override;
++
++      __u32 crashes;
++      compat_ulong_t expires;
++
++      compat_uptr_t parent_subject;
++      compat_uptr_t hash;
++      compat_uptr_t prev;
++      compat_uptr_t next;
++
++      compat_uptr_t obj_hash;
++      __u32 obj_hash_size;
++      __u16 pax_flags;
++};
++
++struct role_allowed_ip_compat {
++      __u32 addr;
++      __u32 netmask;
++
++      compat_uptr_t prev;
++      compat_uptr_t next;
++};
++
++struct role_transition_compat {
++      compat_uptr_t rolename;
++
++      compat_uptr_t prev;
++      compat_uptr_t next;
++};
++
++struct acl_role_label_compat {
++      compat_uptr_t rolename;
++      uid_t uidgid;
++      __u16 roletype;
++
++      __u16 auth_attempts;
++      compat_ulong_t expires;
++
++      compat_uptr_t root_label;
++      compat_uptr_t hash;
++
++      compat_uptr_t prev;
++      compat_uptr_t next;
++
++      compat_uptr_t transitions;
++      compat_uptr_t allowed_ips;
++      compat_uptr_t domain_children;
++      __u16 domain_child_num;
++
++      umode_t umask;
++
++      compat_uptr_t subj_hash;
++      __u32 subj_hash_size;
++};
++
++struct user_acl_role_db_compat {
++      compat_uptr_t r_table;
++      __u32 num_pointers;
++      __u32 num_roles;
++      __u32 num_domain_children;
++      __u32 num_subjects;
++      __u32 num_objects;
++};
++
++struct acl_object_label_compat {
++      compat_uptr_t filename;
++      compat_ino_t inode;
++      __u32 device;
++      __u32 mode;
++
++      compat_uptr_t nested;
++      compat_uptr_t globbed;
++
++      compat_uptr_t prev;
++      compat_uptr_t next;
++};
++
++struct acl_ip_label_compat {
++      compat_uptr_t iface;
++      __u32 addr;
++      __u32 netmask;
++      __u16 low, high;
++      __u8 mode;
++      __u32 type;
++      __u32 proto[8];
++
++      compat_uptr_t prev;
++      compat_uptr_t next;
++};
++
++struct gr_arg_compat {
++      struct user_acl_role_db_compat role_db;
++      unsigned char pw[GR_PW_LEN];
++      unsigned char salt[GR_SALT_LEN];
++      unsigned char sum[GR_SHA_LEN];
++      unsigned char sp_role[GR_SPROLE_LEN];
++      compat_uptr_t sprole_pws;
++      __u32 segv_device;
++      compat_ino_t segv_inode;
++      uid_t segv_uid;
++      __u16 num_sprole_pws;
++      __u16 mode;
++};
++
++struct gr_arg_wrapper_compat {
++      compat_uptr_t arg;
++      __u32 version;
++      __u32 size;
++};
++
++#endif
+diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
+new file mode 100644
+index 0000000..323ecf2
+--- /dev/null
++++ b/include/linux/gralloc.h
+@@ -0,0 +1,9 @@
++#ifndef __GRALLOC_H
++#define __GRALLOC_H
++
++void acl_free_all(void);
++int acl_alloc_stack_init(unsigned long size);
++void *acl_alloc(unsigned long len);
++void *acl_alloc_num(unsigned long num, unsigned long len);
++
++#endif
+diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
+new file mode 100644
+index 0000000..be66033
+--- /dev/null
++++ b/include/linux/grdefs.h
+@@ -0,0 +1,140 @@
++#ifndef GRDEFS_H
++#define GRDEFS_H
++
++/* Begin grsecurity status declarations */
++
++enum {
++      GR_READY = 0x01,
++      GR_STATUS_INIT = 0x00   // disabled state
++};
++
++/* Begin  ACL declarations */
++
++/* Role flags */
++
++enum {
++      GR_ROLE_USER = 0x0001,
++      GR_ROLE_GROUP = 0x0002,
++      GR_ROLE_DEFAULT = 0x0004,
++      GR_ROLE_SPECIAL = 0x0008,
++      GR_ROLE_AUTH = 0x0010,
++      GR_ROLE_NOPW = 0x0020,
++      GR_ROLE_GOD = 0x0040,
++      GR_ROLE_LEARN = 0x0080,
++      GR_ROLE_TPE = 0x0100,
++      GR_ROLE_DOMAIN = 0x0200,
++      GR_ROLE_PAM = 0x0400,
++      GR_ROLE_PERSIST = 0x0800
++};
++
++/* ACL Subject and Object mode flags */
++enum {
++      GR_DELETED = 0x80000000
++};
++
++/* ACL Object-only mode flags */
++enum {
++      GR_READ         = 0x00000001,
++      GR_APPEND       = 0x00000002,
++      GR_WRITE        = 0x00000004,
++      GR_EXEC         = 0x00000008,
++      GR_FIND         = 0x00000010,
++      GR_INHERIT      = 0x00000020,
++      GR_SETID        = 0x00000040,
++      GR_CREATE       = 0x00000080,
++      GR_DELETE       = 0x00000100,
++      GR_LINK         = 0x00000200,
++      GR_AUDIT_READ   = 0x00000400,
++      GR_AUDIT_APPEND = 0x00000800,
++      GR_AUDIT_WRITE  = 0x00001000,
++      GR_AUDIT_EXEC   = 0x00002000,
++      GR_AUDIT_FIND   = 0x00004000,
++      GR_AUDIT_INHERIT= 0x00008000,
++      GR_AUDIT_SETID  = 0x00010000,
++      GR_AUDIT_CREATE = 0x00020000,
++      GR_AUDIT_DELETE = 0x00040000,
++      GR_AUDIT_LINK   = 0x00080000,
++      GR_PTRACERD     = 0x00100000,
++      GR_NOPTRACE     = 0x00200000,
++      GR_SUPPRESS     = 0x00400000,
++      GR_NOLEARN      = 0x00800000,
++      GR_INIT_TRANSFER= 0x01000000
++};
++
++#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
++                 GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
++                 GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
++
++/* ACL subject-only mode flags */
++enum {
++      GR_KILL         = 0x00000001,
++      GR_VIEW         = 0x00000002,
++      GR_PROTECTED    = 0x00000004,
++      GR_LEARN        = 0x00000008,
++      GR_OVERRIDE     = 0x00000010,
++      /* just a placeholder, this mode is only used in userspace */
++      GR_DUMMY        = 0x00000020,
++      GR_PROTSHM      = 0x00000040,
++      GR_KILLPROC     = 0x00000080,
++      GR_KILLIPPROC   = 0x00000100,
++      /* just a placeholder, this mode is only used in userspace */
++      GR_NOTROJAN     = 0x00000200,
++      GR_PROTPROCFD   = 0x00000400,
++      GR_PROCACCT     = 0x00000800,
++      GR_RELAXPTRACE  = 0x00001000,
++      //GR_NESTED     = 0x00002000,
++      GR_INHERITLEARN = 0x00004000,
++      GR_PROCFIND     = 0x00008000,
++      GR_POVERRIDE    = 0x00010000,
++      GR_KERNELAUTH   = 0x00020000,
++      GR_ATSECURE     = 0x00040000,
++      GR_SHMEXEC      = 0x00080000
++};
++
++enum {
++      GR_PAX_ENABLE_SEGMEXEC  = 0x0001,
++      GR_PAX_ENABLE_PAGEEXEC  = 0x0002,
++      GR_PAX_ENABLE_MPROTECT  = 0x0004,
++      GR_PAX_ENABLE_RANDMMAP  = 0x0008,
++      GR_PAX_ENABLE_EMUTRAMP  = 0x0010,
++      GR_PAX_DISABLE_SEGMEXEC = 0x0100,
++      GR_PAX_DISABLE_PAGEEXEC = 0x0200,
++      GR_PAX_DISABLE_MPROTECT = 0x0400,
++      GR_PAX_DISABLE_RANDMMAP = 0x0800,
++      GR_PAX_DISABLE_EMUTRAMP = 0x1000,
++};
++
++enum {
++      GR_ID_USER      = 0x01,
++      GR_ID_GROUP     = 0x02,
++};
++
++enum {
++      GR_ID_ALLOW     = 0x01,
++      GR_ID_DENY      = 0x02,
++};
++
++#define GR_CRASH_RES  31
++#define GR_UIDTABLE_MAX 500
++
++/* begin resource learning section */
++enum {
++      GR_RLIM_CPU_BUMP = 60,
++      GR_RLIM_FSIZE_BUMP = 50000,
++      GR_RLIM_DATA_BUMP = 10000,
++      GR_RLIM_STACK_BUMP = 1000,
++      GR_RLIM_CORE_BUMP = 10000,
++      GR_RLIM_RSS_BUMP = 500000,
++      GR_RLIM_NPROC_BUMP = 1,
++      GR_RLIM_NOFILE_BUMP = 5,
++      GR_RLIM_MEMLOCK_BUMP = 50000,
++      GR_RLIM_AS_BUMP = 500000,
++      GR_RLIM_LOCKS_BUMP = 2,
++      GR_RLIM_SIGPENDING_BUMP = 5,
++      GR_RLIM_MSGQUEUE_BUMP = 10000,
++      GR_RLIM_NICE_BUMP = 1,
++      GR_RLIM_RTPRIO_BUMP = 1,
++      GR_RLIM_RTTIME_BUMP = 1000000
++};
++
++#endif
+diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
+new file mode 100644
+index 0000000..fd8598b
+--- /dev/null
++++ b/include/linux/grinternal.h
+@@ -0,0 +1,228 @@
++#ifndef __GRINTERNAL_H
++#define __GRINTERNAL_H
++
++#ifdef CONFIG_GRKERNSEC
++
++#include <linux/fs.h>
++#include <linux/mnt_namespace.h>
++#include <linux/nsproxy.h>
++#include <linux/gracl.h>
++#include <linux/grdefs.h>
++#include <linux/grmsg.h>
++
++void gr_add_learn_entry(const char *fmt, ...)
++      __attribute__ ((format (printf, 1, 2)));
++__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
++                          const struct vfsmount *mnt);
++__u32 gr_check_create(const struct dentry *new_dentry,
++                           const struct dentry *parent,
++                           const struct vfsmount *mnt, const __u32 mode);
++int gr_check_protected_task(const struct task_struct *task);
++__u32 to_gr_audit(const __u32 reqmode);
++int gr_set_acls(const int type);
++int gr_apply_subject_to_task(struct task_struct *task);
++int gr_acl_is_enabled(void);
++char gr_roletype_to_char(void);
++
++void gr_handle_alertkill(struct task_struct *task);
++char *gr_to_filename(const struct dentry *dentry,
++                          const struct vfsmount *mnt);
++char *gr_to_filename1(const struct dentry *dentry,
++                          const struct vfsmount *mnt);
++char *gr_to_filename2(const struct dentry *dentry,
++                          const struct vfsmount *mnt);
++char *gr_to_filename3(const struct dentry *dentry,
++                          const struct vfsmount *mnt);
++
++extern int grsec_enable_ptrace_readexec;
++extern int grsec_enable_harden_ptrace;
++extern int grsec_enable_link;
++extern int grsec_enable_fifo;
++extern int grsec_enable_execve;
++extern int grsec_enable_shm;
++extern int grsec_enable_execlog;
++extern int grsec_enable_signal;
++extern int grsec_enable_audit_ptrace;
++extern int grsec_enable_forkfail;
++extern int grsec_enable_time;
++extern int grsec_enable_rofs;
++extern int grsec_enable_chroot_shmat;
++extern int grsec_enable_chroot_mount;
++extern int grsec_enable_chroot_double;
++extern int grsec_enable_chroot_pivot;
++extern int grsec_enable_chroot_chdir;
++extern int grsec_enable_chroot_chmod;
++extern int grsec_enable_chroot_mknod;
++extern int grsec_enable_chroot_fchdir;
++extern int grsec_enable_chroot_nice;
++extern int grsec_enable_chroot_execlog;
++extern int grsec_enable_chroot_caps;
++extern int grsec_enable_chroot_sysctl;
++extern int grsec_enable_chroot_unix;
++extern int grsec_enable_symlinkown;
++extern kgid_t grsec_symlinkown_gid;
++extern int grsec_enable_tpe;
++extern kgid_t grsec_tpe_gid;
++extern int grsec_enable_tpe_all;
++extern int grsec_enable_tpe_invert;
++extern int grsec_enable_socket_all;
++extern kgid_t grsec_socket_all_gid;
++extern int grsec_enable_socket_client;
++extern kgid_t grsec_socket_client_gid;
++extern int grsec_enable_socket_server;
++extern kgid_t grsec_socket_server_gid;
++extern kgid_t grsec_audit_gid;
++extern int grsec_enable_group;
++extern int grsec_enable_log_rwxmaps;
++extern int grsec_enable_mount;
++extern int grsec_enable_chdir;
++extern int grsec_resource_logging;
++extern int grsec_enable_blackhole;
++extern int grsec_lastack_retries;
++extern int grsec_enable_brute;
++extern int grsec_lock;
++
++extern spinlock_t grsec_alert_lock;
++extern unsigned long grsec_alert_wtime;
++extern unsigned long grsec_alert_fyet;
++
++extern spinlock_t grsec_audit_lock;
++
++extern rwlock_t grsec_exec_file_lock;
++
++#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
++                      gr_to_filename2((tsk)->exec_file->f_path.dentry, \
++                      (tsk)->exec_file->f_path.mnt) : "/")
++
++#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
++                      gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
++                      (tsk)->real_parent->exec_file->f_path.mnt) : "/")
++
++#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
++                      gr_to_filename((tsk)->exec_file->f_path.dentry, \
++                      (tsk)->exec_file->f_path.mnt) : "/")
++
++#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
++                      gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
++                      (tsk)->real_parent->exec_file->f_path.mnt) : "/")
++
++#define proc_is_chrooted(tsk_a)  ((tsk_a)->gr_is_chrooted)
++
++#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
++
++static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
++{
++      if (file1 && file2) {
++              const struct inode *inode1 = file1->f_path.dentry->d_inode;
++              const struct inode *inode2 = file2->f_path.dentry->d_inode;
++              if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
++                      return true;
++      }
++
++      return false;
++}
++
++#define GR_CHROOT_CAPS {{ \
++      CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
++      CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
++      CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
++      CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
++      CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
++      CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
++      CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
++
++#define security_learn(normal_msg,args...) \
++({ \
++      read_lock(&grsec_exec_file_lock); \
++      gr_add_learn_entry(normal_msg "\n", ## args); \
++      read_unlock(&grsec_exec_file_lock); \
++})
++
++enum {
++      GR_DO_AUDIT,
++      GR_DONT_AUDIT,
++      /* used for non-audit messages that we shouldn't kill the task on */
++      GR_DONT_AUDIT_GOOD
++};
++
++enum {
++      GR_TTYSNIFF,
++      GR_RBAC,
++      GR_RBAC_STR,
++      GR_STR_RBAC,
++      GR_RBAC_MODE2,
++      GR_RBAC_MODE3,
++      GR_FILENAME,
++      GR_SYSCTL_HIDDEN,
++      GR_NOARGS,
++      GR_ONE_INT,
++      GR_ONE_INT_TWO_STR,
++      GR_ONE_STR,
++      GR_STR_INT,
++      GR_TWO_STR_INT,
++      GR_TWO_INT,
++      GR_TWO_U64,
++      GR_THREE_INT,
++      GR_FIVE_INT_TWO_STR,
++      GR_TWO_STR,
++      GR_THREE_STR,
++      GR_FOUR_STR,
++      GR_STR_FILENAME,
++      GR_FILENAME_STR,
++      GR_FILENAME_TWO_INT,
++      GR_FILENAME_TWO_INT_STR,
++      GR_TEXTREL,
++      GR_PTRACE,
++      GR_RESOURCE,
++      GR_CAP,
++      GR_SIG,
++      GR_SIG2,
++      GR_CRASH1,
++      GR_CRASH2,
++      GR_PSACCT,
++      GR_RWXMAP,
++      GR_RWXMAPVMA
++};
++
++#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
++#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
++#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
++#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
++#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
++#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
++#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
++#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
++#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
++#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
++#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
++#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
++#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
++#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
++#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
++#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
++#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
++#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
++#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
++#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
++#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
++#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
++#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
++#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
++#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
++#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
++#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
++#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
++#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
++#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
++#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
++#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
++#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
++#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
++#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
++#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
++
++void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
++
++#endif
++
++#endif
+diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
+new file mode 100644
+index 0000000..a4396b5
+--- /dev/null
++++ b/include/linux/grmsg.h
+@@ -0,0 +1,113 @@
++#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
++#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
++#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
++#define GR_STOPMOD_MSG "denied modification of module state by "
++#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
++#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
++#define GR_IOPERM_MSG "denied use of ioperm() by "
++#define GR_IOPL_MSG "denied use of iopl() by "
++#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
++#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
++#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
++#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
++#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
++#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
++#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
++#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
++#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
++#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
++#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
++#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
++#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
++#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
++#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
++#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
++#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
++#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
++#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
++#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
++#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
++#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
++#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
++#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
++#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
++#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
++#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
++#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
++#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
++#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
++#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
++#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
++#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
++#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
++#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
++#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
++#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
++#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
++#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
++#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
++#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
++#define GR_INITF_ACL_MSG "init_variables() failed %s by "
++#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
++#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
++#define GR_SHUTS_ACL_MSG "shutdown auth success for "
++#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
++#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
++#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
++#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
++#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
++#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
++#define GR_ENABLEF_ACL_MSG "unable to load %s for "
++#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
++#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
++#define GR_RELOADF_ACL_MSG "failed reload of %s for "
++#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
++#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
++#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
++#define GR_SPROLEF_ACL_MSG "special role %s failure for "
++#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
++#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
++#define GR_INVMODE_ACL_MSG "invalid mode %d by "
++#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
++#define GR_FAILFORK_MSG "failed fork with errno %s by "
++#define GR_NICE_CHROOT_MSG "denied priority change by "
++#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
++#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
++#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
++#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
++#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
++#define GR_TIME_MSG "time set by "
++#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
++#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
++#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
++#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
++#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
++#define GR_BIND_MSG "denied bind() by "
++#define GR_CONNECT_MSG "denied connect() by "
++#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
++#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
++#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
++#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
++#define GR_CAP_ACL_MSG "use of %s denied for "
++#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
++#define GR_CAP_ACL_MSG2 "use of %s permitted for "
++#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
++#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
++#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
++#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
++#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
++#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
++#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
++#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
++#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
++#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
++#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
++#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
++#define GR_VM86_MSG "denied use of vm86 by "
++#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
++#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
++#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
++#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
++#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
++#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds.  Please investigate the crash report for "
++#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes.  Please investigate the crash report for "
+diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
+new file mode 100644
+index 0000000..3676b0b
+--- /dev/null
++++ b/include/linux/grsecurity.h
+@@ -0,0 +1,242 @@
++#ifndef GR_SECURITY_H
++#define GR_SECURITY_H
++#include <linux/fs.h>
++#include <linux/fs_struct.h>
++#include <linux/binfmts.h>
++#include <linux/gracl.h>
++
++/* notify of brain-dead configs */
++#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
++#endif
++#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
++#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
++#endif
++#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
++#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
++#endif
++#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
++#error "CONFIG_PAX enabled, but no PaX options are enabled."
++#endif
++
++void gr_handle_brute_attach(unsigned long mm_flags);
++void gr_handle_brute_check(void);
++void gr_handle_kernel_exploit(void);
++
++char gr_roletype_to_char(void);
++
++int gr_acl_enable_at_secure(void);
++
++int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
++int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
++
++void gr_del_task_from_ip_table(struct task_struct *p);
++
++int gr_pid_is_chrooted(struct task_struct *p);
++int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
++int gr_handle_chroot_nice(void);
++int gr_handle_chroot_sysctl(const int op);
++int gr_handle_chroot_setpriority(struct task_struct *p,
++                                      const int niceval);
++int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
++int gr_handle_chroot_chroot(const struct dentry *dentry,
++                                 const struct vfsmount *mnt);
++void gr_handle_chroot_chdir(const struct path *path);
++int gr_handle_chroot_chmod(const struct dentry *dentry,
++                                const struct vfsmount *mnt, const int mode);
++int gr_handle_chroot_mknod(const struct dentry *dentry,
++                                const struct vfsmount *mnt, const int mode);
++int gr_handle_chroot_mount(const struct dentry *dentry,
++                                const struct vfsmount *mnt,
++                                const char *dev_name);
++int gr_handle_chroot_pivot(void);
++int gr_handle_chroot_unix(const pid_t pid);
++
++int gr_handle_rawio(const struct inode *inode);
++
++void gr_handle_ioperm(void);
++void gr_handle_iopl(void);
++
++umode_t gr_acl_umask(void);
++
++int gr_tpe_allow(const struct file *file);
++
++void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
++void gr_clear_chroot_entries(struct task_struct *task);
++
++void gr_log_forkfail(const int retval);
++void gr_log_timechange(void);
++void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
++void gr_log_chdir(const struct dentry *dentry,
++                       const struct vfsmount *mnt);
++void gr_log_chroot_exec(const struct dentry *dentry,
++                             const struct vfsmount *mnt);
++void gr_log_remount(const char *devname, const int retval);
++void gr_log_unmount(const char *devname, const int retval);
++void gr_log_mount(const char *from, const char *to, const int retval);
++void gr_log_textrel(struct vm_area_struct *vma);
++void gr_log_ptgnustack(struct file *file);
++void gr_log_rwxmmap(struct file *file);
++void gr_log_rwxmprotect(struct vm_area_struct *vma);
++
++int gr_handle_follow_link(const struct inode *parent,
++                               const struct inode *inode,
++                               const struct dentry *dentry,
++                               const struct vfsmount *mnt);
++int gr_handle_fifo(const struct dentry *dentry,
++                        const struct vfsmount *mnt,
++                        const struct dentry *dir, const int flag,
++                        const int acc_mode);
++int gr_handle_hardlink(const struct dentry *dentry,
++                            const struct vfsmount *mnt,
++                            struct inode *inode,
++                            const int mode, const struct filename *to);
++
++int gr_is_capable(const int cap);
++int gr_is_capable_nolog(const int cap);
++int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
++int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
++
++void gr_copy_label(struct task_struct *tsk);
++void gr_handle_crash(struct task_struct *task, const int sig);
++int gr_handle_signal(const struct task_struct *p, const int sig);
++int gr_check_crash_uid(const kuid_t uid);
++int gr_check_protected_task(const struct task_struct *task);
++int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
++int gr_acl_handle_mmap(const struct file *file,
++                            const unsigned long prot);
++int gr_acl_handle_mprotect(const struct file *file,
++                                const unsigned long prot);
++int gr_check_hidden_task(const struct task_struct *tsk);
++__u32 gr_acl_handle_truncate(const struct dentry *dentry,
++                                  const struct vfsmount *mnt);
++__u32 gr_acl_handle_utime(const struct dentry *dentry,
++                               const struct vfsmount *mnt);
++__u32 gr_acl_handle_access(const struct dentry *dentry,
++                                const struct vfsmount *mnt, const int fmode);
++__u32 gr_acl_handle_chmod(const struct dentry *dentry,
++                               const struct vfsmount *mnt, umode_t *mode);
++__u32 gr_acl_handle_chown(const struct dentry *dentry,
++                               const struct vfsmount *mnt);
++__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
++                               const struct vfsmount *mnt);
++int gr_handle_ptrace(struct task_struct *task, const long request);
++int gr_handle_proc_ptrace(struct task_struct *task);
++__u32 gr_acl_handle_execve(const struct dentry *dentry,
++                                const struct vfsmount *mnt);
++int gr_check_crash_exec(const struct file *filp);
++int gr_acl_is_enabled(void);
++void gr_set_kernel_label(struct task_struct *task);
++void gr_set_role_label(struct task_struct *task, const kuid_t uid,
++                            const kgid_t gid);
++int gr_set_proc_label(const struct dentry *dentry,
++                      const struct vfsmount *mnt,
++                      const int unsafe_flags);
++__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
++                              const struct vfsmount *mnt);
++__u32 gr_acl_handle_open(const struct dentry *dentry,
++                              const struct vfsmount *mnt, int acc_mode);
++__u32 gr_acl_handle_creat(const struct dentry *dentry,
++                               const struct dentry *p_dentry,
++                               const struct vfsmount *p_mnt,
++                               int open_flags, int acc_mode, const int imode);
++void gr_handle_create(const struct dentry *dentry,
++                           const struct vfsmount *mnt);
++void gr_handle_proc_create(const struct dentry *dentry,
++                         const struct inode *inode);
++__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
++                               const struct dentry *parent_dentry,
++                               const struct vfsmount *parent_mnt,
++                               const int mode);
++__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
++                               const struct dentry *parent_dentry,
++                               const struct vfsmount *parent_mnt);
++__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
++                               const struct vfsmount *mnt);
++void gr_handle_delete(const ino_t ino, const dev_t dev);
++__u32 gr_acl_handle_unlink(const struct dentry *dentry,
++                                const struct vfsmount *mnt);
++__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
++                                 const struct dentry *parent_dentry,
++                                 const struct vfsmount *parent_mnt,
++                                 const struct filename *from);
++__u32 gr_acl_handle_link(const struct dentry *new_dentry,
++                              const struct dentry *parent_dentry,
++                              const struct vfsmount *parent_mnt,
++                              const struct dentry *old_dentry,
++                              const struct vfsmount *old_mnt, const struct filename *to);
++int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
++int gr_acl_handle_rename(struct dentry *new_dentry,
++                              struct dentry *parent_dentry,
++                              const struct vfsmount *parent_mnt,
++                              struct dentry *old_dentry,
++                              struct inode *old_parent_inode,
++                              struct vfsmount *old_mnt, const struct filename *newname);
++void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++                              struct dentry *old_dentry,
++                              struct dentry *new_dentry,
++                              struct vfsmount *mnt, const __u8 replace);
++__u32 gr_check_link(const struct dentry *new_dentry,
++                         const struct dentry *parent_dentry,
++                         const struct vfsmount *parent_mnt,
++                         const struct dentry *old_dentry,
++                         const struct vfsmount *old_mnt);
++int gr_acl_handle_filldir(const struct file *file, const char *name,
++                               const unsigned int namelen, const ino_t ino);
++
++__u32 gr_acl_handle_unix(const struct dentry *dentry,
++                              const struct vfsmount *mnt);
++void gr_acl_handle_exit(void);
++void gr_acl_handle_psacct(struct task_struct *task, const long code);
++int gr_acl_handle_procpidmem(const struct task_struct *task);
++int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
++int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
++void gr_audit_ptrace(struct task_struct *task);
++dev_t gr_get_dev_from_dentry(struct dentry *dentry);
++void gr_put_exec_file(struct task_struct *task);
++
++int gr_ptrace_readexec(struct file *file, int unsafe_flags);
++
++#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
++extern void gr_learn_resource(const struct task_struct *task, const int res,
++                            const unsigned long wanted, const int gt);
++#else
++static inline void gr_learn_resource(const struct task_struct *task, const int res,
++                                   const unsigned long wanted, const int gt)
++{
++}
++#endif
++
++#ifdef CONFIG_GRKERNSEC_RESLOG
++extern void gr_log_resource(const struct task_struct *task, const int res,
++                                 const unsigned long wanted, const int gt);
++#else
++static inline void gr_log_resource(const struct task_struct *task, const int res,
++                                 const unsigned long wanted, const int gt)
++{
++}
++#endif
++
++#ifdef CONFIG_GRKERNSEC
++void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
++void gr_handle_vm86(void);
++void gr_handle_mem_readwrite(u64 from, u64 to);
++
++void gr_log_badprocpid(const char *entry);
++
++extern int grsec_enable_dmesg;
++extern int grsec_disable_privio;
++
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++extern kgid_t grsec_proc_gid;
++#endif
++
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++extern int grsec_enable_chroot_findtask;
++#endif
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern int grsec_enable_setxid;
++#endif
++#endif
++
++#endif
+diff --git a/include/linux/grsock.h b/include/linux/grsock.h
+new file mode 100644
+index 0000000..e7ffaaf
+--- /dev/null
++++ b/include/linux/grsock.h
+@@ -0,0 +1,19 @@
++#ifndef __GRSOCK_H
++#define __GRSOCK_H
++
++extern void gr_attach_curr_ip(const struct sock *sk);
++extern int gr_handle_sock_all(const int family, const int type,
++                            const int protocol);
++extern int gr_handle_sock_server(const struct sockaddr *sck);
++extern int gr_handle_sock_server_other(const struct sock *sck);
++extern int gr_handle_sock_client(const struct sockaddr *sck);
++extern int gr_search_connect(struct socket * sock,
++                           struct sockaddr_in * addr);
++extern int gr_search_bind(struct socket * sock,
++                        struct sockaddr_in * addr);
++extern int gr_search_listen(struct socket * sock);
++extern int gr_search_accept(struct socket * sock);
++extern int gr_search_socket(const int domain, const int type,
++                          const int protocol);
++
++#endif
+diff --git a/include/linux/highmem.h b/include/linux/highmem.h
+index 7fb31da..08b5114 100644
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
+       kunmap_atomic(kaddr);
+ }
++static inline void sanitize_highpage(struct page *page)
++{
++      void *kaddr;
++      unsigned long flags;
++
++      local_irq_save(flags);
++      kaddr = kmap_atomic(page);
++      clear_page(kaddr);
++      kunmap_atomic(kaddr);
++      local_irq_restore(flags);
++}
++
+ static inline void zero_user_segments(struct page *page,
+       unsigned start1, unsigned end1,
+       unsigned start2, unsigned end2)
+diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
+index 1c7b89a..7f52502 100644
+--- a/include/linux/hwmon-sysfs.h
++++ b/include/linux/hwmon-sysfs.h
+@@ -25,7 +25,8 @@
+ struct sensor_device_attribute{
+       struct device_attribute dev_attr;
+       int index;
+-};
++} __do_const;
++typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
+ #define to_sensor_dev_attr(_dev_attr) \
+       container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
+@@ -41,7 +42,7 @@ struct sensor_device_attribute_2 {
+       struct device_attribute dev_attr;
+       u8 index;
+       u8 nr;
+-};
++} __do_const;
+ #define to_sensor_dev_attr_2(_dev_attr) \
+       container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
+diff --git a/include/linux/i2c.h b/include/linux/i2c.h
+index e988fa9..ff9f17e 100644
+--- a/include/linux/i2c.h
++++ b/include/linux/i2c.h
+@@ -366,6 +366,7 @@ struct i2c_algorithm {
+       /* To determine what the adapter supports */
+       u32 (*functionality) (struct i2c_adapter *);
+ };
++typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
+ /**
+  * struct i2c_bus_recovery_info - I2C bus recovery information
+diff --git a/include/linux/i2o.h b/include/linux/i2o.h
+index d23c3c2..eb63c81 100644
+--- a/include/linux/i2o.h
++++ b/include/linux/i2o.h
+@@ -565,7 +565,7 @@ struct i2o_controller {
+       struct i2o_device *exec;        /* Executive */
+ #if BITS_PER_LONG == 64
+       spinlock_t context_list_lock;   /* lock for context_list */
+-      atomic_t context_list_counter;  /* needed for unique contexts */
++      atomic_unchecked_t context_list_counter;        /* needed for unique contexts */
+       struct list_head context_list;  /* list of context id's
+                                          and pointers */
+ #endif
+diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
+index aff7ad8..3942bbd 100644
+--- a/include/linux/if_pppox.h
++++ b/include/linux/if_pppox.h
+@@ -76,7 +76,7 @@ struct pppox_proto {
+       int             (*ioctl)(struct socket *sock, unsigned int cmd,
+                                unsigned long arg);
+       struct module   *owner;
+-};
++} __do_const;
+ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
+ extern void unregister_pppox_proto(int proto_num);
+diff --git a/include/linux/init.h b/include/linux/init.h
+index 8618147..0821126 100644
+--- a/include/linux/init.h
++++ b/include/linux/init.h
+@@ -39,9 +39,36 @@
+  * Also note, that this data cannot be "const".
+  */
++#ifdef MODULE
++#define add_init_latent_entropy
++#define add_devinit_latent_entropy
++#define add_cpuinit_latent_entropy
++#define add_meminit_latent_entropy
++#else
++#define add_init_latent_entropy __latent_entropy
++
++#ifdef CONFIG_HOTPLUG
++#define add_devinit_latent_entropy
++#else
++#define add_devinit_latent_entropy __latent_entropy
++#endif
++
++#ifdef CONFIG_HOTPLUG_CPU
++#define add_cpuinit_latent_entropy
++#else
++#define add_cpuinit_latent_entropy __latent_entropy
++#endif
++
++#ifdef CONFIG_MEMORY_HOTPLUG
++#define add_meminit_latent_entropy
++#else
++#define add_meminit_latent_entropy __latent_entropy
++#endif
++#endif
++
+ /* These are for everybody (although not all archs will actually
+    discard it in modules) */
+-#define __init                __section(.init.text) __cold notrace
++#define __init                __section(.init.text) __cold notrace add_init_latent_entropy
+ #define __initdata    __section(.init.data)
+ #define __initconst   __constsection(.init.rodata)
+ #define __exitdata    __section(.exit.data)
+@@ -94,7 +121,7 @@
+ #define __exit          __section(.exit.text) __exitused __cold notrace
+ /* Used for HOTPLUG_CPU */
+-#define __cpuinit        __section(.cpuinit.text) __cold notrace
++#define __cpuinit        __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
+ #define __cpuinitdata    __section(.cpuinit.data)
+ #define __cpuinitconst   __constsection(.cpuinit.rodata)
+ #define __cpuexit        __section(.cpuexit.text) __exitused __cold notrace
+@@ -102,7 +129,7 @@
+ #define __cpuexitconst   __constsection(.cpuexit.rodata)
+ /* Used for MEMORY_HOTPLUG */
+-#define __meminit        __section(.meminit.text) __cold notrace
++#define __meminit        __section(.meminit.text) __cold notrace add_meminit_latent_entropy
+ #define __meminitdata    __section(.meminit.data)
+ #define __meminitconst   __constsection(.meminit.rodata)
+ #define __memexit        __section(.memexit.text) __exitused __cold notrace
+diff --git a/include/linux/init_task.h b/include/linux/init_task.h
+index 5cd0f09..c9f67cc 100644
+--- a/include/linux/init_task.h
++++ b/include/linux/init_task.h
+@@ -154,6 +154,12 @@ extern struct task_group root_task_group;
+ #define INIT_TASK_COMM "swapper"
++#ifdef CONFIG_X86
++#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
++#else
++#define INIT_TASK_THREAD_INFO
++#endif
++
+ /*
+  *  INIT_TASK is used to set up the first task table, touch at
+  * your own risk!. Base=0, limit=0x1fffff (=2MB)
+@@ -193,6 +199,7 @@ extern struct task_group root_task_group;
+       RCU_POINTER_INITIALIZER(cred, &init_cred),                      \
+       .comm           = INIT_TASK_COMM,                               \
+       .thread         = INIT_THREAD,                                  \
++      INIT_TASK_THREAD_INFO                                           \
+       .fs             = &init_fs,                                     \
+       .files          = &init_files,                                  \
+       .signal         = &init_signals,                                \
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index 5fa5afe..ac55b25 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -430,7 +430,7 @@ enum
+ /* map softirq index to softirq name. update 'softirq_to_name' in
+  * kernel/softirq.c when adding a new softirq.
+  */
+-extern char *softirq_to_name[NR_SOFTIRQS];
++extern const char * const softirq_to_name[NR_SOFTIRQS];
+ /* softirq mask and active fields moved to irq_cpustat_t in
+  * asm/hardirq.h to get better cache usage.  KAO
+@@ -438,12 +438,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
+ struct softirq_action
+ {
+-      void    (*action)(struct softirq_action *);
+-};
++      void    (*action)(void);
++} __no_const;
+ asmlinkage void do_softirq(void);
+ asmlinkage void __do_softirq(void);
+-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
++extern void open_softirq(int nr, void (*action)(void));
+ extern void softirq_init(void);
+ extern void __raise_softirq_irqoff(unsigned int nr);
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index 3aeb730..2177f39 100644
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -113,7 +113,7 @@ struct iommu_ops {
+       u32 (*domain_get_windows)(struct iommu_domain *domain);
+       unsigned long pgsize_bitmap;
+-};
++} __do_const;
+ #define IOMMU_GROUP_NOTIFY_ADD_DEVICE         1 /* Device added */
+ #define IOMMU_GROUP_NOTIFY_DEL_DEVICE         2 /* Pre Device removed */
+diff --git a/include/linux/ioport.h b/include/linux/ioport.h
+index 89b7c24..382af74 100644
+--- a/include/linux/ioport.h
++++ b/include/linux/ioport.h
+@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
+ int adjust_resource(struct resource *res, resource_size_t start,
+                   resource_size_t size);
+ resource_size_t resource_alignment(struct resource *res);
+-static inline resource_size_t resource_size(const struct resource *res)
++static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
+ {
+       return res->end - res->start + 1;
+ }
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index bc4e066..50468a9 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -328,7 +328,8 @@ struct irq_chip {
+       void            (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
+       unsigned long   flags;
+-};
++} __do_const;
++typedef struct irq_chip __no_const irq_chip_no_const;
+ /*
+  * irq_chip specific flags
+diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
+index 3e203eb..3fe68d0 100644
+--- a/include/linux/irqchip/arm-gic.h
++++ b/include/linux/irqchip/arm-gic.h
+@@ -59,9 +59,11 @@
+ #ifndef __ASSEMBLY__
++#include <linux/irq.h>
++
+ struct device_node;
+-extern struct irq_chip gic_arch_extn;
++extern irq_chip_no_const gic_arch_extn;
+ void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
+                   u32 offset, struct device_node *);
+diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
+index 6883e19..e854fcb 100644
+--- a/include/linux/kallsyms.h
++++ b/include/linux/kallsyms.h
+@@ -15,7 +15,8 @@
+ struct module;
+-#ifdef CONFIG_KALLSYMS
++#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ /* Lookup the address for a symbol. Returns 0 if not found. */
+ unsigned long kallsyms_lookup_name(const char *name);
+@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
+ /* Stupid that this does nothing, but I didn't create this mess. */
+ #define __print_symbol(fmt, addr)
+ #endif /*CONFIG_KALLSYMS*/
++#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
++      arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
++extern unsigned long kallsyms_lookup_name(const char *name);
++extern void __print_symbol(const char *fmt, unsigned long address);
++extern int sprint_backtrace(char *buffer, unsigned long address);
++extern int sprint_symbol(char *buffer, unsigned long address);
++extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
++const char *kallsyms_lookup(unsigned long addr,
++                          unsigned long *symbolsize,
++                          unsigned long *offset,
++                          char **modname, char *namebuf);
++extern int kallsyms_lookup_size_offset(unsigned long addr,
++                                unsigned long *symbolsize,
++                                unsigned long *offset);
++#endif
+ /* This macro allows us to keep printk typechecking */
+ static __printf(1, 2)
+diff --git a/include/linux/key-type.h b/include/linux/key-type.h
+index 518a53a..5e28358 100644
+--- a/include/linux/key-type.h
++++ b/include/linux/key-type.h
+@@ -125,7 +125,7 @@ struct key_type {
+       /* internal fields */
+       struct list_head        link;           /* link in types list */
+       struct lock_class_key   lock_class;     /* key->sem lock class */
+-};
++} __do_const;
+ extern struct key_type key_type_keyring;
+diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
+index c6e091b..a940adf 100644
+--- a/include/linux/kgdb.h
++++ b/include/linux/kgdb.h
+@@ -52,7 +52,7 @@ extern int kgdb_connected;
+ extern int kgdb_io_module_registered;
+ extern atomic_t                       kgdb_setting_breakpoint;
+-extern atomic_t                       kgdb_cpu_doing_single_step;
++extern atomic_unchecked_t     kgdb_cpu_doing_single_step;
+ extern struct task_struct     *kgdb_usethread;
+ extern struct task_struct     *kgdb_contthread;
+@@ -254,7 +254,7 @@ struct kgdb_arch {
+       void    (*correct_hw_break)(void);
+       void    (*enable_nmi)(bool on);
+-};
++} __do_const;
+ /**
+  * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
+@@ -279,7 +279,7 @@ struct kgdb_io {
+       void                    (*pre_exception) (void);
+       void                    (*post_exception) (void);
+       int                     is_console;
+-};
++} __do_const;
+ extern struct kgdb_arch               arch_kgdb_ops;
+diff --git a/include/linux/kmod.h b/include/linux/kmod.h
+index 0555cc6..b16a7a4 100644
+--- a/include/linux/kmod.h
++++ b/include/linux/kmod.h
+@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
+  * usually useless though. */
+ extern __printf(2, 3)
+ int __request_module(bool wait, const char *name, ...);
++extern __printf(3, 4)
++int ___request_module(bool wait, char *param_name, const char *name, ...);
+ #define request_module(mod...) __request_module(true, mod)
+ #define request_module_nowait(mod...) __request_module(false, mod)
+ #define try_then_request_module(x, mod...) \
+diff --git a/include/linux/kobject.h b/include/linux/kobject.h
+index 939b112..ed6ed51 100644
+--- a/include/linux/kobject.h
++++ b/include/linux/kobject.h
+@@ -111,7 +111,7 @@ struct kobj_type {
+       struct attribute **default_attrs;
+       const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
+       const void *(*namespace)(struct kobject *kobj);
+-};
++} __do_const;
+ struct kobj_uevent_env {
+       char *envp[UEVENT_NUM_ENVP];
+@@ -134,6 +134,7 @@ struct kobj_attribute {
+       ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
+                        const char *buf, size_t count);
+ };
++typedef struct kobj_attribute __no_const kobj_attribute_no_const;
+ extern const struct sysfs_ops kobj_sysfs_ops;
+diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
+index f66b065..c2c29b4 100644
+--- a/include/linux/kobject_ns.h
++++ b/include/linux/kobject_ns.h
+@@ -43,7 +43,7 @@ struct kobj_ns_type_operations {
+       const void *(*netlink_ns)(struct sock *sk);
+       const void *(*initial_ns)(void);
+       void (*drop_ns)(void *);
+-};
++} __do_const;
+ int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
+ int kobj_ns_type_registered(enum kobj_ns_type type);
+diff --git a/include/linux/kref.h b/include/linux/kref.h
+index 484604d..0f6c5b6 100644
+--- a/include/linux/kref.h
++++ b/include/linux/kref.h
+@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
+ static inline int kref_sub(struct kref *kref, unsigned int count,
+            void (*release)(struct kref *kref))
+ {
+-      WARN_ON(release == NULL);
++      BUG_ON(release == NULL);
+       if (atomic_sub_and_test((int) count, &kref->refcount)) {
+               release(kref);
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 8db53cf..c21121d 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -444,7 +444,7 @@ static inline void kvm_irqfd_exit(void)
+ {
+ }
+ #endif
+-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
++int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+                 struct module *module);
+ void kvm_exit(void);
+@@ -616,7 +616,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+                                       struct kvm_guest_debug *dbg);
+ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
+-int kvm_arch_init(void *opaque);
++int kvm_arch_init(const void *opaque);
+ void kvm_arch_exit(void);
+ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index eae7a05..2cdd875 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -919,7 +919,7 @@ struct ata_port_operations {
+        * fields must be pointers.
+        */
+       const struct ata_port_operations        *inherits;
+-};
++} __do_const;
+ struct ata_port_info {
+       unsigned long           flags;
+diff --git a/include/linux/list.h b/include/linux/list.h
+index b83e565..baa6c1d 100644
+--- a/include/linux/list.h
++++ b/include/linux/list.h
+@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
+ extern void list_del(struct list_head *entry);
+ #endif
++extern void __pax_list_add(struct list_head *new,
++                            struct list_head *prev,
++                            struct list_head *next);
++static inline void pax_list_add(struct list_head *new, struct list_head *head)
++{
++      __pax_list_add(new, head, head->next);
++}
++static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
++{
++      __pax_list_add(new, head->prev, head);
++}
++extern void pax_list_del(struct list_head *entry);
++
+ /**
+  * list_replace - replace old entry by new one
+  * @old : the element to be replaced
+@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
+       INIT_LIST_HEAD(entry);
+ }
++extern void pax_list_del_init(struct list_head *entry);
++
+ /**
+  * list_move - delete from one list and add as another's head
+  * @list: the entry to move
+diff --git a/include/linux/math64.h b/include/linux/math64.h
+index 2913b86..8dcbb1e 100644
+--- a/include/linux/math64.h
++++ b/include/linux/math64.h
+@@ -15,7 +15,7 @@
+  * This is commonly provided by 32bit archs to provide an optimized 64bit
+  * divide.
+  */
+-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
++static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+ {
+       *remainder = dividend % divisor;
+       return dividend / divisor;
+@@ -33,7 +33,7 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
+ /**
+  * div64_u64 - unsigned 64bit divide with 64bit divisor
+  */
+-static inline u64 div64_u64(u64 dividend, u64 divisor)
++static inline u64 __intentional_overflow(0) div64_u64(u64 dividend, u64 divisor)
+ {
+       return dividend / divisor;
+ }
+@@ -52,7 +52,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
+ #define div64_ul(x, y)   div_u64((x), (y))
+ #ifndef div_u64_rem
+-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
++static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+ {
+       *remainder = do_div(dividend, divisor);
+       return dividend;
+@@ -81,7 +81,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
+  * divide.
+  */
+ #ifndef div_u64
+-static inline u64 div_u64(u64 dividend, u32 divisor)
++static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
+ {
+       u32 remainder;
+       return div_u64_rem(dividend, divisor, &remainder);
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index e0c8528..bcf0c29 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -104,6 +104,11 @@ extern unsigned int kobjsize(const void *objp);
+ #define VM_HUGETLB    0x00400000      /* Huge TLB Page VM */
+ #define VM_NONLINEAR  0x00800000      /* Is non-linear (remap_file_pages) */
+ #define VM_ARCH_1     0x01000000      /* Architecture-specific flag */
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++#define VM_PAGEEXEC   0x02000000      /* vma->vm_page_prot needs special handling */
++#endif
++
+ #define VM_DONTDUMP   0x04000000      /* Do not include in the core dump */
+ #define VM_MIXEDMAP   0x10000000      /* Can contain "struct page" and pure PFN pages */
+@@ -205,8 +210,8 @@ struct vm_operations_struct {
+       /* called by access_process_vm when get_user_pages() fails, typically
+        * for use by special VMAs that can switch between memory and hardware
+        */
+-      int (*access)(struct vm_area_struct *vma, unsigned long addr,
+-                    void *buf, int len, int write);
++      ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
++                    void *buf, size_t len, int write);
+ #ifdef CONFIG_NUMA
+       /*
+        * set_policy() op must add a reference to any non-NULL @new mempolicy
+@@ -236,6 +241,7 @@ struct vm_operations_struct {
+       int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
+                          unsigned long size, pgoff_t pgoff);
+ };
++typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
+ struct mmu_gather;
+ struct inode;
+@@ -980,8 +986,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+       unsigned long *pfn);
+ int follow_phys(struct vm_area_struct *vma, unsigned long address,
+               unsigned int flags, unsigned long *prot, resource_size_t *phys);
+-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+-                      void *buf, int len, int write);
++ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
++                      void *buf, size_t len, int write);
+ static inline void unmap_shared_mapping_range(struct address_space *mapping,
+               loff_t const holebegin, loff_t const holelen)
+@@ -1020,9 +1026,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
+ }
+ #endif
+-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+-              void *buf, int len, int write);
++extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
++extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
++              void *buf, size_t len, int write);
+ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+                     unsigned long start, unsigned long nr_pages,
+@@ -1053,34 +1059,6 @@ int set_page_dirty(struct page *page);
+ int set_page_dirty_lock(struct page *page);
+ int clear_page_dirty_for_io(struct page *page);
+-/* Is the vma a continuation of the stack vma above it? */
+-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
+-{
+-      return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+-}
+-
+-static inline int stack_guard_page_start(struct vm_area_struct *vma,
+-                                           unsigned long addr)
+-{
+-      return (vma->vm_flags & VM_GROWSDOWN) &&
+-              (vma->vm_start == addr) &&
+-              !vma_growsdown(vma->vm_prev, addr);
+-}
+-
+-/* Is the vma a continuation of the stack vma below it? */
+-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
+-{
+-      return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
+-}
+-
+-static inline int stack_guard_page_end(struct vm_area_struct *vma,
+-                                         unsigned long addr)
+-{
+-      return (vma->vm_flags & VM_GROWSUP) &&
+-              (vma->vm_end == addr) &&
+-              !vma_growsup(vma->vm_next, addr);
+-}
+-
+ extern pid_t
+ vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
+@@ -1180,6 +1158,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
+ }
+ #endif
++#ifdef CONFIG_MMU
++pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
++#else
++static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
++{
++      return __pgprot(0);
++}
++#endif
++
+ int vma_wants_writenotify(struct vm_area_struct *vma);
+ extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
+@@ -1198,8 +1185,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
+ {
+       return 0;
+ }
++
++static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
++                                              unsigned long address)
++{
++      return 0;
++}
+ #else
+ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
++int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
+ #endif
+ #ifdef __PAGETABLE_PMD_FOLDED
+@@ -1208,8 +1202,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
+ {
+       return 0;
+ }
++
++static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
++                                              unsigned long address)
++{
++      return 0;
++}
+ #else
+ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
++int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
+ #endif
+ int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -1227,11 +1228,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
+               NULL: pud_offset(pgd, address);
+ }
++static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
++{
++      return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
++              NULL: pud_offset(pgd, address);
++}
++
+ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+ {
+       return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
+               NULL: pmd_offset(pud, address);
+ }
++
++static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
++{
++      return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
++              NULL: pmd_offset(pud, address);
++}
+ #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
+ #if USE_SPLIT_PTLOCKS
+@@ -1517,6 +1530,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+       unsigned long len, unsigned long prot, unsigned long flags,
+       unsigned long pgoff, unsigned long *populate);
+ extern int do_munmap(struct mm_struct *, unsigned long, size_t);
++extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
+ #ifdef CONFIG_MMU
+ extern int __mm_populate(unsigned long addr, unsigned long len,
+@@ -1545,10 +1559,11 @@ struct vm_unmapped_area_info {
+       unsigned long high_limit;
+       unsigned long align_mask;
+       unsigned long align_offset;
++      unsigned long threadstack_offset;
+ };
+-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
+-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
++extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
++extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
+ /*
+  * Search for an unmapped address range.
+@@ -1560,7 +1575,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
+  * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
+  */
+ static inline unsigned long
+-vm_unmapped_area(struct vm_unmapped_area_info *info)
++vm_unmapped_area(const struct vm_unmapped_area_info *info)
+ {
+       if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
+               return unmapped_area(info);
+@@ -1623,6 +1638,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
+ extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
+                                            struct vm_area_struct **pprev);
++extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
++extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
++extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
++
+ /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
+    NULL if none.  Assume start_addr < end_addr. */
+ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+@@ -1651,15 +1670,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
+       return vma;
+ }
+-#ifdef CONFIG_MMU
+-pgprot_t vm_get_page_prot(unsigned long vm_flags);
+-#else
+-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
+-{
+-      return __pgprot(0);
+-}
+-#endif
+-
+ #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
+ unsigned long change_prot_numa(struct vm_area_struct *vma,
+                       unsigned long start, unsigned long end);
+@@ -1711,6 +1721,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
+ static inline void vm_stat_account(struct mm_struct *mm,
+                       unsigned long flags, struct file *file, long pages)
+ {
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
++#endif
++
+       mm->total_vm += pages;
+ }
+ #endif /* CONFIG_PROC_FS */
+@@ -1791,7 +1806,7 @@ extern int unpoison_memory(unsigned long pfn);
+ extern int sysctl_memory_failure_early_kill;
+ extern int sysctl_memory_failure_recovery;
+ extern void shake_page(struct page *p, int access);
+-extern atomic_long_t num_poisoned_pages;
++extern atomic_long_unchecked_t num_poisoned_pages;
+ extern int soft_offline_page(struct page *page, int flags);
+ extern void dump_page(struct page *page);
+@@ -1828,5 +1843,11 @@ void __init setup_nr_node_ids(void);
+ static inline void setup_nr_node_ids(void) {}
+ #endif
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
++#else
++static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
++#endif
++
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_MM_H */
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index ace9a5f..81bdb59 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -289,6 +289,8 @@ struct vm_area_struct {
+ #ifdef CONFIG_NUMA
+       struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
+ #endif
++
++      struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
+ };
+ struct core_thread {
+@@ -437,6 +439,24 @@ struct mm_struct {
+       int first_nid;
+ #endif
+       struct uprobes_state uprobes_state;
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++      unsigned long pax_flags;
++#endif
++
++#ifdef CONFIG_PAX_DLRESOLVE
++      unsigned long call_dl_resolve;
++#endif
++
++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
++      unsigned long call_syscall;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++      unsigned long delta_mmap;               /* randomized offset */
++      unsigned long delta_stack;              /* randomized offset */
++#endif
++
+ };
+ /* first nid will either be a valid NID or one of these values */
+diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
+index c5d5278..f0b68c8 100644
+--- a/include/linux/mmiotrace.h
++++ b/include/linux/mmiotrace.h
+@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
+ /* Called from ioremap.c */
+ extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
+                                                       void __iomem *addr);
+-extern void mmiotrace_iounmap(volatile void __iomem *addr);
++extern void mmiotrace_iounmap(const volatile void __iomem *addr);
+ /* For anyone to insert markers. Remember trailing newline. */
+ extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
+@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
+ {
+ }
+-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
++static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
+ {
+ }
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 5c76737..61f518e 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -396,7 +396,7 @@ struct zone {
+       unsigned long           flags;             /* zone flags, see below */
+       /* Zone statistics */
+-      atomic_long_t           vm_stat[NR_VM_ZONE_STAT_ITEMS];
++      atomic_long_unchecked_t         vm_stat[NR_VM_ZONE_STAT_ITEMS];
+       /*
+        * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
+diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
+index b508016..237cfe5 100644
+--- a/include/linux/mod_devicetable.h
++++ b/include/linux/mod_devicetable.h
+@@ -13,7 +13,7 @@
+ typedef unsigned long kernel_ulong_t;
+ #endif
+-#define PCI_ANY_ID (~0)
++#define PCI_ANY_ID ((__u16)~0)
+ struct pci_device_id {
+       __u32 vendor, device;           /* Vendor and device ID or PCI_ANY_ID*/
+@@ -139,7 +139,7 @@ struct usb_device_id {
+ #define USB_DEVICE_ID_MATCH_INT_PROTOCOL      0x0200
+ #define USB_DEVICE_ID_MATCH_INT_NUMBER                0x0400
+-#define HID_ANY_ID                            (~0)
++#define HID_ANY_ID                            (~0U)
+ #define HID_BUS_ANY                           0xffff
+ #define HID_GROUP_ANY                         0x0000
+@@ -465,7 +465,7 @@ struct dmi_system_id {
+       const char *ident;
+       struct dmi_strmatch matches[4];
+       void *driver_data;
+-};
++} __do_const;
+ /*
+  * struct dmi_device_id appears during expansion of
+  * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
+diff --git a/include/linux/module.h b/include/linux/module.h
+index 46f1ea0..a34ca37 100644
+--- a/include/linux/module.h
++++ b/include/linux/module.h
+@@ -17,9 +17,11 @@
+ #include <linux/moduleparam.h>
+ #include <linux/tracepoint.h>
+ #include <linux/export.h>
++#include <linux/fs.h>
+ #include <linux/percpu.h>
+ #include <asm/module.h>
++#include <asm/pgtable.h>
+ /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
+ #define MODULE_SIG_STRING "~Module signature appended~\n"
+@@ -54,12 +56,13 @@ struct module_attribute {
+       int (*test)(struct module *);
+       void (*free)(struct module *);
+ };
++typedef struct module_attribute __no_const module_attribute_no_const;
+ struct module_version_attribute {
+       struct module_attribute mattr;
+       const char *module_name;
+       const char *version;
+-} __attribute__ ((__aligned__(sizeof(void *))));
++} __do_const __attribute__ ((__aligned__(sizeof(void *))));
+ extern ssize_t __modver_version_show(struct module_attribute *,
+                                    struct module_kobject *, char *);
+@@ -232,7 +235,7 @@ struct module
+       /* Sysfs stuff. */
+       struct module_kobject mkobj;
+-      struct module_attribute *modinfo_attrs;
++      module_attribute_no_const *modinfo_attrs;
+       const char *version;
+       const char *srcversion;
+       struct kobject *holders_dir;
+@@ -281,19 +284,16 @@ struct module
+       int (*init)(void);
+       /* If this is non-NULL, vfree after init() returns */
+-      void *module_init;
++      void *module_init_rx, *module_init_rw;
+       /* Here is the actual code + data, vfree'd on unload. */
+-      void *module_core;
++      void *module_core_rx, *module_core_rw;
+       /* Here are the sizes of the init and core sections */
+-      unsigned int init_size, core_size;
++      unsigned int init_size_rw, core_size_rw;
+       /* The size of the executable code in each section.  */
+-      unsigned int init_text_size, core_text_size;
+-
+-      /* Size of RO sections of the module (text+rodata) */
+-      unsigned int init_ro_size, core_ro_size;
++      unsigned int init_size_rx, core_size_rx;
+       /* Arch-specific module values */
+       struct mod_arch_specific arch;
+@@ -349,6 +349,10 @@ struct module
+ #ifdef CONFIG_EVENT_TRACING
+       struct ftrace_event_call **trace_events;
+       unsigned int num_trace_events;
++      struct file_operations trace_id;
++      struct file_operations trace_enable;
++      struct file_operations trace_format;
++      struct file_operations trace_filter;
+ #endif
+ #ifdef CONFIG_FTRACE_MCOUNT_RECORD
+       unsigned int num_ftrace_callsites;
+@@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
+ bool is_module_percpu_address(unsigned long addr);
+ bool is_module_text_address(unsigned long addr);
++static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
++{
++
++#ifdef CONFIG_PAX_KERNEXEC
++      if (ktla_ktva(addr) >= (unsigned long)start &&
++          ktla_ktva(addr) < (unsigned long)start + size)
++              return 1;
++#endif
++
++      return ((void *)addr >= start && (void *)addr < start + size);
++}
++
++static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
++{
++      return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
++}
++
++static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
++{
++      return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
++}
++
++static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
++{
++      return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
++}
++
++static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
++{
++      return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
++}
++
+ static inline int within_module_core(unsigned long addr, const struct module *mod)
+ {
+-      return (unsigned long)mod->module_core <= addr &&
+-             addr < (unsigned long)mod->module_core + mod->core_size;
++      return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
+ }
+ static inline int within_module_init(unsigned long addr, const struct module *mod)
+ {
+-      return (unsigned long)mod->module_init <= addr &&
+-             addr < (unsigned long)mod->module_init + mod->init_size;
++      return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
+ }
+ /* Search for module by name: must hold module_mutex. */
+diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
+index 560ca53..ef621ef 100644
+--- a/include/linux/moduleloader.h
++++ b/include/linux/moduleloader.h
+@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
+    sections.  Returns NULL on failure. */
+ void *module_alloc(unsigned long size);
++#ifdef CONFIG_PAX_KERNEXEC
++void *module_alloc_exec(unsigned long size);
++#else
++#define module_alloc_exec(x) module_alloc(x)
++#endif
++
+ /* Free memory returned from module_alloc. */
+ void module_free(struct module *mod, void *module_region);
++#ifdef CONFIG_PAX_KERNEXEC
++void module_free_exec(struct module *mod, void *module_region);
++#else
++#define module_free_exec(x, y) module_free((x), (y))
++#endif
++
+ /*
+  * Apply the given relocation to the (simplified) ELF.  Return -error
+  * or 0.
+@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
+                                unsigned int relsec,
+                                struct module *me)
+ {
++#ifdef CONFIG_MODULES
+       printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
++#endif
+       return -ENOEXEC;
+ }
+ #endif
+@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
+                                    unsigned int relsec,
+                                    struct module *me)
+ {
++#ifdef CONFIG_MODULES
+       printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
++#endif
+       return -ENOEXEC;
+ }
+ #endif
+diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
+index 137b419..fe663ec 100644
+--- a/include/linux/moduleparam.h
++++ b/include/linux/moduleparam.h
+@@ -284,7 +284,7 @@ static inline void __kernel_param_unlock(void)
+  * @len is usually just sizeof(string).
+  */
+ #define module_param_string(name, string, len, perm)                  \
+-      static const struct kparam_string __param_string_##name         \
++      static const struct kparam_string __param_string_##name __used  \
+               = { len, string };                                      \
+       __module_param_call(MODULE_PARAM_PREFIX, name,                  \
+                           &param_ops_string,                          \
+@@ -423,7 +423,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
+  */
+ #define module_param_array_named(name, array, type, nump, perm)               \
+       param_check_##type(name, &(array)[0]);                          \
+-      static const struct kparam_array __param_arr_##name             \
++      static const struct kparam_array __param_arr_##name __used      \
+       = { .max = ARRAY_SIZE(array), .num = nump,                      \
+           .ops = &param_ops_##type,                                   \
+           .elemsize = sizeof(array[0]), .elem = array };              \
+diff --git a/include/linux/namei.h b/include/linux/namei.h
+index 5a5ff57..5ae5070 100644
+--- a/include/linux/namei.h
++++ b/include/linux/namei.h
+@@ -19,7 +19,7 @@ struct nameidata {
+       unsigned        seq;
+       int             last_type;
+       unsigned        depth;
+-      char *saved_names[MAX_NESTED_LINKS + 1];
++      const char *saved_names[MAX_NESTED_LINKS + 1];
+ };
+ /*
+@@ -84,12 +84,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
+ extern void nd_jump_link(struct nameidata *nd, struct path *path);
+-static inline void nd_set_link(struct nameidata *nd, char *path)
++static inline void nd_set_link(struct nameidata *nd, const char *path)
+ {
+       nd->saved_names[nd->depth] = path;
+ }
+-static inline char *nd_get_link(struct nameidata *nd)
++static inline const char *nd_get_link(const struct nameidata *nd)
+ {
+       return nd->saved_names[nd->depth];
+ }
+diff --git a/include/linux/net.h b/include/linux/net.h
+index 99c9f0c..e1cf296 100644
+--- a/include/linux/net.h
++++ b/include/linux/net.h
+@@ -183,7 +183,7 @@ struct net_proto_family {
+       int             (*create)(struct net *net, struct socket *sock,
+                                 int protocol, int kern);
+       struct module   *owner;
+-};
++} __do_const;
+ struct iovec;
+ struct kvec;
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 96e4c21..9cc8278 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1026,6 +1026,7 @@ struct net_device_ops {
+       int                     (*ndo_change_carrier)(struct net_device *dev,
+                                                     bool new_carrier);
+ };
++typedef struct net_device_ops __no_const net_device_ops_no_const;
+ /*
+  *    The DEVICE structure.
+@@ -1094,7 +1095,7 @@ struct net_device {
+       int                     iflink;
+       struct net_device_stats stats;
+-      atomic_long_t           rx_dropped; /* dropped packets by core network
++      atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
+                                            * Do not use this in drivers.
+                                            */
+diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
+index 0060fde..481c6ae 100644
+--- a/include/linux/netfilter.h
++++ b/include/linux/netfilter.h
+@@ -82,7 +82,7 @@ struct nf_sockopt_ops {
+ #endif
+       /* Use the module struct to lock set/get code in place */
+       struct module *owner;
+-};
++} __do_const;
+ /* Function to register/unregister hook points. */
+ int nf_register_hook(struct nf_hook_ops *reg);
+diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
+index d80e275..c3510b8 100644
+--- a/include/linux/netfilter/ipset/ip_set.h
++++ b/include/linux/netfilter/ipset/ip_set.h
+@@ -124,7 +124,7 @@ struct ip_set_type_variant {
+       /* Return true if "b" set is the same as "a"
+        * according to the create set parameters */
+       bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
+-};
++} __do_const;
+ /* The core set type structure */
+ struct ip_set_type {
+diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
+index cadb740..d7c37c0 100644
+--- a/include/linux/netfilter/nfnetlink.h
++++ b/include/linux/netfilter/nfnetlink.h
+@@ -16,7 +16,7 @@ struct nfnl_callback {
+                   const struct nlattr * const cda[]);
+       const struct nla_policy *policy;        /* netlink attribute policy */
+       const u_int16_t attr_count;             /* number of nlattr's */
+-};
++} __do_const;
+ struct nfnetlink_subsystem {
+       const char *name;
+diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
+new file mode 100644
+index 0000000..33f4af8
+--- /dev/null
++++ b/include/linux/netfilter/xt_gradm.h
+@@ -0,0 +1,9 @@
++#ifndef _LINUX_NETFILTER_XT_GRADM_H
++#define _LINUX_NETFILTER_XT_GRADM_H 1
++
++struct xt_gradm_mtinfo {
++      __u16 flags;
++      __u16 invflags;
++};
++
++#endif
+diff --git a/include/linux/nls.h b/include/linux/nls.h
+index 5dc635f..35f5e11 100644
+--- a/include/linux/nls.h
++++ b/include/linux/nls.h
+@@ -31,7 +31,7 @@ struct nls_table {
+       const unsigned char *charset2upper;
+       struct module *owner;
+       struct nls_table *next;
+-};
++} __do_const;
+ /* this value hold the maximum octet of charset */
+ #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
+diff --git a/include/linux/notifier.h b/include/linux/notifier.h
+index d14a4c3..a078786 100644
+--- a/include/linux/notifier.h
++++ b/include/linux/notifier.h
+@@ -54,7 +54,8 @@ struct notifier_block {
+       notifier_fn_t notifier_call;
+       struct notifier_block __rcu *next;
+       int priority;
+-};
++} __do_const;
++typedef struct notifier_block __no_const notifier_block_no_const;
+ struct atomic_notifier_head {
+       spinlock_t lock;
+diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
+index a4c5624..79d6d88 100644
+--- a/include/linux/oprofile.h
++++ b/include/linux/oprofile.h
+@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
+ int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
+       char const * name, ulong * val);
+  
+-/** Create a file for read-only access to an atomic_t. */
++/** Create a file for read-only access to an atomic_unchecked_t. */
+ int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
+-      char const * name, atomic_t * val);
++      char const * name, atomic_unchecked_t * val);
+  
+ /** create a directory */
+ struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
+diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
+index 8db71dc..a76bf2c 100644
+--- a/include/linux/pci_hotplug.h
++++ b/include/linux/pci_hotplug.h
+@@ -80,7 +80,8 @@ struct hotplug_slot_ops {
+       int (*get_attention_status)     (struct hotplug_slot *slot, u8 *value);
+       int (*get_latch_status)         (struct hotplug_slot *slot, u8 *value);
+       int (*get_adapter_status)       (struct hotplug_slot *slot, u8 *value);
+-};
++} __do_const;
++typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
+ /**
+  * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index c5b6dbf..b124155 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -318,8 +318,8 @@ struct perf_event {
+       enum perf_event_active_state    state;
+       unsigned int                    attach_state;
+-      local64_t                       count;
+-      atomic64_t                      child_count;
++      local64_t                       count; /* PaX: fix it one day */
++      atomic64_unchecked_t            child_count;
+       /*
+        * These are the total time in nanoseconds that the event
+@@ -370,8 +370,8 @@ struct perf_event {
+        * These accumulate total time (in nanoseconds) that children
+        * events have been enabled and running, respectively.
+        */
+-      atomic64_t                      child_total_time_enabled;
+-      atomic64_t                      child_total_time_running;
++      atomic64_unchecked_t            child_total_time_enabled;
++      atomic64_unchecked_t            child_total_time_running;
+       /*
+        * Protect attach/detach and child_list:
+@@ -692,7 +692,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
+               entry->ip[entry->nr++] = ip;
+ }
+-extern int sysctl_perf_event_paranoid;
++extern int sysctl_perf_event_legitimately_concerned;
+ extern int sysctl_perf_event_mlock;
+ extern int sysctl_perf_event_sample_rate;
+@@ -700,19 +700,24 @@ extern int perf_proc_update_handler(struct ctl_table *table, int write,
+               void __user *buffer, size_t *lenp,
+               loff_t *ppos);
++static inline bool perf_paranoid_any(void)
++{
++      return sysctl_perf_event_legitimately_concerned > 2;
++}
++
+ static inline bool perf_paranoid_tracepoint_raw(void)
+ {
+-      return sysctl_perf_event_paranoid > -1;
++      return sysctl_perf_event_legitimately_concerned > -1;
+ }
+ static inline bool perf_paranoid_cpu(void)
+ {
+-      return sysctl_perf_event_paranoid > 0;
++      return sysctl_perf_event_legitimately_concerned > 0;
+ }
+ static inline bool perf_paranoid_kernel(void)
+ {
+-      return sysctl_perf_event_paranoid > 1;
++      return sysctl_perf_event_legitimately_concerned > 1;
+ }
+ extern void perf_event_init(void);
+@@ -806,7 +811,7 @@ static inline void perf_restore_debug_store(void)                  { }
+  */
+ #define perf_cpu_notifier(fn)                                         \
+ do {                                                                  \
+-      static struct notifier_block fn##_nb __cpuinitdata =            \
++      static struct notifier_block fn##_nb =                          \
+               { .notifier_call = fn, .priority = CPU_PRI_PERF };      \
+       unsigned long cpu = smp_processor_id();                         \
+       unsigned long flags;                                            \
+@@ -826,7 +831,7 @@ struct perf_pmu_events_attr {
+       struct device_attribute attr;
+       u64 id;
+       const char *event_str;
+-};
++} __do_const;
+ #define PMU_EVENT_ATTR(_name, _var, _id, _show)                               \
+ static struct perf_pmu_events_attr _var = {                           \
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+index b8809fe..ae4ccd0 100644
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -47,10 +47,10 @@ struct pipe_inode_info {
+       struct mutex mutex;
+       wait_queue_head_t wait;
+       unsigned int nrbufs, curbuf, buffers;
+-      unsigned int readers;
+-      unsigned int writers;
+-      unsigned int files;
+-      unsigned int waiting_writers;
++      atomic_t readers;
++      atomic_t writers;
++      atomic_t files;
++      atomic_t waiting_writers;
+       unsigned int r_counter;
+       unsigned int w_counter;
+       struct page *tmp_page;
+diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
+index 5f28cae..3d23723 100644
+--- a/include/linux/platform_data/usb-ehci-s5p.h
++++ b/include/linux/platform_data/usb-ehci-s5p.h
+@@ -14,7 +14,7 @@
+ struct s5p_ehci_platdata {
+       int (*phy_init)(struct platform_device *pdev, int type);
+       int (*phy_exit)(struct platform_device *pdev, int type);
+-};
++} __no_const;
+ extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
+diff --git a/include/linux/platform_data/usb-ohci-exynos.h b/include/linux/platform_data/usb-ohci-exynos.h
+index c256c59..8ea94c7 100644
+--- a/include/linux/platform_data/usb-ohci-exynos.h
++++ b/include/linux/platform_data/usb-ohci-exynos.h
+@@ -14,7 +14,7 @@
+ struct exynos4_ohci_platdata {
+       int (*phy_init)(struct platform_device *pdev, int type);
+       int (*phy_exit)(struct platform_device *pdev, int type);
+-};
++} __no_const;
+ extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index 7c1d252..c5c773e 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -48,7 +48,7 @@ struct gpd_dev_ops {
+ struct gpd_cpu_data {
+       unsigned int saved_exit_latency;
+-      struct cpuidle_state *idle_state;
++      cpuidle_state_no_const *idle_state;
+ };
+ struct generic_pm_domain {
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index 7d7e09e..8671ef8 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -104,7 +104,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
+ static inline void pm_runtime_mark_last_busy(struct device *dev)
+ {
+-      ACCESS_ONCE(dev->power.last_busy) = jiffies;
++      ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
+ }
+ #else /* !CONFIG_PM_RUNTIME */
+diff --git a/include/linux/pnp.h b/include/linux/pnp.h
+index 195aafc..49a7bc2 100644
+--- a/include/linux/pnp.h
++++ b/include/linux/pnp.h
+@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
+ struct pnp_fixup {
+       char id[7];
+       void (*quirk_function) (struct pnp_dev * dev);  /* fixup function */
+-};
++} __do_const;
+ /* config parameters */
+ #define PNP_CONFIG_NORMAL     0x0001
+diff --git a/include/linux/poison.h b/include/linux/poison.h
+index 2110a81..13a11bb 100644
+--- a/include/linux/poison.h
++++ b/include/linux/poison.h
+@@ -19,8 +19,8 @@
+  * under normal circumstances, used to verify that nobody uses
+  * non-initialized list entries.
+  */
+-#define LIST_POISON1  ((void *) 0x00100100 + POISON_POINTER_DELTA)
+-#define LIST_POISON2  ((void *) 0x00200200 + POISON_POINTER_DELTA)
++#define LIST_POISON1  ((void *) (long)0xFFFFFF01)
++#define LIST_POISON2  ((void *) (long)0xFFFFFF02)
+ /********** include/linux/timer.h **********/
+ /*
+diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
+index c0f44c2..1572583 100644
+--- a/include/linux/power/smartreflex.h
++++ b/include/linux/power/smartreflex.h
+@@ -238,7 +238,7 @@ struct omap_sr_class_data {
+       int (*notify)(struct omap_sr *sr, u32 status);
+       u8 notify_flags;
+       u8 class_type;
+-};
++} __do_const;
+ /**
+  * struct omap_sr_nvalue_table        - Smartreflex n-target value info
+diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
+index 4ea1d37..80f4b33 100644
+--- a/include/linux/ppp-comp.h
++++ b/include/linux/ppp-comp.h
+@@ -84,7 +84,7 @@ struct compressor {
+       struct module *owner;
+       /* Extra skb space needed by the compressor algorithm */
+       unsigned int comp_extra;
+-};
++} __do_const;
+ /*
+  * The return value from decompress routine is the length of the
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index f5d4723..a6ea2fa 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -18,8 +18,13 @@
+ # define sub_preempt_count(val)       do { preempt_count() -= (val); } while (0)
+ #endif
++#define raw_add_preempt_count(val)    do { preempt_count() += (val); } while (0)
++#define raw_sub_preempt_count(val)    do { preempt_count() -= (val); } while (0)
++
+ #define inc_preempt_count() add_preempt_count(1)
++#define raw_inc_preempt_count() raw_add_preempt_count(1)
+ #define dec_preempt_count() sub_preempt_count(1)
++#define raw_dec_preempt_count() raw_sub_preempt_count(1)
+ #define preempt_count()       (current_thread_info()->preempt_count)
+@@ -64,6 +69,12 @@ do { \
+       barrier(); \
+ } while (0)
++#define raw_preempt_disable() \
++do { \
++      raw_inc_preempt_count(); \
++      barrier(); \
++} while (0)
++
+ #define sched_preempt_enable_no_resched() \
+ do { \
+       barrier(); \
+@@ -72,6 +83,12 @@ do { \
+ #define preempt_enable_no_resched()   sched_preempt_enable_no_resched()
++#define raw_preempt_enable_no_resched() \
++do { \
++      barrier(); \
++      raw_dec_preempt_count(); \
++} while (0)
++
+ #define preempt_enable() \
+ do { \
+       preempt_enable_no_resched(); \
+@@ -116,8 +133,10 @@ do { \
+  * region.
+  */
+ #define preempt_disable()             barrier()
++#define raw_preempt_disable()         barrier()
+ #define sched_preempt_enable_no_resched()     barrier()
+ #define preempt_enable_no_resched()   barrier()
++#define raw_preempt_enable_no_resched()       barrier()
+ #define preempt_enable()              barrier()
+ #define preempt_disable_notrace()             barrier()
+diff --git a/include/linux/printk.h b/include/linux/printk.h
+index 22c7052..ad3fa0a 100644
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -106,6 +106,8 @@ static inline __printf(1, 2) __cold
+ void early_printk(const char *s, ...) { }
+ #endif
++extern int kptr_restrict;
++
+ #ifdef CONFIG_PRINTK
+ asmlinkage __printf(5, 0)
+ int vprintk_emit(int facility, int level,
+@@ -140,7 +142,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
+ extern int printk_delay_msec;
+ extern int dmesg_restrict;
+-extern int kptr_restrict;
+ extern void wake_up_klogd(void);
+diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
+index 608e60a..c26f864 100644
+--- a/include/linux/proc_fs.h
++++ b/include/linux/proc_fs.h
+@@ -34,6 +34,19 @@ static inline struct proc_dir_entry *proc_create(
+       return proc_create_data(name, mode, parent, proc_fops, NULL);
+ }
++static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
++      struct proc_dir_entry *parent, const struct file_operations *proc_fops)
++{
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++      return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
++#else
++      return proc_create_data(name, mode, parent, proc_fops, NULL);
++#endif
++}
++
++
+ extern void proc_set_size(struct proc_dir_entry *, loff_t);
+ extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
+ extern void *PDE_DATA(const struct inode *);
+diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
+index 34a1e10..03a6d03 100644
+--- a/include/linux/proc_ns.h
++++ b/include/linux/proc_ns.h
+@@ -14,7 +14,7 @@ struct proc_ns_operations {
+       void (*put)(void *ns);
+       int (*install)(struct nsproxy *nsproxy, void *ns);
+       unsigned int (*inum)(void *ns);
+-};
++} __do_const;
+ struct proc_ns {
+       void *ns;
+diff --git a/include/linux/random.h b/include/linux/random.h
+index 3b9377d..61b506a 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -32,6 +32,11 @@ void prandom_seed(u32 seed);
+ u32 prandom_u32_state(struct rnd_state *);
+ void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
++static inline unsigned long pax_get_random_long(void)
++{
++      return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
++}
++
+ /*
+  * Handle minimum values for seeds
+  */
+diff --git a/include/linux/rculist.h b/include/linux/rculist.h
+index f4b1001..8ddb2b6 100644
+--- a/include/linux/rculist.h
++++ b/include/linux/rculist.h
+@@ -44,6 +44,9 @@ extern void __list_add_rcu(struct list_head *new,
+               struct list_head *prev, struct list_head *next);
+ #endif
++extern void __pax_list_add_rcu(struct list_head *new,
++              struct list_head *prev, struct list_head *next);
++
+ /**
+  * list_add_rcu - add a new entry to rcu-protected list
+  * @new: new entry to be added
+@@ -65,6 +68,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
+       __list_add_rcu(new, head, head->next);
+ }
++static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
++{
++      __pax_list_add_rcu(new, head, head->next);
++}
++
+ /**
+  * list_add_tail_rcu - add a new entry to rcu-protected list
+  * @new: new entry to be added
+@@ -87,6 +95,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
+       __list_add_rcu(new, head->prev, head);
+ }
++static inline void pax_list_add_tail_rcu(struct list_head *new,
++                                      struct list_head *head)
++{
++      __pax_list_add_rcu(new, head->prev, head);
++}
++
+ /**
+  * list_del_rcu - deletes entry from list without re-initialization
+  * @entry: the element to delete from the list.
+@@ -117,6 +131,8 @@ static inline void list_del_rcu(struct list_head *entry)
+       entry->prev = LIST_POISON2;
+ }
++extern void pax_list_del_rcu(struct list_head *entry);
++
+ /**
+  * hlist_del_init_rcu - deletes entry from hash list with re-initialization
+  * @n: the element to delete from the hash list.
+diff --git a/include/linux/reboot.h b/include/linux/reboot.h
+index 23b3630..e1bc12b 100644
+--- a/include/linux/reboot.h
++++ b/include/linux/reboot.h
+@@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
+  * Architecture-specific implementations of sys_reboot commands.
+  */
+-extern void machine_restart(char *cmd);
+-extern void machine_halt(void);
+-extern void machine_power_off(void);
++extern void machine_restart(char *cmd) __noreturn;
++extern void machine_halt(void) __noreturn;
++extern void machine_power_off(void) __noreturn;
+ extern void machine_shutdown(void);
+ struct pt_regs;
+@@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
+  */
+ extern void kernel_restart_prepare(char *cmd);
+-extern void kernel_restart(char *cmd);
+-extern void kernel_halt(void);
+-extern void kernel_power_off(void);
++extern void kernel_restart(char *cmd) __noreturn;
++extern void kernel_halt(void) __noreturn;
++extern void kernel_power_off(void) __noreturn;
+ extern int C_A_D; /* for sysctl */
+ void ctrl_alt_del(void);
+@@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
+  * Emergency restart, callable from an interrupt handler.
+  */
+-extern void emergency_restart(void);
++extern void emergency_restart(void) __noreturn;
+ #include <asm/emergency-restart.h>
+ #endif /* _LINUX_REBOOT_H */
+diff --git a/include/linux/regset.h b/include/linux/regset.h
+index 8e0c9fe..ac4d221 100644
+--- a/include/linux/regset.h
++++ b/include/linux/regset.h
+@@ -161,7 +161,8 @@ struct user_regset {
+       unsigned int                    align;
+       unsigned int                    bias;
+       unsigned int                    core_note_type;
+-};
++} __do_const;
++typedef struct user_regset __no_const user_regset_no_const;
+ /**
+  * struct user_regset_view - available regsets
+diff --git a/include/linux/relay.h b/include/linux/relay.h
+index d7c8359..818daf5 100644
+--- a/include/linux/relay.h
++++ b/include/linux/relay.h
+@@ -157,7 +157,7 @@ struct rchan_callbacks
+        * The callback should return 0 if successful, negative if not.
+        */
+       int (*remove_buf_file)(struct dentry *dentry);
+-};
++} __no_const;
+ /*
+  * CONFIG_RELAY kernel API, kernel/relay.c
+diff --git a/include/linux/rio.h b/include/linux/rio.h
+index 18e0993..8ab5b21 100644
+--- a/include/linux/rio.h
++++ b/include/linux/rio.h
+@@ -345,7 +345,7 @@ struct rio_ops {
+       int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
+                       u64 rstart, u32 size, u32 flags);
+       void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
+-};
++} __no_const;
+ #define RIO_RESOURCE_MEM      0x00000100
+ #define RIO_RESOURCE_DOORBELL 0x00000200
+diff --git a/include/linux/rmap.h b/include/linux/rmap.h
+index 6dacb93..6174423 100644
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
+ void anon_vma_init(void);     /* create anon_vma_cachep */
+ int  anon_vma_prepare(struct vm_area_struct *);
+ void unlink_anon_vmas(struct vm_area_struct *);
+-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
+-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
++int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
++int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
+ static inline void anon_vma_merge(struct vm_area_struct *vma,
+                                 struct vm_area_struct *next)
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 3aeb14b..73816a6 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -62,6 +62,7 @@ struct bio_list;
+ struct fs_struct;
+ struct perf_event_context;
+ struct blk_plug;
++struct linux_binprm;
+ /*
+  * List of flags we want to share for kernel threads,
+@@ -303,7 +304,7 @@ extern char __sched_text_start[], __sched_text_end[];
+ extern int in_sched_functions(unsigned long addr);
+ #define       MAX_SCHEDULE_TIMEOUT    LONG_MAX
+-extern signed long schedule_timeout(signed long timeout);
++extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
+ extern signed long schedule_timeout_interruptible(signed long timeout);
+ extern signed long schedule_timeout_killable(signed long timeout);
+ extern signed long schedule_timeout_uninterruptible(signed long timeout);
+@@ -314,7 +315,19 @@ struct nsproxy;
+ struct user_namespace;
+ #ifdef CONFIG_MMU
+-extern unsigned long mmap_legacy_base(void);
++
++#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
++extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
++#else
++static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
++{
++      return 0;
++}
++#endif
++
++extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
++extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
++extern unsigned long mmap_legacy_base(struct mm_struct *mm);
+ extern void arch_pick_mmap_layout(struct mm_struct *mm);
+ extern unsigned long
+ arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
+@@ -592,6 +605,17 @@ struct signal_struct {
+ #ifdef CONFIG_TASKSTATS
+       struct taskstats *stats;
+ #endif
++
++#ifdef CONFIG_GRKERNSEC
++      u32 curr_ip;
++      u32 saved_ip;
++      u32 gr_saddr;
++      u32 gr_daddr;
++      u16 gr_sport;
++      u16 gr_dport;
++      u8 used_accept:1;
++#endif
++
+ #ifdef CONFIG_AUDIT
+       unsigned audit_tty;
+       unsigned audit_tty_log_passwd;
+@@ -672,6 +696,14 @@ struct user_struct {
+       struct key *session_keyring;    /* UID's default session keyring */
+ #endif
++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
++      unsigned char kernel_banned;
++#endif
++#ifdef CONFIG_GRKERNSEC_BRUTE
++      unsigned char suid_banned;
++      unsigned long suid_ban_expires;
++#endif
++
+       /* Hash table maintenance information */
+       struct hlist_node uidhash_node;
+       kuid_t uid;
+@@ -1159,8 +1191,8 @@ struct task_struct {
+       struct list_head thread_group;
+       struct completion *vfork_done;          /* for vfork() */
+-      int __user *set_child_tid;              /* CLONE_CHILD_SETTID */
+-      int __user *clear_child_tid;            /* CLONE_CHILD_CLEARTID */
++      pid_t __user *set_child_tid;            /* CLONE_CHILD_SETTID */
++      pid_t __user *clear_child_tid;          /* CLONE_CHILD_CLEARTID */
+       cputime_t utime, stime, utimescaled, stimescaled;
+       cputime_t gtime;
+@@ -1185,11 +1217,6 @@ struct task_struct {
+       struct task_cputime cputime_expires;
+       struct list_head cpu_timers[3];
+-/* process credentials */
+-      const struct cred __rcu *real_cred; /* objective and real subjective task
+-                                       * credentials (COW) */
+-      const struct cred __rcu *cred;  /* effective (overridable) subjective task
+-                                       * credentials (COW) */
+       char comm[TASK_COMM_LEN]; /* executable name excluding path
+                                    - access with [gs]et_task_comm (which lock
+                                      it with task_lock())
+@@ -1206,6 +1233,10 @@ struct task_struct {
+ #endif
+ /* CPU-specific state of this task */
+       struct thread_struct thread;
++/* thread_info moved to task_struct */
++#ifdef CONFIG_X86
++      struct thread_info tinfo;
++#endif
+ /* filesystem information */
+       struct fs_struct *fs;
+ /* open file information */
+@@ -1279,6 +1310,10 @@ struct task_struct {
+       gfp_t lockdep_reclaim_gfp;
+ #endif
++/* process credentials */
++      const struct cred __rcu *real_cred; /* objective and real subjective task
++                                       * credentials (COW) */
++
+ /* journalling filesystem info */
+       void *journal_info;
+@@ -1317,6 +1352,10 @@ struct task_struct {
+       /* cg_list protected by css_set_lock and tsk->alloc_lock */
+       struct list_head cg_list;
+ #endif
++
++      const struct cred __rcu *cred;  /* effective (overridable) subjective task
++                                       * credentials (COW) */
++
+ #ifdef CONFIG_FUTEX
+       struct robust_list_head __user *robust_list;
+ #ifdef CONFIG_COMPAT
+@@ -1417,8 +1456,76 @@ struct task_struct {
+       unsigned int    sequential_io;
+       unsigned int    sequential_io_avg;
+ #endif
++
++#ifdef CONFIG_GRKERNSEC
++      /* grsecurity */
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      u64 exec_id;
++#endif
++#ifdef CONFIG_GRKERNSEC_SETXID
++      const struct cred *delayed_cred;
++#endif
++      struct dentry *gr_chroot_dentry;
++      struct acl_subject_label *acl;
++      struct acl_role_label *role;
++      struct file *exec_file;
++      unsigned long brute_expires;
++      u16 acl_role_id;
++      /* is this the task that authenticated to the special role */
++      u8 acl_sp_role;
++      u8 is_writable;
++      u8 brute;
++      u8 gr_is_chrooted;
++#endif
++
+ };
++#define MF_PAX_PAGEEXEC               0x01000000      /* Paging based non-executable pages */
++#define MF_PAX_EMUTRAMP               0x02000000      /* Emulate trampolines */
++#define MF_PAX_MPROTECT               0x04000000      /* Restrict mprotect() */
++#define MF_PAX_RANDMMAP               0x08000000      /* Randomize mmap() base */
++/*#define MF_PAX_RANDEXEC             0x10000000*/    /* Randomize ET_EXEC base */
++#define MF_PAX_SEGMEXEC               0x20000000      /* Segmentation based non-executable pages */
++
++#ifdef CONFIG_PAX_SOFTMODE
++extern int pax_softmode;
++#endif
++
++extern int pax_check_flags(unsigned long *);
++
++/* if tsk != current then task_lock must be held on it */
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++static inline unsigned long pax_get_flags(struct task_struct *tsk)
++{
++      if (likely(tsk->mm))
++              return tsk->mm->pax_flags;
++      else
++              return 0UL;
++}
++
++/* if tsk != current then task_lock must be held on it */
++static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
++{
++      if (likely(tsk->mm)) {
++              tsk->mm->pax_flags = flags;
++              return 0;
++      }
++      return -EINVAL;
++}
++#endif
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++extern void pax_set_initial_flags(struct linux_binprm *bprm);
++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
++#endif
++
++struct path;
++extern char *pax_get_path(const struct path *path, char *buf, int buflen);
++extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
++extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
++extern void pax_report_refcount_overflow(struct pt_regs *regs);
++
+ /* Future-safe accessor for struct task_struct's cpus_allowed. */
+ #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+@@ -1477,7 +1584,7 @@ struct pid_namespace;
+ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+                       struct pid_namespace *ns);
+-static inline pid_t task_pid_nr(struct task_struct *tsk)
++static inline pid_t task_pid_nr(const struct task_struct *tsk)
+ {
+       return tsk->pid;
+ }
+@@ -1920,7 +2027,9 @@ void yield(void);
+ extern struct exec_domain     default_exec_domain;
+ union thread_union {
++#ifndef CONFIG_X86
+       struct thread_info thread_info;
++#endif
+       unsigned long stack[THREAD_SIZE/sizeof(long)];
+ };
+@@ -1953,6 +2062,7 @@ extern struct pid_namespace init_pid_ns;
+  */
+ extern struct task_struct *find_task_by_vpid(pid_t nr);
++extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
+ extern struct task_struct *find_task_by_pid_ns(pid_t nr,
+               struct pid_namespace *ns);
+@@ -2119,7 +2229,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
+ extern void exit_itimers(struct signal_struct *);
+ extern void flush_itimer_signals(void);
+-extern void do_group_exit(int);
++extern __noreturn void do_group_exit(int);
+ extern int allow_signal(int);
+ extern int disallow_signal(int);
+@@ -2310,9 +2420,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+ #endif
+-static inline int object_is_on_stack(void *obj)
++static inline int object_starts_on_stack(void *obj)
+ {
+-      void *stack = task_stack_page(current);
++      const void *stack = task_stack_page(current);
+       return (obj >= stack) && (obj < (stack + THREAD_SIZE));
+ }
+diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
+index bf8086b..962b035 100644
+--- a/include/linux/sched/sysctl.h
++++ b/include/linux/sched/sysctl.h
+@@ -30,6 +30,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
+ #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+ extern int sysctl_max_map_count;
++extern unsigned long sysctl_heap_stack_gap;
+ extern unsigned int sysctl_sched_latency;
+ extern unsigned int sysctl_sched_min_granularity;
+diff --git a/include/linux/security.h b/include/linux/security.h
+index 4686491..2bd210e 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -26,6 +26,7 @@
+ #include <linux/capability.h>
+ #include <linux/slab.h>
+ #include <linux/err.h>
++#include <linux/grsecurity.h>
+ struct linux_binprm;
+ struct cred;
+diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
+index 2da29ac..aac448ec 100644
+--- a/include/linux/seq_file.h
++++ b/include/linux/seq_file.h
+@@ -26,6 +26,9 @@ struct seq_file {
+       struct mutex lock;
+       const struct seq_operations *op;
+       int poll_event;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      u64 exec_id;
++#endif
+ #ifdef CONFIG_USER_NS
+       struct user_namespace *user_ns;
+ #endif
+@@ -38,6 +41,7 @@ struct seq_operations {
+       void * (*next) (struct seq_file *m, void *v, loff_t *pos);
+       int (*show) (struct seq_file *m, void *v);
+ };
++typedef struct seq_operations __no_const seq_operations_no_const;
+ #define SEQ_SKIP 1
+diff --git a/include/linux/shm.h b/include/linux/shm.h
+index 429c199..4d42e38 100644
+--- a/include/linux/shm.h
++++ b/include/linux/shm.h
+@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
+       /* The task created the shm object.  NULL if the task is dead. */
+       struct task_struct      *shm_creator;
++#ifdef CONFIG_GRKERNSEC
++      time_t                  shm_createtime;
++      pid_t                   shm_lapid;
++#endif
+ };
+ /* shm_mode upper byte flags */
+diff --git a/include/linux/signal.h b/include/linux/signal.h
+index d897484..323ba98 100644
+--- a/include/linux/signal.h
++++ b/include/linux/signal.h
+@@ -433,6 +433,7 @@ void signals_init(void);
+ int restore_altstack(const stack_t __user *);
+ int __save_altstack(stack_t __user *, unsigned long);
++void __save_altstack_ex(stack_t __user *, unsigned long);
+ #ifdef CONFIG_PROC_FS
+ struct seq_file;
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index dec1748..112c1f9 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -640,7 +640,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+ extern struct sk_buff *__alloc_skb(unsigned int size,
+                                  gfp_t priority, int flags, int node);
+ extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
+-static inline struct sk_buff *alloc_skb(unsigned int size,
++static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
+                                       gfp_t priority)
+ {
+       return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
+@@ -756,7 +756,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
+  */
+ static inline int skb_queue_empty(const struct sk_buff_head *list)
+ {
+-      return list->next == (struct sk_buff *)list;
++      return list->next == (const struct sk_buff *)list;
+ }
+ /**
+@@ -769,7 +769,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
+ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
+                                    const struct sk_buff *skb)
+ {
+-      return skb->next == (struct sk_buff *)list;
++      return skb->next == (const struct sk_buff *)list;
+ }
+ /**
+@@ -782,7 +782,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
+ static inline bool skb_queue_is_first(const struct sk_buff_head *list,
+                                     const struct sk_buff *skb)
+ {
+-      return skb->prev == (struct sk_buff *)list;
++      return skb->prev == (const struct sk_buff *)list;
+ }
+ /**
+@@ -1848,7 +1848,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
+  * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
+  */
+ #ifndef NET_SKB_PAD
+-#define NET_SKB_PAD   max(32, L1_CACHE_BYTES)
++#define NET_SKB_PAD   max(_AC(32,UL), L1_CACHE_BYTES)
+ #endif
+ extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
+@@ -2443,7 +2443,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
+                                        int noblock, int *err);
+ extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
+                                    struct poll_table_struct *wait);
+-extern int           skb_copy_datagram_iovec(const struct sk_buff *from,
++extern int           __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
+                                              int offset, struct iovec *to,
+                                              int size);
+ extern int           skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
+@@ -2733,6 +2733,9 @@ static inline void nf_reset(struct sk_buff *skb)
+       nf_bridge_put(skb->nf_bridge);
+       skb->nf_bridge = NULL;
+ #endif
++#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
++      skb->nf_trace = 0;
++#endif
+ }
+ static inline void nf_reset_trace(struct sk_buff *skb)
+diff --git a/include/linux/slab.h b/include/linux/slab.h
+index 0c62175..f016ac1 100644
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -12,15 +12,29 @@
+ #include <linux/gfp.h>
+ #include <linux/types.h>
+ #include <linux/workqueue.h>
+-
++#include <linux/err.h>
+ /*
+  * Flags to pass to kmem_cache_create().
+  * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
+  */
+ #define SLAB_DEBUG_FREE               0x00000100UL    /* DEBUG: Perform (expensive) checks on free */
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++#define SLAB_USERCOPY         0x00000200UL    /* PaX: Allow copying objs to/from userland */
++#else
++#define SLAB_USERCOPY         0x00000000UL
++#endif
++
+ #define SLAB_RED_ZONE         0x00000400UL    /* DEBUG: Red zone objs in a cache */
+ #define SLAB_POISON           0x00000800UL    /* DEBUG: Poison objects */
++
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++#define SLAB_NO_SANITIZE      0x00001000UL    /* PaX: Do not sanitize objs on free */
++#else
++#define SLAB_NO_SANITIZE      0x00000000UL
++#endif
++
+ #define SLAB_HWCACHE_ALIGN    0x00002000UL    /* Align objs on cache lines */
+ #define SLAB_CACHE_DMA                0x00004000UL    /* Use GFP_DMA memory */
+ #define SLAB_STORE_USER               0x00010000UL    /* DEBUG: Store the last owner for bug hunting */
+@@ -89,10 +103,13 @@
+  * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
+  * Both make kfree a no-op.
+  */
+-#define ZERO_SIZE_PTR ((void *)16)
++#define ZERO_SIZE_PTR                         \
++({                                            \
++      BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
++      (void *)(-MAX_ERRNO-1L);                \
++})
+-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
+-                              (unsigned long)ZERO_SIZE_PTR)
++#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
+ struct mem_cgroup;
+@@ -132,6 +149,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
+ void kfree(const void *);
+ void kzfree(const void *);
+ size_t ksize(const void *);
++const char *check_heap_object(const void *ptr, unsigned long n);
++bool is_usercopy_object(const void *ptr);
+ /*
+  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
+@@ -164,7 +183,7 @@ struct kmem_cache {
+       unsigned int align;     /* Alignment as calculated */
+       unsigned long flags;    /* Active flags on the slab */
+       const char *name;       /* Slab name for sysfs */
+-      int refcount;           /* Use counter */
++      atomic_t refcount;      /* Use counter */
+       void (*ctor)(void *);   /* Called on object slot creation */
+       struct list_head list;  /* List of all slab caches on the system */
+ };
+@@ -226,6 +245,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+ #endif
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
++#endif
++
+ /*
+  * Figure out which kmalloc slab an allocation of a certain size
+  * belongs to.
+@@ -234,7 +257,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+  * 2 = 120 .. 192 bytes
+  * n = 2^(n-1) .. 2^n -1
+  */
+-static __always_inline int kmalloc_index(size_t size)
++static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
+ {
+       if (!size)
+               return 0;
+@@ -406,6 +429,7 @@ void print_slabinfo_header(struct seq_file *m);
+  * for general use, and so are not documented here. For a full list of
+  * potential flags, always refer to linux/gfp.h.
+  */
++
+ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+ {
+       if (size != 0 && n > SIZE_MAX / size)
+@@ -465,7 +489,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
+ #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
+       (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
+       (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
+-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
++extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
+ #define kmalloc_track_caller(size, flags) \
+       __kmalloc_track_caller(size, flags, _RET_IP_)
+ #else
+@@ -485,7 +509,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
+ #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
+       (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
+       (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
+-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
++extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
+ #define kmalloc_node_track_caller(size, flags, node) \
+       __kmalloc_node_track_caller(size, flags, node, \
+                       _RET_IP_)
+diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
+index cd40158..4e2f7af 100644
+--- a/include/linux/slab_def.h
++++ b/include/linux/slab_def.h
+@@ -50,7 +50,7 @@ struct kmem_cache {
+ /* 4) cache creation/removal */
+       const char *name;
+       struct list_head list;
+-      int refcount;
++      atomic_t refcount;
+       int object_size;
+       int align;
+@@ -66,10 +66,14 @@ struct kmem_cache {
+       unsigned long node_allocs;
+       unsigned long node_frees;
+       unsigned long node_overflow;
+-      atomic_t allochit;
+-      atomic_t allocmiss;
+-      atomic_t freehit;
+-      atomic_t freemiss;
++      atomic_unchecked_t allochit;
++      atomic_unchecked_t allocmiss;
++      atomic_unchecked_t freehit;
++      atomic_unchecked_t freemiss;
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++      atomic_unchecked_t sanitized;
++      atomic_unchecked_t not_sanitized;
++#endif
+       /*
+        * If debugging is enabled, then the allocator can add additional
+@@ -103,7 +107,7 @@ struct kmem_cache {
+ };
+ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
+-void *__kmalloc(size_t size, gfp_t flags);
++void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
+ #ifdef CONFIG_TRACING
+ extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+@@ -136,6 +140,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
+                       cachep = kmalloc_dma_caches[i];
+               else
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++              if (flags & GFP_USERCOPY)
++                      cachep = kmalloc_usercopy_caches[i];
++              else
++#endif
++
+                       cachep = kmalloc_caches[i];
+               ret = kmem_cache_alloc_trace(cachep, flags, size);
+@@ -146,7 +157,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
+ }
+ #ifdef CONFIG_NUMA
+-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
++extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
+ extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+ #ifdef CONFIG_TRACING
+@@ -185,6 +196,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+                       cachep = kmalloc_dma_caches[i];
+               else
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++              if (flags & GFP_USERCOPY)
++                      cachep = kmalloc_usercopy_caches[i];
++              else
++#endif
++
+                       cachep = kmalloc_caches[i];
+               return kmem_cache_alloc_node_trace(cachep, flags, node, size);
+diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
+index f28e14a..7831211 100644
+--- a/include/linux/slob_def.h
++++ b/include/linux/slob_def.h
+@@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
+       return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
+ }
+-void *__kmalloc_node(size_t size, gfp_t flags, int node);
++void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
+ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+ {
+@@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
+       return __kmalloc_node(size, flags, NUMA_NO_NODE);
+ }
+-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
++static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
+ {
+       return kmalloc(size, flags);
+ }
+diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
+index 027276f..092bfe8 100644
+--- a/include/linux/slub_def.h
++++ b/include/linux/slub_def.h
+@@ -80,7 +80,7 @@ struct kmem_cache {
+       struct kmem_cache_order_objects max;
+       struct kmem_cache_order_objects min;
+       gfp_t allocflags;       /* gfp flags to use on each alloc */
+-      int refcount;           /* Refcount for slab cache destroy */
++      atomic_t refcount;      /* Refcount for slab cache destroy */
+       void (*ctor)(void *);
+       int inuse;              /* Offset to metadata */
+       int align;              /* Alignment */
+@@ -105,7 +105,7 @@ struct kmem_cache {
+ };
+ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
+-void *__kmalloc(size_t size, gfp_t flags);
++void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
+ static __always_inline void *
+ kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+@@ -149,7 +149,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+ }
+ #endif
+-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
++static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
+ {
+       unsigned int order = get_order(size);
+       return kmalloc_order_trace(size, flags, order);
+@@ -175,7 +175,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
+ }
+ #ifdef CONFIG_NUMA
+-void *__kmalloc_node(size_t size, gfp_t flags, int node);
++void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
+ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+ #ifdef CONFIG_TRACING
+diff --git a/include/linux/smp.h b/include/linux/smp.h
+index c848876..11e8a84 100644
+--- a/include/linux/smp.h
++++ b/include/linux/smp.h
+@@ -221,7 +221,9 @@ static inline void kick_all_cpus_sync(void) {  }
+ #endif
+ #define get_cpu()             ({ preempt_disable(); smp_processor_id(); })
++#define raw_get_cpu()         ({ raw_preempt_disable(); raw_smp_processor_id(); })
+ #define put_cpu()             preempt_enable()
++#define raw_put_cpu_no_resched()      raw_preempt_enable_no_resched()
+ /*
+  * Callback to arch code if there's nosmp or maxcpus=0 on the
+diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
+index 54f91d3..be2c379 100644
+--- a/include/linux/sock_diag.h
++++ b/include/linux/sock_diag.h
+@@ -11,7 +11,7 @@ struct sock;
+ struct sock_diag_handler {
+       __u8 family;
+       int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
+-};
++} __do_const;
+ int sock_diag_register(const struct sock_diag_handler *h);
+ void sock_diag_unregister(const struct sock_diag_handler *h);
+diff --git a/include/linux/sonet.h b/include/linux/sonet.h
+index 680f9a3..f13aeb0 100644
+--- a/include/linux/sonet.h
++++ b/include/linux/sonet.h
+@@ -7,7 +7,7 @@
+ #include <uapi/linux/sonet.h>
+ struct k_sonet_stats {
+-#define __HANDLE_ITEM(i) atomic_t i
++#define __HANDLE_ITEM(i) atomic_unchecked_t i
+       __SONET_ITEMS
+ #undef __HANDLE_ITEM
+ };
+diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
+index 07d8e53..dc934c9 100644
+--- a/include/linux/sunrpc/addr.h
++++ b/include/linux/sunrpc/addr.h
+@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
+ {
+       switch (sap->sa_family) {
+       case AF_INET:
+-              return ntohs(((struct sockaddr_in *)sap)->sin_port);
++              return ntohs(((const struct sockaddr_in *)sap)->sin_port);
+       case AF_INET6:
+-              return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
++              return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
+       }
+       return 0;
+ }
+@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
+ static inline bool __rpc_copy_addr4(struct sockaddr *dst,
+                                   const struct sockaddr *src)
+ {
+-      const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
++      const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
+       struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
+       dsin->sin_family = ssin->sin_family;
+@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
+       if (sa->sa_family != AF_INET6)
+               return 0;
+-      return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
++      return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
+ }
+ #endif /* _LINUX_SUNRPC_ADDR_H */
+diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
+index bfe11be..12bc8c4 100644
+--- a/include/linux/sunrpc/clnt.h
++++ b/include/linux/sunrpc/clnt.h
+@@ -96,7 +96,7 @@ struct rpc_procinfo {
+       unsigned int            p_timer;        /* Which RTT timer to use */
+       u32                     p_statidx;      /* Which procedure to account */
+       const char *            p_name;         /* name of procedure */
+-};
++} __do_const;
+ #ifdef __KERNEL__
+diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
+index 1f0216b..6a4fa50 100644
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -411,7 +411,7 @@ struct svc_procedure {
+       unsigned int            pc_count;       /* call count */
+       unsigned int            pc_cachetype;   /* cache info (NFS) */
+       unsigned int            pc_xdrressize;  /* maximum size of XDR reply */
+-};
++} __do_const;
+ /*
+  * Function prototypes.
+diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
+index 0b8e3e6..33e0a01 100644
+--- a/include/linux/sunrpc/svc_rdma.h
++++ b/include/linux/sunrpc/svc_rdma.h
+@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
+ extern unsigned int svcrdma_max_requests;
+ extern unsigned int svcrdma_max_req_size;
+-extern atomic_t rdma_stat_recv;
+-extern atomic_t rdma_stat_read;
+-extern atomic_t rdma_stat_write;
+-extern atomic_t rdma_stat_sq_starve;
+-extern atomic_t rdma_stat_rq_starve;
+-extern atomic_t rdma_stat_rq_poll;
+-extern atomic_t rdma_stat_rq_prod;
+-extern atomic_t rdma_stat_sq_poll;
+-extern atomic_t rdma_stat_sq_prod;
++extern atomic_unchecked_t rdma_stat_recv;
++extern atomic_unchecked_t rdma_stat_read;
++extern atomic_unchecked_t rdma_stat_write;
++extern atomic_unchecked_t rdma_stat_sq_starve;
++extern atomic_unchecked_t rdma_stat_rq_starve;
++extern atomic_unchecked_t rdma_stat_rq_poll;
++extern atomic_unchecked_t rdma_stat_rq_prod;
++extern atomic_unchecked_t rdma_stat_sq_poll;
++extern atomic_unchecked_t rdma_stat_sq_prod;
+ #define RPCRDMA_VERSION 1
+diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
+index ff374ab..7fd2ecb 100644
+--- a/include/linux/sunrpc/svcauth.h
++++ b/include/linux/sunrpc/svcauth.h
+@@ -109,7 +109,7 @@ struct auth_ops {
+       int     (*release)(struct svc_rqst *rq);
+       void    (*domain_release)(struct auth_domain *);
+       int     (*set_client)(struct svc_rqst *rq);
+-};
++} __do_const;
+ #define       SVC_GARBAGE     1
+ #define       SVC_SYSERR      2
+diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
+index a5ffd32..0935dea 100644
+--- a/include/linux/swiotlb.h
++++ b/include/linux/swiotlb.h
+@@ -60,7 +60,8 @@ extern void
+ extern void
+ swiotlb_free_coherent(struct device *hwdev, size_t size,
+-                    void *vaddr, dma_addr_t dma_handle);
++                    void *vaddr, dma_addr_t dma_handle,
++                    struct dma_attrs *attrs);
+ extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
+                                  unsigned long offset, size_t size,
+diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
+index 84662ec..d8f8adb 100644
+--- a/include/linux/syscalls.h
++++ b/include/linux/syscalls.h
+@@ -97,8 +97,12 @@ struct sigaltstack;
+ #define __MAP(n,...) __MAP##n(__VA_ARGS__)
+ #define __SC_DECL(t, a)       t a
+-#define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
+-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
++#define __TYPE_IS_SL(t) (__same_type((t)0, 0L))
++#define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
++#define __TYPE_IS_SLL(t) (__same_type((t)0, 0LL))
++#define __TYPE_IS_ULL(t) (__same_type((t)0, 0ULL))
++#define __TYPE_IS_LL(t) (__TYPE_IS_SLL(t) || __TYPE_IS_ULL(t))
++#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), __builtin_choose_expr(__TYPE_IS_ULL(t), 0ULL, 0LL), __builtin_choose_expr(__TYPE_IS_UL(t), 0UL, 0L))) a
+ #define __SC_CAST(t, a)       (t) a
+ #define __SC_ARGS(t, a)       a
+ #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
+@@ -362,11 +366,11 @@ asmlinkage long sys_sync(void);
+ asmlinkage long sys_fsync(unsigned int fd);
+ asmlinkage long sys_fdatasync(unsigned int fd);
+ asmlinkage long sys_bdflush(int func, long data);
+-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
+-                              char __user *type, unsigned long flags,
++asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
++                              const char __user *type, unsigned long flags,
+                               void __user *data);
+-asmlinkage long sys_umount(char __user *name, int flags);
+-asmlinkage long sys_oldumount(char __user *name);
++asmlinkage long sys_umount(const char __user *name, int flags);
++asmlinkage long sys_oldumount(const char __user *name);
+ asmlinkage long sys_truncate(const char __user *path, long length);
+ asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
+ asmlinkage long sys_stat(const char __user *filename,
+@@ -578,7 +582,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
+ asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
+ asmlinkage long sys_send(int, void __user *, size_t, unsigned);
+ asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
+-                              struct sockaddr __user *, int);
++                              struct sockaddr __user *, int) __intentional_overflow(0);
+ asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
+ asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
+                            unsigned int vlen, unsigned flags);
+diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
+index 27b3b0b..e093dd9 100644
+--- a/include/linux/syscore_ops.h
++++ b/include/linux/syscore_ops.h
+@@ -16,7 +16,7 @@ struct syscore_ops {
+       int (*suspend)(void);
+       void (*resume)(void);
+       void (*shutdown)(void);
+-};
++} __do_const;
+ extern void register_syscore_ops(struct syscore_ops *ops);
+ extern void unregister_syscore_ops(struct syscore_ops *ops);
+diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
+index 14a8ff2..af52bad 100644
+--- a/include/linux/sysctl.h
++++ b/include/linux/sysctl.h
+@@ -34,13 +34,13 @@ struct ctl_table_root;
+ struct ctl_table_header;
+ struct ctl_dir;
+-typedef struct ctl_table ctl_table;
+-
+ typedef int proc_handler (struct ctl_table *ctl, int write,
+                         void __user *buffer, size_t *lenp, loff_t *ppos);
+ extern int proc_dostring(struct ctl_table *, int,
+                        void __user *, size_t *, loff_t *);
++extern int proc_dostring_modpriv(struct ctl_table *, int,
++                       void __user *, size_t *, loff_t *);
+ extern int proc_dointvec(struct ctl_table *, int,
+                        void __user *, size_t *, loff_t *);
+ extern int proc_dointvec_minmax(struct ctl_table *, int,
+@@ -115,7 +115,9 @@ struct ctl_table
+       struct ctl_table_poll *poll;
+       void *extra1;
+       void *extra2;
+-};
++} __do_const;
++typedef struct ctl_table __no_const ctl_table_no_const;
++typedef struct ctl_table ctl_table;
+ struct ctl_node {
+       struct rb_node node;
+diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
+index e2cee22..3ddb921 100644
+--- a/include/linux/sysfs.h
++++ b/include/linux/sysfs.h
+@@ -31,7 +31,8 @@ struct attribute {
+       struct lock_class_key   *key;
+       struct lock_class_key   skey;
+ #endif
+-};
++} __do_const;
++typedef struct attribute __no_const attribute_no_const;
+ /**
+  *    sysfs_attr_init - initialize a dynamically allocated sysfs attribute
+@@ -59,8 +60,8 @@ struct attribute_group {
+       umode_t                 (*is_visible)(struct kobject *,
+                                             struct attribute *, int);
+       struct attribute        **attrs;
+-};
+-
++} __do_const;
++typedef struct attribute_group __no_const attribute_group_no_const;
+ /**
+@@ -107,7 +108,8 @@ struct bin_attribute {
+                        char *, loff_t, size_t);
+       int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
+                   struct vm_area_struct *vma);
+-};
++} __do_const;
++typedef struct bin_attribute __no_const bin_attribute_no_const;
+ /**
+  *    sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
+diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
+index 7faf933..9b85a0c 100644
+--- a/include/linux/sysrq.h
++++ b/include/linux/sysrq.h
+@@ -16,6 +16,7 @@
+ #include <linux/errno.h>
+ #include <linux/types.h>
++#include <linux/compiler.h>
+ /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
+ #define SYSRQ_DEFAULT_ENABLE  1
+@@ -36,7 +37,7 @@ struct sysrq_key_op {
+       char *help_msg;
+       char *action_msg;
+       int enable_mask;
+-};
++} __do_const;
+ #ifdef CONFIG_MAGIC_SYSRQ
+diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
+index e7e0473..7989295 100644
+--- a/include/linux/thread_info.h
++++ b/include/linux/thread_info.h
+@@ -148,6 +148,15 @@ static inline bool test_and_clear_restore_sigmask(void)
+ #error "no set_restore_sigmask() provided and default one won't work"
+ #endif
++extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
++static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
++{
++#ifndef CONFIG_PAX_USERCOPY_DEBUG
++      if (!__builtin_constant_p(n))
++#endif
++              __check_object_size(ptr, n, to_user);
++}
++
+ #endif        /* __KERNEL__ */
+ #endif /* _LINUX_THREAD_INFO_H */
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index 8780bd2..d1ae08b 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -194,7 +194,7 @@ struct tty_port {
+       const struct tty_port_operations *ops;  /* Port operations */
+       spinlock_t              lock;           /* Lock protecting tty field */
+       int                     blocked_open;   /* Waiting to open */
+-      int                     count;          /* Usage count */
++      atomic_t                count;          /* Usage count */
+       wait_queue_head_t       open_wait;      /* Open waiters */
+       wait_queue_head_t       close_wait;     /* Close waiters */
+       wait_queue_head_t       delta_msr_wait; /* Modem status change */
+@@ -550,7 +550,7 @@ extern int tty_port_open(struct tty_port *port,
+                               struct tty_struct *tty, struct file *filp);
+ static inline int tty_port_users(struct tty_port *port)
+ {
+-      return port->count + port->blocked_open;
++      return atomic_read(&port->count) + port->blocked_open;
+ }
+ extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
+diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
+index 756a609..b302dd6 100644
+--- a/include/linux/tty_driver.h
++++ b/include/linux/tty_driver.h
+@@ -285,7 +285,7 @@ struct tty_operations {
+       void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
+ #endif
+       const struct file_operations *proc_fops;
+-};
++} __do_const;
+ struct tty_driver {
+       int     magic;          /* magic number for this structure */
+diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
+index 58390c7..95e214c 100644
+--- a/include/linux/tty_ldisc.h
++++ b/include/linux/tty_ldisc.h
+@@ -146,7 +146,7 @@ struct tty_ldisc_ops {
+       struct  module *owner;
+-      int refcount;
++      atomic_t refcount;
+ };
+ struct tty_ldisc {
+diff --git a/include/linux/types.h b/include/linux/types.h
+index 4d118ba..c3ee9bf 100644
+--- a/include/linux/types.h
++++ b/include/linux/types.h
+@@ -176,10 +176,26 @@ typedef struct {
+       int counter;
+ } atomic_t;
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++      int counter;
++} atomic_unchecked_t;
++#else
++typedef atomic_t atomic_unchecked_t;
++#endif
++
+ #ifdef CONFIG_64BIT
+ typedef struct {
+       long counter;
+ } atomic64_t;
++
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++      long counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
+ #endif
+ struct list_head {
+diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
+index 5ca0951..ab496a5 100644
+--- a/include/linux/uaccess.h
++++ b/include/linux/uaccess.h
+@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
+               long ret;                               \
+               mm_segment_t old_fs = get_fs();         \
+                                                       \
+-              set_fs(KERNEL_DS);                      \
+               pagefault_disable();                    \
+-              ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval));            \
+-              pagefault_enable();                     \
++              set_fs(KERNEL_DS);                      \
++              ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval));              \
+               set_fs(old_fs);                         \
++              pagefault_enable();                     \
+               ret;                                    \
+       })
+diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
+index 8e522cbc..aa8572d 100644
+--- a/include/linux/uidgid.h
++++ b/include/linux/uidgid.h
+@@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
+ #endif /* CONFIG_USER_NS */
++#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
++#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
++#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
++#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
++
+ #endif /* _LINUX_UIDGID_H */
+diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
+index 99c1b4d..562e6f3 100644
+--- a/include/linux/unaligned/access_ok.h
++++ b/include/linux/unaligned/access_ok.h
+@@ -4,34 +4,34 @@
+ #include <linux/kernel.h>
+ #include <asm/byteorder.h>
+-static inline u16 get_unaligned_le16(const void *p)
++static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
+ {
+-      return le16_to_cpup((__le16 *)p);
++      return le16_to_cpup((const __le16 *)p);
+ }
+-static inline u32 get_unaligned_le32(const void *p)
++static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
+ {
+-      return le32_to_cpup((__le32 *)p);
++      return le32_to_cpup((const __le32 *)p);
+ }
+-static inline u64 get_unaligned_le64(const void *p)
++static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
+ {
+-      return le64_to_cpup((__le64 *)p);
++      return le64_to_cpup((const __le64 *)p);
+ }
+-static inline u16 get_unaligned_be16(const void *p)
++static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
+ {
+-      return be16_to_cpup((__be16 *)p);
++      return be16_to_cpup((const __be16 *)p);
+ }
+-static inline u32 get_unaligned_be32(const void *p)
++static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
+ {
+-      return be32_to_cpup((__be32 *)p);
++      return be32_to_cpup((const __be32 *)p);
+ }
+-static inline u64 get_unaligned_be64(const void *p)
++static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
+ {
+-      return be64_to_cpup((__be64 *)p);
++      return be64_to_cpup((const __be64 *)p);
+ }
+ static inline void put_unaligned_le16(u16 val, void *p)
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index a0bee5a..5533a52 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -552,7 +552,7 @@ struct usb_device {
+       int maxchild;
+       u32 quirks;
+-      atomic_t urbnum;
++      atomic_unchecked_t urbnum;
+       unsigned long active_duration;
+@@ -1607,7 +1607,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
+ extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
+       __u8 request, __u8 requesttype, __u16 value, __u16 index,
+-      void *data, __u16 size, int timeout);
++      void *data, __u16 size, int timeout) __intentional_overflow(-1);
+ extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
+       void *data, int len, int *actual_length, int timeout);
+ extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
+diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
+index e452ba6..78f8e80 100644
+--- a/include/linux/usb/renesas_usbhs.h
++++ b/include/linux/usb/renesas_usbhs.h
+@@ -39,7 +39,7 @@ enum {
+  */
+ struct renesas_usbhs_driver_callback {
+       int (*notify_hotplug)(struct platform_device *pdev);
+-};
++} __no_const;
+ /*
+  * callback functions for platform
+diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
+index 6f8fbcf..8259001 100644
+--- a/include/linux/vermagic.h
++++ b/include/linux/vermagic.h
+@@ -25,9 +25,35 @@
+ #define MODULE_ARCH_VERMAGIC ""
+ #endif
++#ifdef CONFIG_PAX_REFCOUNT
++#define MODULE_PAX_REFCOUNT "REFCOUNT "
++#else
++#define MODULE_PAX_REFCOUNT ""
++#endif
++
++#ifdef CONSTIFY_PLUGIN
++#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
++#else
++#define MODULE_CONSTIFY_PLUGIN ""
++#endif
++
++#ifdef STACKLEAK_PLUGIN
++#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
++#else
++#define MODULE_STACKLEAK_PLUGIN ""
++#endif
++
++#ifdef CONFIG_GRKERNSEC
++#define MODULE_GRSEC "GRSEC "
++#else
++#define MODULE_GRSEC ""
++#endif
++
+ #define VERMAGIC_STRING                                               \
+       UTS_RELEASE " "                                                 \
+       MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT                     \
+       MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS       \
+-      MODULE_ARCH_VERMAGIC
++      MODULE_ARCH_VERMAGIC                                            \
++      MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
++      MODULE_GRSEC
+diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
+index 7d5773a..541c01c 100644
+--- a/include/linux/vmalloc.h
++++ b/include/linux/vmalloc.h
+@@ -16,6 +16,11 @@ struct vm_area_struct;              /* vma defining user mapping in mm_types.h */
+ #define VM_USERMAP    0x00000008      /* suitable for remap_vmalloc_range */
+ #define VM_VPAGES     0x00000010      /* buffer for pages was vmalloc'ed */
+ #define VM_UNLIST     0x00000020      /* vm_struct is not listed in vmlist */
++
++#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
++#define VM_KERNEXEC   0x00000040      /* allocate from executable kernel memory range */
++#endif
++
+ /* bits [20..32] reserved for arch specific ioremap internals */
+ /*
+@@ -75,7 +80,7 @@ extern void *vmalloc_32_user(unsigned long size);
+ extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
+ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
+                       unsigned long start, unsigned long end, gfp_t gfp_mask,
+-                      pgprot_t prot, int node, const void *caller);
++                      pgprot_t prot, int node, const void *caller) __size_overflow(1);
+ extern void vfree(const void *addr);
+ extern void *vmap(struct page **pages, unsigned int count,
+@@ -137,8 +142,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
+ extern void free_vm_area(struct vm_struct *area);
+ /* for /dev/kmem */
+-extern long vread(char *buf, char *addr, unsigned long count);
+-extern long vwrite(char *buf, char *addr, unsigned long count);
++extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
++extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
+ /*
+  *    Internals.  Dont't use..
+diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
+index c586679..f06b389 100644
+--- a/include/linux/vmstat.h
++++ b/include/linux/vmstat.h
+@@ -90,18 +90,18 @@ static inline void vm_events_fold_cpu(int cpu)
+ /*
+  * Zone based page accounting with per cpu differentials.
+  */
+-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
++extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+ static inline void zone_page_state_add(long x, struct zone *zone,
+                                enum zone_stat_item item)
+ {
+-      atomic_long_add(x, &zone->vm_stat[item]);
+-      atomic_long_add(x, &vm_stat[item]);
++      atomic_long_add_unchecked(x, &zone->vm_stat[item]);
++      atomic_long_add_unchecked(x, &vm_stat[item]);
+ }
+ static inline unsigned long global_page_state(enum zone_stat_item item)
+ {
+-      long x = atomic_long_read(&vm_stat[item]);
++      long x = atomic_long_read_unchecked(&vm_stat[item]);
+ #ifdef CONFIG_SMP
+       if (x < 0)
+               x = 0;
+@@ -112,7 +112,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
+ static inline unsigned long zone_page_state(struct zone *zone,
+                                       enum zone_stat_item item)
+ {
+-      long x = atomic_long_read(&zone->vm_stat[item]);
++      long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
+ #ifdef CONFIG_SMP
+       if (x < 0)
+               x = 0;
+@@ -129,7 +129,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
+ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+                                       enum zone_stat_item item)
+ {
+-      long x = atomic_long_read(&zone->vm_stat[item]);
++      long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
+ #ifdef CONFIG_SMP
+       int cpu;
+@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
+ static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
+ {
+-      atomic_long_inc(&zone->vm_stat[item]);
+-      atomic_long_inc(&vm_stat[item]);
++      atomic_long_inc_unchecked(&zone->vm_stat[item]);
++      atomic_long_inc_unchecked(&vm_stat[item]);
+ }
+ static inline void __inc_zone_page_state(struct page *page,
+@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
+ static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
+ {
+-      atomic_long_dec(&zone->vm_stat[item]);
+-      atomic_long_dec(&vm_stat[item]);
++      atomic_long_dec_unchecked(&zone->vm_stat[item]);
++      atomic_long_dec_unchecked(&vm_stat[item]);
+ }
+ static inline void __dec_zone_page_state(struct page *page,
+diff --git a/include/linux/xattr.h b/include/linux/xattr.h
+index fdbafc6..49dfe4f 100644
+--- a/include/linux/xattr.h
++++ b/include/linux/xattr.h
+@@ -28,7 +28,7 @@ struct xattr_handler {
+                  size_t size, int handler_flags);
+       int (*set)(struct dentry *dentry, const char *name, const void *buffer,
+                  size_t size, int flags, int handler_flags);
+-};
++} __do_const;
+ struct xattr {
+       char *name;
+@@ -37,6 +37,9 @@ struct xattr {
+ };
+ ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++ssize_t pax_getxattr(struct dentry *, void *, size_t);
++#endif
+ ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
+ ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
+ int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
+diff --git a/include/linux/zlib.h b/include/linux/zlib.h
+index 9c5a6b4..09c9438 100644
+--- a/include/linux/zlib.h
++++ b/include/linux/zlib.h
+@@ -31,6 +31,7 @@
+ #define _ZLIB_H
+ #include <linux/zconf.h>
++#include <linux/compiler.h>
+ /* zlib deflate based on ZLIB_VERSION "1.1.3" */
+ /* zlib inflate based on ZLIB_VERSION "1.2.3" */
+@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
+                         /* basic functions */
+-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
++extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
+ /*
+    Returns the number of bytes that needs to be allocated for a per-
+    stream workspace with the specified parameters.  A pointer to this
+diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
+index 95d1c91..6798cca 100644
+--- a/include/media/v4l2-dev.h
++++ b/include/media/v4l2-dev.h
+@@ -76,7 +76,7 @@ struct v4l2_file_operations {
+       int (*mmap) (struct file *, struct vm_area_struct *);
+       int (*open) (struct file *);
+       int (*release) (struct file *);
+-};
++} __do_const;
+ /*
+  * Newer version of video_device, handled by videodev2.c
+diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
+index adcbb20..62c2559 100644
+--- a/include/net/9p/transport.h
++++ b/include/net/9p/transport.h
+@@ -57,7 +57,7 @@ struct p9_trans_module {
+       int (*cancel) (struct p9_client *, struct p9_req_t *req);
+       int (*zc_request)(struct p9_client *, struct p9_req_t *,
+                         char *, char *, int , int, int, int);
+-};
++} __do_const;
+ void v9fs_register_trans(struct p9_trans_module *m);
+ void v9fs_unregister_trans(struct p9_trans_module *m);
+diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
+index fb94cf1..7c0c987 100644
+--- a/include/net/bluetooth/l2cap.h
++++ b/include/net/bluetooth/l2cap.h
+@@ -551,7 +551,7 @@ struct l2cap_ops {
+       void                    (*defer) (struct l2cap_chan *chan);
+       struct sk_buff          *(*alloc_skb) (struct l2cap_chan *chan,
+                                              unsigned long len, int nb);
+-};
++} __do_const;
+ struct l2cap_conn {
+       struct hci_conn         *hcon;
+diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
+index f2ae33d..c457cf0 100644
+--- a/include/net/caif/cfctrl.h
++++ b/include/net/caif/cfctrl.h
+@@ -52,7 +52,7 @@ struct cfctrl_rsp {
+       void (*radioset_rsp)(void);
+       void (*reject_rsp)(struct cflayer *layer, u8 linkid,
+                               struct cflayer *client_layer);
+-};
++} __no_const;
+ /* Link Setup Parameters for CAIF-Links. */
+ struct cfctrl_link_param {
+@@ -101,8 +101,8 @@ struct cfctrl_request_info {
+ struct cfctrl {
+       struct cfsrvl serv;
+       struct cfctrl_rsp res;
+-      atomic_t req_seq_no;
+-      atomic_t rsp_seq_no;
++      atomic_unchecked_t req_seq_no;
++      atomic_unchecked_t rsp_seq_no;
+       struct list_head list;
+       /* Protects from simultaneous access to first_req list */
+       spinlock_t info_list_lock;
+diff --git a/include/net/flow.h b/include/net/flow.h
+index 628e11b..4c475df 100644
+--- a/include/net/flow.h
++++ b/include/net/flow.h
+@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
+ extern void flow_cache_flush(void);
+ extern void flow_cache_flush_deferred(void);
+-extern atomic_t flow_cache_genid;
++extern atomic_unchecked_t flow_cache_genid;
+ #endif
+diff --git a/include/net/genetlink.h b/include/net/genetlink.h
+index 93024a4..eeb6b6e 100644
+--- a/include/net/genetlink.h
++++ b/include/net/genetlink.h
+@@ -119,7 +119,7 @@ struct genl_ops {
+                                        struct netlink_callback *cb);
+       int                    (*done)(struct netlink_callback *cb);
+       struct list_head        ops_list;
+-};
++} __do_const;
+ extern int genl_register_family(struct genl_family *family);
+ extern int genl_register_family_with_ops(struct genl_family *family,
+diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
+index 734d9b5..48a9a4b 100644
+--- a/include/net/gro_cells.h
++++ b/include/net/gro_cells.h
+@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
+               cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
+       if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
+-              atomic_long_inc(&dev->rx_dropped);
++              atomic_long_inc_unchecked(&dev->rx_dropped);
+               kfree_skb(skb);
+               return;
+       }
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index de2c785..0588a6b 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
+       void        (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
+       int         (*bind_conflict)(const struct sock *sk,
+                                    const struct inet_bind_bucket *tb, bool relax);
+-};
++} __do_const;
+ /** inet_connection_sock - INET connection oriented sock
+  *
+diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
+index 53f464d..ba76aaa 100644
+--- a/include/net/inetpeer.h
++++ b/include/net/inetpeer.h
+@@ -47,8 +47,8 @@ struct inet_peer {
+        */
+       union {
+               struct {
+-                      atomic_t                        rid;            /* Frag reception counter */
+-                      atomic_t                        ip_id_count;    /* IP ID for the next packet */
++                      atomic_unchecked_t              rid;            /* Frag reception counter */
++                      atomic_unchecked_t              ip_id_count;    /* IP ID for the next packet */
+               };
+               struct rcu_head         rcu;
+               struct inet_peer        *gc_next;
+@@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
+       more++;
+       inet_peer_refcheck(p);
+       do {
+-              old = atomic_read(&p->ip_id_count);
++              old = atomic_read_unchecked(&p->ip_id_count);
+               new = old + more;
+               if (!new)
+                       new = 1;
+-      } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
++      } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
+       return new;
+ }
+diff --git a/include/net/ip.h b/include/net/ip.h
+index a68f838..74518ab 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -202,7 +202,7 @@ extern struct local_ports {
+ } sysctl_local_ports;
+ extern void inet_get_local_port_range(int *low, int *high);
+-extern unsigned long *sysctl_local_reserved_ports;
++extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
+ static inline int inet_is_reserved_local_port(int port)
+ {
+       return test_bit(port, sysctl_local_reserved_ports);
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index e49db91..76a81de 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -167,7 +167,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
+ #define FIB_RES_SADDR(net, res)                               \
+       ((FIB_RES_NH(res).nh_saddr_genid ==             \
+-        atomic_read(&(net)->ipv4.dev_addr_genid)) ?   \
++        atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
+        FIB_RES_NH(res).nh_saddr :                     \
+        fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
+ #define FIB_RES_GW(res)                       (FIB_RES_NH(res).nh_gw)
+diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
+index 4c062cc..3562c31 100644
+--- a/include/net/ip_vs.h
++++ b/include/net/ip_vs.h
+@@ -612,7 +612,7 @@ struct ip_vs_conn {
+       struct ip_vs_conn       *control;       /* Master control connection */
+       atomic_t                n_control;      /* Number of controlled ones */
+       struct ip_vs_dest       *dest;          /* real server */
+-      atomic_t                in_pkts;        /* incoming packet counter */
++      atomic_unchecked_t      in_pkts;        /* incoming packet counter */
+       /* packet transmitter for different forwarding methods.  If it
+          mangles the packet, it must return NF_DROP or better NF_STOLEN,
+@@ -761,7 +761,7 @@ struct ip_vs_dest {
+       __be16                  port;           /* port number of the server */
+       union nf_inet_addr      addr;           /* IP address of the server */
+       volatile unsigned int   flags;          /* dest status flags */
+-      atomic_t                conn_flags;     /* flags to copy to conn */
++      atomic_unchecked_t      conn_flags;     /* flags to copy to conn */
+       atomic_t                weight;         /* server weight */
+       atomic_t                refcnt;         /* reference counter */
+@@ -1013,11 +1013,11 @@ struct netns_ipvs {
+       /* ip_vs_lblc */
+       int                     sysctl_lblc_expiration;
+       struct ctl_table_header *lblc_ctl_header;
+-      struct ctl_table        *lblc_ctl_table;
++      ctl_table_no_const      *lblc_ctl_table;
+       /* ip_vs_lblcr */
+       int                     sysctl_lblcr_expiration;
+       struct ctl_table_header *lblcr_ctl_header;
+-      struct ctl_table        *lblcr_ctl_table;
++      ctl_table_no_const      *lblcr_ctl_table;
+       /* ip_vs_est */
+       struct list_head        est_list;       /* estimator list */
+       spinlock_t              est_lock;
+diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
+index 80ffde3..968b0f4 100644
+--- a/include/net/irda/ircomm_tty.h
++++ b/include/net/irda/ircomm_tty.h
+@@ -35,6 +35,7 @@
+ #include <linux/termios.h>
+ #include <linux/timer.h>
+ #include <linux/tty.h>                /* struct tty_struct */
++#include <asm/local.h>
+ #include <net/irda/irias_object.h>
+ #include <net/irda/ircomm_core.h>
+diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
+index 714cc9a..ea05f3e 100644
+--- a/include/net/iucv/af_iucv.h
++++ b/include/net/iucv/af_iucv.h
+@@ -149,7 +149,7 @@ struct iucv_skb_cb {
+ struct iucv_sock_list {
+       struct hlist_head head;
+       rwlock_t          lock;
+-      atomic_t          autobind_name;
++      atomic_unchecked_t autobind_name;
+ };
+ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
+diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
+index df83f69..9b640b8 100644
+--- a/include/net/llc_c_ac.h
++++ b/include/net/llc_c_ac.h
+@@ -87,7 +87,7 @@
+ #define LLC_CONN_AC_STOP_SENDACK_TMR                  70
+ #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING  71
+-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
++typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
+ extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
+ extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
+diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
+index 6ca3113..f8026dd 100644
+--- a/include/net/llc_c_ev.h
++++ b/include/net/llc_c_ev.h
+@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
+       return (struct llc_conn_state_ev *)skb->cb;
+ }
+-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
+-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
++typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
++typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
+ extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
+ extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
+diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
+index 0e79cfb..f46db31 100644
+--- a/include/net/llc_c_st.h
++++ b/include/net/llc_c_st.h
+@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
+       u8                 next_state;
+       llc_conn_ev_qfyr_t *ev_qualifiers;
+       llc_conn_action_t  *ev_actions;
+-};
++} __do_const;
+ struct llc_conn_state {
+       u8                          current_state;
+diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
+index 37a3bbd..55a4241 100644
+--- a/include/net/llc_s_ac.h
++++ b/include/net/llc_s_ac.h
+@@ -23,7 +23,7 @@
+ #define SAP_ACT_TEST_IND      9
+ /* All action functions must look like this */
+-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
++typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
+ extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
+                                      struct sk_buff *skb);
+diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
+index 567c681..cd73ac0 100644
+--- a/include/net/llc_s_st.h
++++ b/include/net/llc_s_st.h
+@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
+       llc_sap_ev_t      ev;
+       u8                next_state;
+       llc_sap_action_t *ev_actions;
+-};
++} __do_const;
+ struct llc_sap_state {
+       u8                         curr_state;
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index 885898a..cdace34 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -4205,7 +4205,7 @@ struct rate_control_ops {
+       void (*add_sta_debugfs)(void *priv, void *priv_sta,
+                               struct dentry *dir);
+       void (*remove_sta_debugfs)(void *priv, void *priv_sta);
+-};
++} __do_const;
+ static inline int rate_supported(struct ieee80211_sta *sta,
+                                enum ieee80211_band band,
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index 7e748ad..5c6229b 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -123,7 +123,7 @@ struct neigh_ops {
+       void                    (*error_report)(struct neighbour *, struct sk_buff *);
+       int                     (*output)(struct neighbour *, struct sk_buff *);
+       int                     (*connected_output)(struct neighbour *, struct sk_buff *);
+-};
++} __do_const;
+ struct pneigh_entry {
+       struct pneigh_entry     *next;
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index b176978..ea169f4 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -117,7 +117,7 @@ struct net {
+ #endif
+       struct netns_ipvs       *ipvs;
+       struct sock             *diag_nlsk;
+-      atomic_t                rt_genid;
++      atomic_unchecked_t      rt_genid;
+ };
+ /*
+@@ -274,7 +274,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
+ #define __net_init    __init
+ #define __net_exit    __exit_refok
+ #define __net_initdata        __initdata
++#ifdef CONSTIFY_PLUGIN
+ #define __net_initconst       __initconst
++#else
++#define __net_initconst       __initdata
++#endif
+ #endif
+ struct pernet_operations {
+@@ -284,7 +288,7 @@ struct pernet_operations {
+       void (*exit_batch)(struct list_head *net_exit_list);
+       int *id;
+       size_t size;
+-};
++} __do_const;
+ /*
+  * Use these carefully.  If you implement a network device and it
+@@ -332,12 +336,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
+ static inline int rt_genid(struct net *net)
+ {
+-      return atomic_read(&net->rt_genid);
++      return atomic_read_unchecked(&net->rt_genid);
+ }
+ static inline void rt_genid_bump(struct net *net)
+ {
+-      atomic_inc(&net->rt_genid);
++      atomic_inc_unchecked(&net->rt_genid);
+ }
+ #endif /* __NET_NET_NAMESPACE_H */
+diff --git a/include/net/netdma.h b/include/net/netdma.h
+index 8ba8ce2..99b7fff 100644
+--- a/include/net/netdma.h
++++ b/include/net/netdma.h
+@@ -24,7 +24,7 @@
+ #include <linux/dmaengine.h>
+ #include <linux/skbuff.h>
+-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
++int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
+               struct sk_buff *skb, int offset, struct iovec *to,
+               size_t len, struct dma_pinned_list *pinned_list);
+diff --git a/include/net/netlink.h b/include/net/netlink.h
+index 9690b0f..87aded7 100644
+--- a/include/net/netlink.h
++++ b/include/net/netlink.h
+@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
+ static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
+ {
+       if (mark)
+-              skb_trim(skb, (unsigned char *) mark - skb->data);
++              skb_trim(skb, (const unsigned char *) mark - skb->data);
+ }
+ /**
+diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
+index c9c0c53..53f24c3 100644
+--- a/include/net/netns/conntrack.h
++++ b/include/net/netns/conntrack.h
+@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
+ struct nf_proto_net {
+ #ifdef CONFIG_SYSCTL
+       struct ctl_table_header *ctl_table_header;
+-      struct ctl_table        *ctl_table;
++      ctl_table_no_const      *ctl_table;
+ #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+       struct ctl_table_header *ctl_compat_header;
+-      struct ctl_table        *ctl_compat_table;
++      ctl_table_no_const      *ctl_compat_table;
+ #endif
+ #endif
+       unsigned int            users;
+@@ -58,7 +58,7 @@ struct nf_ip_net {
+       struct nf_icmp_net      icmpv6;
+ #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
+       struct ctl_table_header *ctl_table_header;
+-      struct ctl_table        *ctl_table;
++      ctl_table_no_const      *ctl_table;
+ #endif
+ };
+diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
+index 2ba9de8..47bd6c7 100644
+--- a/include/net/netns/ipv4.h
++++ b/include/net/netns/ipv4.h
+@@ -67,7 +67,7 @@ struct netns_ipv4 {
+       kgid_t sysctl_ping_group_range[2];
+       long sysctl_tcp_mem[3];
+-      atomic_t dev_addr_genid;
++      atomic_unchecked_t      dev_addr_genid;
+ #ifdef CONFIG_IP_MROUTE
+ #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
+index 005e2c2..023d340 100644
+--- a/include/net/netns/ipv6.h
++++ b/include/net/netns/ipv6.h
+@@ -71,7 +71,7 @@ struct netns_ipv6 {
+       struct fib_rules_ops    *mr6_rules_ops;
+ #endif
+ #endif
+-      atomic_t                dev_addr_genid;
++      atomic_unchecked_t      dev_addr_genid;
+ };
+ #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+diff --git a/include/net/protocol.h b/include/net/protocol.h
+index 047c047..b9dad15 100644
+--- a/include/net/protocol.h
++++ b/include/net/protocol.h
+@@ -44,7 +44,7 @@ struct net_protocol {
+       void                    (*err_handler)(struct sk_buff *skb, u32 info);
+       unsigned int            no_policy:1,
+                               netns_ok:1;
+-};
++} __do_const;
+ #if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_protocol {
+@@ -57,7 +57,7 @@ struct inet6_protocol {
+                              u8 type, u8 code, int offset,
+                              __be32 info);
+       unsigned int    flags;  /* INET6_PROTO_xxx */
+-};
++} __do_const;
+ #define INET6_PROTO_NOPOLICY  0x1
+ #define INET6_PROTO_FINAL     0x2
+diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
+index 7026648..584cc8c 100644
+--- a/include/net/rtnetlink.h
++++ b/include/net/rtnetlink.h
+@@ -81,7 +81,7 @@ struct rtnl_link_ops {
+                                              const struct net_device *dev);
+       unsigned int            (*get_num_tx_queues)(void);
+       unsigned int            (*get_num_rx_queues)(void);
+-};
++} __do_const;
+ extern int    __rtnl_link_register(struct rtnl_link_ops *ops);
+ extern void   __rtnl_link_unregister(struct rtnl_link_ops *ops);
+diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
+index cd89510..d67810f 100644
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -330,9 +330,9 @@ do {                                                                       \
+ #else /* SCTP_DEBUG */
+-#define SCTP_DEBUG_PRINTK(whatever...)
+-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
+-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
++#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
++#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
++#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
+ #define SCTP_ENABLE_DEBUG
+ #define SCTP_DISABLE_DEBUG
+ #define SCTP_ASSERT(expr, str, func)
+diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
+index 2a82d13..62a31c2 100644
+--- a/include/net/sctp/sm.h
++++ b/include/net/sctp/sm.h
+@@ -87,7 +87,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
+ typedef struct {
+       sctp_state_fn_t *fn;
+       const char *name;
+-} sctp_sm_table_entry_t;
++} __do_const sctp_sm_table_entry_t;
+ /* A naming convention of "sctp_sf_xxx" applies to all the state functions
+  * currently in use.
+@@ -299,7 +299,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
+ __u32 sctp_generate_tsn(const struct sctp_endpoint *);
+ /* Extern declarations for major data structures.  */
+-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
++extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
+ /* Get the size of a DATA chunk payload. */
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
+index 1bd4c41..9250b5b 100644
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -516,7 +516,7 @@ struct sctp_pf {
+                                         struct sctp_association *asoc);
+       void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
+       struct sctp_af *af;
+-};
++} __do_const;
+ /* Structure to track chunk fragments that have been acked, but peer
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 66772cf..25bc45b 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -325,7 +325,7 @@ struct sock {
+ #ifdef CONFIG_RPS
+       __u32                   sk_rxhash;
+ #endif
+-      atomic_t                sk_drops;
++      atomic_unchecked_t      sk_drops;
+       int                     sk_rcvbuf;
+       struct sk_filter __rcu  *sk_filter;
+@@ -1797,7 +1797,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
+ }
+ static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
+-                                         char __user *from, char *to,
++                                         char __user *from, unsigned char *to,
+                                          int copy, int offset)
+ {
+       if (skb->ip_summed == CHECKSUM_NONE) {
+@@ -2056,7 +2056,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
+       }
+ }
+-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
++struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
+ /**
+  * sk_page_frag - return an appropriate page_frag
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 5bba80f..8520a82 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -524,7 +524,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
+ extern void tcp_xmit_retransmit_queue(struct sock *);
+ extern void tcp_simple_retransmit(struct sock *);
+ extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
+-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
++extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
+ extern void tcp_send_probe0(struct sock *);
+ extern void tcp_send_partial(struct sock *);
+@@ -697,8 +697,8 @@ struct tcp_skb_cb {
+               struct inet6_skb_parm   h6;
+ #endif
+       } header;       /* For incoming frames          */
+-      __u32           seq;            /* Starting sequence number     */
+-      __u32           end_seq;        /* SEQ + FIN + SYN + datalen    */
++      __u32           seq __intentional_overflow(0);  /* Starting sequence number     */
++      __u32           end_seq __intentional_overflow(0);      /* SEQ + FIN + SYN + datalen    */
+       __u32           when;           /* used to compute rtt's        */
+       __u8            tcp_flags;      /* TCP header flags. (tcp[13])  */
+@@ -712,7 +712,7 @@ struct tcp_skb_cb {
+       __u8            ip_dsfield;     /* IPv4 tos or IPv6 dsfield     */
+       /* 1 byte hole */
+-      __u32           ack_seq;        /* Sequence number ACK'd        */
++      __u32           ack_seq __intentional_overflow(0);      /* Sequence number ACK'd        */
+ };
+ #define TCP_SKB_CB(__skb)     ((struct tcp_skb_cb *)&((__skb)->cb[0]))
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index 94ce082..62b278d 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -305,7 +305,7 @@ struct xfrm_policy_afinfo {
+                                           struct net_device *dev,
+                                           const struct flowi *fl);
+       struct dst_entry        *(*blackhole_route)(struct net *net, struct dst_entry *orig);
+-};
++} __do_const;
+ extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
+ extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
+@@ -341,7 +341,7 @@ struct xfrm_state_afinfo {
+                                                 struct sk_buff *skb);
+       int                     (*transport_finish)(struct sk_buff *skb,
+                                                   int async);
+-};
++} __do_const;
+ extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
+ extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
+@@ -424,7 +424,7 @@ struct xfrm_mode {
+       struct module *owner;
+       unsigned int encap;
+       int flags;
+-};
++} __do_const;
+ /* Flags for xfrm_mode. */
+ enum {
+@@ -521,7 +521,7 @@ struct xfrm_policy {
+       struct timer_list       timer;
+       struct flow_cache_object flo;
+-      atomic_t                genid;
++      atomic_unchecked_t      genid;
+       u32                     priority;
+       u32                     index;
+       struct xfrm_mark        mark;
+diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
+index 1a046b1..ee0bef0 100644
+--- a/include/rdma/iw_cm.h
++++ b/include/rdma/iw_cm.h
+@@ -122,7 +122,7 @@ struct iw_cm_verbs {
+                                        int backlog);
+       int             (*destroy_listen)(struct iw_cm_id *cm_id);
+-};
++} __no_const;
+ /**
+  * iw_create_cm_id - Create an IW CM identifier.
+diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
+index e1379b4..67eafbe 100644
+--- a/include/scsi/libfc.h
++++ b/include/scsi/libfc.h
+@@ -762,6 +762,7 @@ struct libfc_function_template {
+        */
+       void (*disc_stop_final) (struct fc_lport *);
+ };
++typedef struct libfc_function_template __no_const libfc_function_template_no_const;
+ /**
+  * struct fc_disc - Discovery context
+@@ -866,7 +867,7 @@ struct fc_lport {
+       struct fc_vport                *vport;
+       /* Operational Information */
+-      struct libfc_function_template tt;
++      libfc_function_template_no_const tt;
+       u8                             link_up;
+       u8                             qfull;
+       enum fc_lport_state            state;
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index cc64587..608f523 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -171,9 +171,9 @@ struct scsi_device {
+       unsigned int max_device_blocked; /* what device_blocked counts down from  */
+ #define SCSI_DEFAULT_DEVICE_BLOCKED   3
+-      atomic_t iorequest_cnt;
+-      atomic_t iodone_cnt;
+-      atomic_t ioerr_cnt;
++      atomic_unchecked_t iorequest_cnt;
++      atomic_unchecked_t iodone_cnt;
++      atomic_unchecked_t ioerr_cnt;
+       struct device           sdev_gendev,
+                               sdev_dev;
+diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
+index b797e8f..8e2c3aa 100644
+--- a/include/scsi/scsi_transport_fc.h
++++ b/include/scsi/scsi_transport_fc.h
+@@ -751,7 +751,8 @@ struct fc_function_template {
+       unsigned long   show_host_system_hostname:1;
+       unsigned long   disable_target_scan:1;
+-};
++} __do_const;
++typedef struct fc_function_template __no_const fc_function_template_no_const;
+ /**
+diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
+index 9031a26..750d592 100644
+--- a/include/sound/compress_driver.h
++++ b/include/sound/compress_driver.h
+@@ -128,7 +128,7 @@ struct snd_compr_ops {
+                       struct snd_compr_caps *caps);
+       int (*get_codec_caps) (struct snd_compr_stream *stream,
+                       struct snd_compr_codec_caps *codec);
+-};
++} __no_const;
+ /**
+  * struct snd_compr: Compressed device
+diff --git a/include/sound/soc.h b/include/sound/soc.h
+index 85c1522..f44bad1 100644
+--- a/include/sound/soc.h
++++ b/include/sound/soc.h
+@@ -781,7 +781,7 @@ struct snd_soc_codec_driver {
+       /* probe ordering - for components with runtime dependencies */
+       int probe_order;
+       int remove_order;
+-};
++} __do_const;
+ /* SoC platform interface */
+ struct snd_soc_platform_driver {
+@@ -827,7 +827,7 @@ struct snd_soc_platform_driver {
+       unsigned int (*read)(struct snd_soc_platform *, unsigned int);
+       int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
+       int (*bespoke_trigger)(struct snd_pcm_substream *, int);
+-};
++} __do_const;
+ struct snd_soc_platform {
+       const char *name;
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 4ea4f98..a63629b 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -653,7 +653,7 @@ struct se_device {
+       spinlock_t              stats_lock;
+       /* Active commands on this virtual SE device */
+       atomic_t                simple_cmds;
+-      atomic_t                dev_ordered_id;
++      atomic_unchecked_t      dev_ordered_id;
+       atomic_t                dev_ordered_sync;
+       atomic_t                dev_qf_count;
+       int                     export_count;
+diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
+new file mode 100644
+index 0000000..fb634b7
+--- /dev/null
++++ b/include/trace/events/fs.h
+@@ -0,0 +1,53 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM fs
++
++#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_FS_H
++
++#include <linux/fs.h>
++#include <linux/tracepoint.h>
++
++TRACE_EVENT(do_sys_open,
++
++      TP_PROTO(const char *filename, int flags, int mode),
++
++      TP_ARGS(filename, flags, mode),
++
++      TP_STRUCT__entry(
++              __string(       filename, filename              )
++              __field(        int, flags                      )
++              __field(        int, mode                       )
++      ),
++
++      TP_fast_assign(
++              __assign_str(filename, filename);
++              __entry->flags = flags;
++              __entry->mode = mode;
++      ),
++
++      TP_printk("\"%s\" %x %o",
++                __get_str(filename), __entry->flags, __entry->mode)
++);
++
++TRACE_EVENT(open_exec,
++
++      TP_PROTO(const char *filename),
++
++      TP_ARGS(filename),
++
++      TP_STRUCT__entry(
++              __string(       filename, filename              )
++      ),
++
++      TP_fast_assign(
++              __assign_str(filename, filename);
++      ),
++
++      TP_printk("\"%s\"",
++                __get_str(filename))
++);
++
++#endif /* _TRACE_FS_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
+index 1c09820..7f5ec79 100644
+--- a/include/trace/events/irq.h
++++ b/include/trace/events/irq.h
+@@ -36,7 +36,7 @@ struct softirq_action;
+  */
+ TRACE_EVENT(irq_handler_entry,
+-      TP_PROTO(int irq, struct irqaction *action),
++      TP_PROTO(int irq, const struct irqaction *action),
+       TP_ARGS(irq, action),
+@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
+  */
+ TRACE_EVENT(irq_handler_exit,
+-      TP_PROTO(int irq, struct irqaction *action, int ret),
++      TP_PROTO(int irq, const struct irqaction *action, int ret),
+       TP_ARGS(irq, action, ret),
+diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
+index 7caf44c..23c6f27 100644
+--- a/include/uapi/linux/a.out.h
++++ b/include/uapi/linux/a.out.h
+@@ -39,6 +39,14 @@ enum machine_type {
+   M_MIPS2 = 152               /* MIPS R6000/R4000 binary */
+ };
++/* Constants for the N_FLAGS field */
++#define F_PAX_PAGEEXEC        1       /* Paging based non-executable pages */
++#define F_PAX_EMUTRAMP        2       /* Emulate trampolines */
++#define F_PAX_MPROTECT        4       /* Restrict mprotect() */
++#define F_PAX_RANDMMAP        8       /* Randomize mmap() base */
++/*#define F_PAX_RANDEXEC      16*/    /* Randomize ET_EXEC base */
++#define F_PAX_SEGMEXEC        32      /* Segmentation based non-executable pages */
++
+ #if !defined (N_MAGIC)
+ #define N_MAGIC(exec) ((exec).a_info & 0xffff)
+ #endif
+diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
+index d876736..ccce5c0 100644
+--- a/include/uapi/linux/byteorder/little_endian.h
++++ b/include/uapi/linux/byteorder/little_endian.h
+@@ -42,51 +42,51 @@
+ static inline __le64 __cpu_to_le64p(const __u64 *p)
+ {
+-      return (__force __le64)*p;
++      return (__force const __le64)*p;
+ }
+-static inline __u64 __le64_to_cpup(const __le64 *p)
++static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
+ {
+-      return (__force __u64)*p;
++      return (__force const __u64)*p;
+ }
+ static inline __le32 __cpu_to_le32p(const __u32 *p)
+ {
+-      return (__force __le32)*p;
++      return (__force const __le32)*p;
+ }
+ static inline __u32 __le32_to_cpup(const __le32 *p)
+ {
+-      return (__force __u32)*p;
++      return (__force const __u32)*p;
+ }
+ static inline __le16 __cpu_to_le16p(const __u16 *p)
+ {
+-      return (__force __le16)*p;
++      return (__force const __le16)*p;
+ }
+ static inline __u16 __le16_to_cpup(const __le16 *p)
+ {
+-      return (__force __u16)*p;
++      return (__force const __u16)*p;
+ }
+ static inline __be64 __cpu_to_be64p(const __u64 *p)
+ {
+-      return (__force __be64)__swab64p(p);
++      return (__force const __be64)__swab64p(p);
+ }
+ static inline __u64 __be64_to_cpup(const __be64 *p)
+ {
+-      return __swab64p((__u64 *)p);
++      return __swab64p((const __u64 *)p);
+ }
+ static inline __be32 __cpu_to_be32p(const __u32 *p)
+ {
+-      return (__force __be32)__swab32p(p);
++      return (__force const __be32)__swab32p(p);
+ }
+-static inline __u32 __be32_to_cpup(const __be32 *p)
++static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
+ {
+-      return __swab32p((__u32 *)p);
++      return __swab32p((const __u32 *)p);
+ }
+ static inline __be16 __cpu_to_be16p(const __u16 *p)
+ {
+-      return (__force __be16)__swab16p(p);
++      return (__force const __be16)__swab16p(p);
+ }
+ static inline __u16 __be16_to_cpup(const __be16 *p)
+ {
+-      return __swab16p((__u16 *)p);
++      return __swab16p((const __u16 *)p);
+ }
+ #define __cpu_to_le64s(x) do { (void)(x); } while (0)
+ #define __le64_to_cpus(x) do { (void)(x); } while (0)
+diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
+index ef6103b..d4e65dd 100644
+--- a/include/uapi/linux/elf.h
++++ b/include/uapi/linux/elf.h
+@@ -37,6 +37,17 @@ typedef __s64       Elf64_Sxword;
+ #define PT_GNU_EH_FRAME               0x6474e550
+ #define PT_GNU_STACK  (PT_LOOS + 0x474e551)
++#define PT_GNU_RELRO  (PT_LOOS + 0x474e552)
++
++#define PT_PAX_FLAGS  (PT_LOOS + 0x5041580)
++
++/* Constants for the e_flags field */
++#define EF_PAX_PAGEEXEC               1       /* Paging based non-executable pages */
++#define EF_PAX_EMUTRAMP               2       /* Emulate trampolines */
++#define EF_PAX_MPROTECT               4       /* Restrict mprotect() */
++#define EF_PAX_RANDMMAP               8       /* Randomize mmap() base */
++/*#define EF_PAX_RANDEXEC             16*/    /* Randomize ET_EXEC base */
++#define EF_PAX_SEGMEXEC               32      /* Segmentation based non-executable pages */
+ /*
+  * Extended Numbering
+@@ -94,6 +105,8 @@ typedef __s64       Elf64_Sxword;
+ #define DT_DEBUG      21
+ #define DT_TEXTREL    22
+ #define DT_JMPREL     23
++#define DT_FLAGS      30
++  #define DF_TEXTREL  0x00000004
+ #define DT_ENCODING   32
+ #define OLD_DT_LOOS   0x60000000
+ #define DT_LOOS               0x6000000d
+@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
+ #define PF_W          0x2
+ #define PF_X          0x1
++#define PF_PAGEEXEC   (1U << 4)       /* Enable  PAGEEXEC */
++#define PF_NOPAGEEXEC (1U << 5)       /* Disable PAGEEXEC */
++#define PF_SEGMEXEC   (1U << 6)       /* Enable  SEGMEXEC */
++#define PF_NOSEGMEXEC (1U << 7)       /* Disable SEGMEXEC */
++#define PF_MPROTECT   (1U << 8)       /* Enable  MPROTECT */
++#define PF_NOMPROTECT (1U << 9)       /* Disable MPROTECT */
++/*#define PF_RANDEXEC (1U << 10)*/    /* Enable  RANDEXEC */
++/*#define PF_NORANDEXEC       (1U << 11)*/    /* Disable RANDEXEC */
++#define PF_EMUTRAMP   (1U << 12)      /* Enable  EMUTRAMP */
++#define PF_NOEMUTRAMP (1U << 13)      /* Disable EMUTRAMP */
++#define PF_RANDMMAP   (1U << 14)      /* Enable  RANDMMAP */
++#define PF_NORANDMMAP (1U << 15)      /* Disable RANDMMAP */
++
+ typedef struct elf32_phdr{
+   Elf32_Word  p_type;
+   Elf32_Off   p_offset;
+@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
+ #define       EI_OSABI        7
+ #define       EI_PAD          8
++#define       EI_PAX          14
++
+ #define       ELFMAG0         0x7f            /* EI_MAG */
+ #define       ELFMAG1         'E'
+ #define       ELFMAG2         'L'
+diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
+index aa169c4..6a2771d 100644
+--- a/include/uapi/linux/personality.h
++++ b/include/uapi/linux/personality.h
+@@ -30,6 +30,7 @@ enum {
+ #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC  | \
+                           ADDR_NO_RANDOMIZE  | \
+                           ADDR_COMPAT_LAYOUT | \
++                          ADDR_LIMIT_3GB     | \
+                           MMAP_PAGE_ZERO)
+ /*
+diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
+index 7530e74..e714828 100644
+--- a/include/uapi/linux/screen_info.h
++++ b/include/uapi/linux/screen_info.h
+@@ -43,7 +43,8 @@ struct screen_info {
+       __u16 pages;            /* 0x32 */
+       __u16 vesa_attributes;  /* 0x34 */
+       __u32 capabilities;     /* 0x36 */
+-      __u8  _reserved[6];     /* 0x3a */
++      __u16 vesapm_size;      /* 0x3a */
++      __u8  _reserved[4];     /* 0x3c */
+ } __attribute__((packed));
+ #define VIDEO_TYPE_MDA                0x10    /* Monochrome Text Display      */
+diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
+index 0e011eb..82681b1 100644
+--- a/include/uapi/linux/swab.h
++++ b/include/uapi/linux/swab.h
+@@ -43,7 +43,7 @@
+  * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
+  */
+-static inline __attribute_const__ __u16 __fswab16(__u16 val)
++static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
+ {
+ #ifdef __HAVE_BUILTIN_BSWAP16__
+       return __builtin_bswap16(val);
+@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
+ #endif
+ }
+-static inline __attribute_const__ __u32 __fswab32(__u32 val)
++static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
+ {
+ #ifdef __HAVE_BUILTIN_BSWAP32__
+       return __builtin_bswap32(val);
+@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
+ #endif
+ }
+-static inline __attribute_const__ __u64 __fswab64(__u64 val)
++static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
+ {
+ #ifdef __HAVE_BUILTIN_BSWAP64__
+       return __builtin_bswap64(val);
+diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
+index 6d67213..8dab561 100644
+--- a/include/uapi/linux/sysctl.h
++++ b/include/uapi/linux/sysctl.h
+@@ -155,7 +155,11 @@ enum
+       KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
+ };
+-
++#ifdef CONFIG_PAX_SOFTMODE
++enum {
++      PAX_SOFTMODE=1          /* PaX: disable/enable soft mode */
++};
++#endif
+ /* CTL_VM names: */
+ enum
+diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
+index e4629b9..6958086 100644
+--- a/include/uapi/linux/xattr.h
++++ b/include/uapi/linux/xattr.h
+@@ -63,5 +63,9 @@
+ #define XATTR_POSIX_ACL_DEFAULT  "posix_acl_default"
+ #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
++/* User namespace */
++#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
++#define XATTR_PAX_FLAGS_SUFFIX "flags"
++#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
+ #endif /* _UAPI_LINUX_XATTR_H */
+diff --git a/include/video/udlfb.h b/include/video/udlfb.h
+index f9466fa..f4e2b81 100644
+--- a/include/video/udlfb.h
++++ b/include/video/udlfb.h
+@@ -53,10 +53,10 @@ struct dlfb_data {
+       u32 pseudo_palette[256];
+       int blank_mode; /*one of FB_BLANK_ */
+       /* blit-only rendering path metrics, exposed through sysfs */
+-      atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
+-      atomic_t bytes_identical; /* saved effort with backbuffer comparison */
+-      atomic_t bytes_sent; /* to usb, after compression including overhead */
+-      atomic_t cpu_kcycles_used; /* transpired during pixel processing */
++      atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
++      atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
++      atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
++      atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
+ };
+ #define NR_USB_REQUEST_I2C_SUB_IO 0x02
+diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
+index 1a91850..28573f8 100644
+--- a/include/video/uvesafb.h
++++ b/include/video/uvesafb.h
+@@ -122,6 +122,7 @@ struct uvesafb_par {
+       u8 ypan;                        /* 0 - nothing, 1 - ypan, 2 - ywrap */
+       u8 pmi_setpal;                  /* PMI for palette changes */
+       u16 *pmi_base;                  /* protected mode interface location */
++      u8 *pmi_code;                   /* protected mode code location */
+       void *pmi_start;
+       void *pmi_pal;
+       u8 *vbe_state_orig;             /*
+diff --git a/init/Kconfig b/init/Kconfig
+index 2d9b831..ae4c8ac 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1029,6 +1029,7 @@ endif # CGROUPS
+ config CHECKPOINT_RESTORE
+       bool "Checkpoint/restore support" if EXPERT
++      depends on !GRKERNSEC
+       default n
+       help
+         Enables additional kernel features in a sake of checkpoint/restore.
+@@ -1516,7 +1517,7 @@ config SLUB_DEBUG
+ config COMPAT_BRK
+       bool "Disable heap randomization"
+-      default y
++      default n
+       help
+         Randomizing heap placement makes heap exploits harder, but it
+         also breaks ancient binaries (including anything libc5 based).
+@@ -1779,7 +1780,7 @@ config INIT_ALL_POSSIBLE
+ config STOP_MACHINE
+       bool
+       default y
+-      depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
++      depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
+       help
+         Need stop_machine() primitive.
+diff --git a/init/Makefile b/init/Makefile
+index 7bc47ee..6da2dc7 100644
+--- a/init/Makefile
++++ b/init/Makefile
+@@ -2,6 +2,9 @@
+ # Makefile for the linux kernel.
+ #
++ccflags-y := $(GCC_PLUGINS_CFLAGS)
++asflags-y := $(GCC_PLUGINS_AFLAGS)
++
+ obj-y                          := main.o version.o mounts.o
+ ifneq ($(CONFIG_BLK_DEV_INITRD),y)
+ obj-y                          += noinitramfs.o
+diff --git a/init/do_mounts.c b/init/do_mounts.c
+index a2b49f2..03a0e17c 100644
+--- a/init/do_mounts.c
++++ b/init/do_mounts.c
+@@ -355,11 +355,11 @@ static void __init get_fs_names(char *page)
+ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
+ {
+       struct super_block *s;
+-      int err = sys_mount(name, "/root", fs, flags, data);
++      int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
+       if (err)
+               return err;
+-      sys_chdir("/root");
++      sys_chdir((const char __force_user *)"/root");
+       s = current->fs->pwd.dentry->d_sb;
+       ROOT_DEV = s->s_dev;
+       printk(KERN_INFO
+@@ -480,18 +480,18 @@ void __init change_floppy(char *fmt, ...)
+       va_start(args, fmt);
+       vsprintf(buf, fmt, args);
+       va_end(args);
+-      fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
++      fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
+       if (fd >= 0) {
+               sys_ioctl(fd, FDEJECT, 0);
+               sys_close(fd);
+       }
+       printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
+-      fd = sys_open("/dev/console", O_RDWR, 0);
++      fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
+       if (fd >= 0) {
+               sys_ioctl(fd, TCGETS, (long)&termios);
+               termios.c_lflag &= ~ICANON;
+               sys_ioctl(fd, TCSETSF, (long)&termios);
+-              sys_read(fd, &c, 1);
++              sys_read(fd, (char __user *)&c, 1);
+               termios.c_lflag |= ICANON;
+               sys_ioctl(fd, TCSETSF, (long)&termios);
+               sys_close(fd);
+@@ -585,6 +585,6 @@ void __init prepare_namespace(void)
+       mount_root();
+ out:
+       devtmpfs_mount("dev");
+-      sys_mount(".", "/", NULL, MS_MOVE, NULL);
+-      sys_chroot(".");
++      sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
++      sys_chroot((const char __force_user *)".");
+ }
+diff --git a/init/do_mounts.h b/init/do_mounts.h
+index f5b978a..69dbfe8 100644
+--- a/init/do_mounts.h
++++ b/init/do_mounts.h
+@@ -15,15 +15,15 @@ extern int root_mountflags;
+ static inline int create_dev(char *name, dev_t dev)
+ {
+-      sys_unlink(name);
+-      return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
++      sys_unlink((char __force_user *)name);
++      return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
+ }
+ #if BITS_PER_LONG == 32
+ static inline u32 bstat(char *name)
+ {
+       struct stat64 stat;
+-      if (sys_stat64(name, &stat) != 0)
++      if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
+               return 0;
+       if (!S_ISBLK(stat.st_mode))
+               return 0;
+@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
+ static inline u32 bstat(char *name)
+ {
+       struct stat stat;
+-      if (sys_newstat(name, &stat) != 0)
++      if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
+               return 0;
+       if (!S_ISBLK(stat.st_mode))
+               return 0;
+diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
+index 3e0878e..8a9d7a0 100644
+--- a/init/do_mounts_initrd.c
++++ b/init/do_mounts_initrd.c
+@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
+ {
+       sys_unshare(CLONE_FS | CLONE_FILES);
+       /* stdin/stdout/stderr for /linuxrc */
+-      sys_open("/dev/console", O_RDWR, 0);
++      sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
+       sys_dup(0);
+       sys_dup(0);
+       /* move initrd over / and chdir/chroot in initrd root */
+-      sys_chdir("/root");
+-      sys_mount(".", "/", NULL, MS_MOVE, NULL);
+-      sys_chroot(".");
++      sys_chdir((const char __force_user *)"/root");
++      sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
++      sys_chroot((const char __force_user *)".");
+       sys_setsid();
+       return 0;
+ }
+@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
+       create_dev("/dev/root.old", Root_RAM0);
+       /* mount initrd on rootfs' /root */
+       mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
+-      sys_mkdir("/old", 0700);
+-      sys_chdir("/old");
++      sys_mkdir((const char __force_user *)"/old", 0700);
++      sys_chdir((const char __force_user *)"/old");
+       /* try loading default modules from initrd */
+       load_default_modules();
+@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
+       current->flags &= ~PF_FREEZER_SKIP;
+       /* move initrd to rootfs' /old */
+-      sys_mount("..", ".", NULL, MS_MOVE, NULL);
++      sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
+       /* switch root and cwd back to / of rootfs */
+-      sys_chroot("..");
++      sys_chroot((const char __force_user *)"..");
+       if (new_decode_dev(real_root_dev) == Root_RAM0) {
+-              sys_chdir("/old");
++              sys_chdir((const char __force_user *)"/old");
+               return;
+       }
+-      sys_chdir("/");
++      sys_chdir((const char __force_user *)"/");
+       ROOT_DEV = new_decode_dev(real_root_dev);
+       mount_root();
+       printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
+-      error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
++      error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
+       if (!error)
+               printk("okay\n");
+       else {
+-              int fd = sys_open("/dev/root.old", O_RDWR, 0);
++              int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
+               if (error == -ENOENT)
+                       printk("/initrd does not exist. Ignored.\n");
+               else
+                       printk("failed\n");
+               printk(KERN_NOTICE "Unmounting old root\n");
+-              sys_umount("/old", MNT_DETACH);
++              sys_umount((char __force_user *)"/old", MNT_DETACH);
+               printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
+               if (fd < 0) {
+                       error = fd;
+@@ -127,11 +127,11 @@ int __init initrd_load(void)
+                * mounted in the normal path.
+                */
+               if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
+-                      sys_unlink("/initrd.image");
++                      sys_unlink((const char __force_user *)"/initrd.image");
+                       handle_initrd();
+                       return 1;
+               }
+       }
+-      sys_unlink("/initrd.image");
++      sys_unlink((const char __force_user *)"/initrd.image");
+       return 0;
+ }
+diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
+index 8cb6db5..d729f50 100644
+--- a/init/do_mounts_md.c
++++ b/init/do_mounts_md.c
+@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
+                       partitioned ? "_d" : "", minor,
+                       md_setup_args[ent].device_names);
+-              fd = sys_open(name, 0, 0);
++              fd = sys_open((char __force_user *)name, 0, 0);
+               if (fd < 0) {
+                       printk(KERN_ERR "md: open failed - cannot start "
+                                       "array %s\n", name);
+@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
+                        * array without it
+                        */
+                       sys_close(fd);
+-                      fd = sys_open(name, 0, 0);
++                      fd = sys_open((char __force_user *)name, 0, 0);
+                       sys_ioctl(fd, BLKRRPART, 0);
+               }
+               sys_close(fd);
+@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
+       wait_for_device_probe();
+-      fd = sys_open("/dev/md0", 0, 0);
++      fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
+       if (fd >= 0) {
+               sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
+               sys_close(fd);
+diff --git a/init/init_task.c b/init/init_task.c
+index ba0a7f36..2bcf1d5 100644
+--- a/init/init_task.c
++++ b/init/init_task.c
+@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
+  * Initial thread structure. Alignment of this is handled by a special
+  * linker map entry.
+  */
++#ifdef CONFIG_X86
++union thread_union init_thread_union __init_task_data;
++#else
+ union thread_union init_thread_union __init_task_data =
+       { INIT_THREAD_INFO(init_task) };
++#endif
+diff --git a/init/initramfs.c b/init/initramfs.c
+index a67ef9d..2d17ed9 100644
+--- a/init/initramfs.c
++++ b/init/initramfs.c
+@@ -84,7 +84,7 @@ static void __init free_hash(void)
+       }
+ }
+-static long __init do_utime(char *filename, time_t mtime)
++static long __init do_utime(char __force_user *filename, time_t mtime)
+ {
+       struct timespec t[2];
+@@ -119,7 +119,7 @@ static void __init dir_utime(void)
+       struct dir_entry *de, *tmp;
+       list_for_each_entry_safe(de, tmp, &dir_list, list) {
+               list_del(&de->list);
+-              do_utime(de->name, de->mtime);
++              do_utime((char __force_user *)de->name, de->mtime);
+               kfree(de->name);
+               kfree(de);
+       }
+@@ -281,7 +281,7 @@ static int __init maybe_link(void)
+       if (nlink >= 2) {
+               char *old = find_link(major, minor, ino, mode, collected);
+               if (old)
+-                      return (sys_link(old, collected) < 0) ? -1 : 1;
++                      return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
+       }
+       return 0;
+ }
+@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
+ {
+       struct stat st;
+-      if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
++      if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
+               if (S_ISDIR(st.st_mode))
+-                      sys_rmdir(path);
++                      sys_rmdir((char __force_user *)path);
+               else
+-                      sys_unlink(path);
++                      sys_unlink((char __force_user *)path);
+       }
+ }
+@@ -315,7 +315,7 @@ static int __init do_name(void)
+                       int openflags = O_WRONLY|O_CREAT;
+                       if (ml != 1)
+                               openflags |= O_TRUNC;
+-                      wfd = sys_open(collected, openflags, mode);
++                      wfd = sys_open((char __force_user *)collected, openflags, mode);
+                       if (wfd >= 0) {
+                               sys_fchown(wfd, uid, gid);
+@@ -327,17 +327,17 @@ static int __init do_name(void)
+                       }
+               }
+       } else if (S_ISDIR(mode)) {
+-              sys_mkdir(collected, mode);
+-              sys_chown(collected, uid, gid);
+-              sys_chmod(collected, mode);
++              sys_mkdir((char __force_user *)collected, mode);
++              sys_chown((char __force_user *)collected, uid, gid);
++              sys_chmod((char __force_user *)collected, mode);
+               dir_add(collected, mtime);
+       } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
+                  S_ISFIFO(mode) || S_ISSOCK(mode)) {
+               if (maybe_link() == 0) {
+-                      sys_mknod(collected, mode, rdev);
+-                      sys_chown(collected, uid, gid);
+-                      sys_chmod(collected, mode);
+-                      do_utime(collected, mtime);
++                      sys_mknod((char __force_user *)collected, mode, rdev);
++                      sys_chown((char __force_user *)collected, uid, gid);
++                      sys_chmod((char __force_user *)collected, mode);
++                      do_utime((char __force_user *)collected, mtime);
+               }
+       }
+       return 0;
+@@ -346,15 +346,15 @@ static int __init do_name(void)
+ static int __init do_copy(void)
+ {
+       if (count >= body_len) {
+-              sys_write(wfd, victim, body_len);
++              sys_write(wfd, (char __force_user *)victim, body_len);
+               sys_close(wfd);
+-              do_utime(vcollected, mtime);
++              do_utime((char __force_user *)vcollected, mtime);
+               kfree(vcollected);
+               eat(body_len);
+               state = SkipIt;
+               return 0;
+       } else {
+-              sys_write(wfd, victim, count);
++              sys_write(wfd, (char __force_user *)victim, count);
+               body_len -= count;
+               eat(count);
+               return 1;
+@@ -365,9 +365,9 @@ static int __init do_symlink(void)
+ {
+       collected[N_ALIGN(name_len) + body_len] = '\0';
+       clean_path(collected, 0);
+-      sys_symlink(collected + N_ALIGN(name_len), collected);
+-      sys_lchown(collected, uid, gid);
+-      do_utime(collected, mtime);
++      sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
++      sys_lchown((char __force_user *)collected, uid, gid);
++      do_utime((char __force_user *)collected, mtime);
+       state = SkipIt;
+       next_state = Reset;
+       return 0;
+@@ -583,7 +583,7 @@ static int __init populate_rootfs(void)
+ {
+       char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
+       if (err)
+-              panic(err);     /* Failed to decompress INTERNAL initramfs */
++              panic("%s", err);       /* Failed to decompress INTERNAL initramfs */
+       if (initrd_start) {
+ #ifdef CONFIG_BLK_DEV_RAM
+               int fd;
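
The one-line populate_rootfs() change above is a classic format-string fix: the error text returned by unpack_to_rootfs() is data, so it has to be printed through a "%s" conversion rather than being used as the format string itself. A small userspace sketch of the same rule:

#include <stdio.h>

/* If a variable message ever contains '%' sequences, using it directly as
 * the format string misparses them (and can dereference bogus varargs).
 * Passing it as an argument to a fixed "%s" format is always safe. */
static void report(const char *msg)
{
        /* risky:  printf(msg);   -- msg would be parsed as a format string */
        printf("%s\n", msg);      /* safe */
}

int main(void)
{
        report("decompress failed: ratio 100% at offset 42");
        return 0;
}
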
+diff --git a/init/main.c b/init/main.c
+index 9484f4b..0eac7c3 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -100,6 +100,8 @@ static inline void mark_rodata_ro(void) { }
+ extern void tc_init(void);
+ #endif
++extern void grsecurity_init(void);
++
+ /*
+  * Debug helper: via this flag we know that we are in 'early bootup code'
+  * where only the boot processor is running with IRQ disabled.  This means
+@@ -153,6 +155,74 @@ static int __init set_reset_devices(char *str)
+ __setup("reset_devices", set_reset_devices);
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
++static int __init setup_grsec_proc_gid(char *str)
++{
++      grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
++      return 1;
++}
++__setup("grsec_proc_gid=", setup_grsec_proc_gid);
++#endif
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++unsigned long pax_user_shadow_base __read_only;
++EXPORT_SYMBOL(pax_user_shadow_base);
++extern char pax_enter_kernel_user[];
++extern char pax_exit_kernel_user[];
++#endif
++
++#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
++static int __init setup_pax_nouderef(char *str)
++{
++#ifdef CONFIG_X86_32
++      unsigned int cpu;
++      struct desc_struct *gdt;
++
++      for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
++              gdt = get_cpu_gdt_table(cpu);
++              gdt[GDT_ENTRY_KERNEL_DS].type = 3;
++              gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
++              gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
++              gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
++      }
++      loadsegment(ds, __KERNEL_DS);
++      loadsegment(es, __KERNEL_DS);
++      loadsegment(ss, __KERNEL_DS);
++#else
++      memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
++      memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
++      clone_pgd_mask = ~(pgdval_t)0UL;
++      pax_user_shadow_base = 0UL;
++      setup_clear_cpu_cap(X86_FEATURE_PCID);
++#endif
++
++      return 0;
++}
++early_param("pax_nouderef", setup_pax_nouderef);
++
++#ifdef CONFIG_X86_64
++static int __init setup_pax_weakuderef(char *str)
++{
++      if (clone_pgd_mask != ~(pgdval_t)0UL)
++              pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
++      return 1;
++}
++__setup("pax_weakuderef", setup_pax_weakuderef);
++#endif
++#endif
++
++#ifdef CONFIG_PAX_SOFTMODE
++int pax_softmode;
++
++static int __init setup_pax_softmode(char *str)
++{
++      get_option(&str, &pax_softmode);
++      return 1;
++}
++__setup("pax_softmode=", setup_pax_softmode);
++#endif
++
+ static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
+ const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
+ static const char *panic_later, *panic_param;
+@@ -655,8 +725,6 @@ static void __init do_ctors(void)
+ bool initcall_debug;
+ core_param(initcall_debug, initcall_debug, bool, 0644);
+-static char msgbuf[64];
+-
+ static int __init_or_module do_one_initcall_debug(initcall_t fn)
+ {
+       ktime_t calltime, delta, rettime;
+@@ -679,23 +747,22 @@ int __init_or_module do_one_initcall(initcall_t fn)
+ {
+       int count = preempt_count();
+       int ret;
++      const char *msg1 = "", *msg2 = "";
+       if (initcall_debug)
+               ret = do_one_initcall_debug(fn);
+       else
+               ret = fn();
+-      msgbuf[0] = 0;
+-
+       if (preempt_count() != count) {
+-              sprintf(msgbuf, "preemption imbalance ");
++              msg1 = " preemption imbalance";
+               preempt_count() = count;
+       }
+       if (irqs_disabled()) {
+-              strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
++              msg2 = " disabled interrupts";
+               local_irq_enable();
+       }
+-      WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
++      WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
+       return ret;
+ }
+@@ -748,8 +815,14 @@ static void __init do_initcall_level(int level)
+                  level, level,
+                  &repair_env_string);
+-      for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
++      for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
+               do_one_initcall(*fn);
++
++#ifdef LATENT_ENTROPY_PLUGIN
++              add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
++#endif
++
++      }
+ }
+ static void __init do_initcalls(void)
+@@ -783,8 +856,14 @@ static void __init do_pre_smp_initcalls(void)
+ {
+       initcall_t *fn;
+-      for (fn = __initcall_start; fn < __initcall0_start; fn++)
++      for (fn = __initcall_start; fn < __initcall0_start; fn++) {
+               do_one_initcall(*fn);
++
++#ifdef LATENT_ENTROPY_PLUGIN
++              add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
++#endif
++
++      }
+ }
+ /*
+@@ -802,8 +881,8 @@ static int run_init_process(const char *init_filename)
+ {
+       argv_init[0] = init_filename;
+       return do_execve(init_filename,
+-              (const char __user *const __user *)argv_init,
+-              (const char __user *const __user *)envp_init);
++              (const char __user *const __force_user *)argv_init,
++              (const char __user *const __force_user *)envp_init);
+ }
+ static noinline void __init kernel_init_freeable(void);
+@@ -880,7 +959,7 @@ static noinline void __init kernel_init_freeable(void)
+       do_basic_setup();
+       /* Open the /dev/console on the rootfs, this should never fail */
+-      if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
++      if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
+               pr_err("Warning: unable to open an initial console.\n");
+       (void) sys_dup(0);
+@@ -893,11 +972,13 @@ static noinline void __init kernel_init_freeable(void)
+       if (!ramdisk_execute_command)
+               ramdisk_execute_command = "/init";
+-      if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
++      if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
+               ramdisk_execute_command = NULL;
+               prepare_namespace();
+       }
++      grsecurity_init();
++
+       /*
+        * Ok, we have completed the initial bootup, and
+        * we're essentially up and running. Get rid of the
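
Among the init/main.c changes above, the do_one_initcall() rework is easy to miss: the 64-byte writable static msgbuf is dropped and the warning is assembled from two const string fragments instead. A userspace sketch of the replacement logic (names are illustrative, not the kernel's):

#include <stdio.h>

/* Instead of sprintf/strlcat into a shared static buffer, pick const
 * fragments at run time and let a single format string join them. */
static void check_initcall_result(int preempt_imbalance, int irqs_disabled)
{
        const char *msg1 = "", *msg2 = "";

        if (preempt_imbalance)
                msg1 = " preemption imbalance";
        if (irqs_disabled)
                msg2 = " disabled interrupts";

        if (*msg1 || *msg2)
                printf("initcall returned with%s%s\n", msg1, msg2);
}

int main(void)
{
        check_initcall_result(1, 1);
        return 0;
}
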
+diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
+index 130dfec..cc88451 100644
+--- a/ipc/ipc_sysctl.c
++++ b/ipc/ipc_sysctl.c
+@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
+ static int proc_ipc_dointvec(ctl_table *table, int write,
+       void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table ipc_table;
++      ctl_table_no_const ipc_table;
+       memcpy(&ipc_table, table, sizeof(ipc_table));
+       ipc_table.data = get_ipc(table);
+@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
+ static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
+       void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table ipc_table;
++      ctl_table_no_const ipc_table;
+       memcpy(&ipc_table, table, sizeof(ipc_table));
+       ipc_table.data = get_ipc(table);
+@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
+ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
+       void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table ipc_table;
++      ctl_table_no_const ipc_table;
+       size_t lenp_bef = *lenp;
+       int rc;
+@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
+ static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
+       void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table ipc_table;
++      ctl_table_no_const ipc_table;
+       memcpy(&ipc_table, table, sizeof(ipc_table));
+       ipc_table.data = get_ipc(table);
+@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
+ static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
+       void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table ipc_table;
++      ctl_table_no_const ipc_table;
+       size_t lenp_bef = *lenp;
+       int oldval;
+       int rc;
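
The ipc/ipc_sysctl.c hunks above all make the same substitution: the proc handlers need a writable copy of a ctl_table, but under this patch set ctl_table instances are made read-only (constified) by a compiler plugin, so the stack copy is declared with a non-constified alias, ctl_table_no_const. A rough userspace sketch of the pattern, with stand-in types rather than the kernel's definitions:

#include <string.h>

/* Illustrative stand-ins: in the kernel, "struct ctl_table" is the
 * constified type and ctl_table_no_const is the writable alias. */
struct ctl_table_example {
        const char *procname;
        void       *data;
};
typedef struct ctl_table_example ctl_table_example_no_const;

static void *get_ipc_data(const struct ctl_table_example *table)
{
        return table->data;     /* the kernel resolves a per-namespace pointer here */
}

static int proc_dointvec_example(const struct ctl_table_example *table)
{
        ctl_table_example_no_const tmp;         /* writable stack copy */

        memcpy(&tmp, table, sizeof(tmp));
        tmp.data = get_ipc_data(table);
        /* ...hand &tmp to the generic proc handler... */
        return 0;
}

int main(void)
{
        struct ctl_table_example t = { "shmmax", 0 };
        return proc_dointvec_example(&t);
}
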
+diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
+index 383d638..943fdbb 100644
+--- a/ipc/mq_sysctl.c
++++ b/ipc/mq_sysctl.c
+@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
+ static int proc_mq_dointvec_minmax(ctl_table *table, int write,
+       void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table mq_table;
++      ctl_table_no_const mq_table;
+       memcpy(&mq_table, table, sizeof(mq_table));
+       mq_table.data = get_mq(table);
+diff --git a/ipc/mqueue.c b/ipc/mqueue.c
+index e4e47f6..a85e0ad 100644
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
+@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
+               mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
+                                         info->attr.mq_msgsize);
++              gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
+               spin_lock(&mq_lock);
+               if (u->mq_bytes + mq_bytes < u->mq_bytes ||
+                   u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
+diff --git a/ipc/msg.c b/ipc/msg.c
+index d0c6d96..69a893c 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -296,18 +296,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
+       return security_msg_queue_associate(msq, msgflg);
+ }
++static struct ipc_ops msg_ops = {
++      .getnew         = newque,
++      .associate      = msg_security,
++      .more_checks    = NULL
++};
++
+ SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
+ {
+       struct ipc_namespace *ns;
+-      struct ipc_ops msg_ops;
+       struct ipc_params msg_params;
+       ns = current->nsproxy->ipc_ns;
+-      msg_ops.getnew = newque;
+-      msg_ops.associate = msg_security;
+-      msg_ops.more_checks = NULL;
+-
+       msg_params.key = key;
+       msg_params.flg = msgflg;
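
The ipc/msg.c change above (repeated for sem.c and shm.c below) converts an operations struct that used to be filled in on the stack on every msgget()/semget()/shmget() call into a single file-scope static object, so the function pointers no longer live in a writable stack frame and can sit in protected data. A small sketch of the pattern with illustrative types (not the kernel's ipc_ops; the sketch adds const purely to underline the point):

#include <stdio.h>

struct ops_example {
        int (*getnew)(int key);
        int (*associate)(int key);
};

static int newque_example(int key)   { return key + 1; }
static int security_example(int key) { (void)key; return 0; }

/* One shared, read-only ops object instead of a per-call stack copy. */
static const struct ops_example msg_ops_example = {
        .getnew    = newque_example,
        .associate = security_example,
};

static int ipcget_example(const struct ops_example *ops, int key)
{
        if (ops->associate(key))
                return -1;
        return ops->getnew(key);
}

int main(void)
{
        printf("%d\n", ipcget_example(&msg_ops_example, 41));
        return 0;
}
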
+diff --git a/ipc/sem.c b/ipc/sem.c
+index 70480a3..f4e8262 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -460,10 +460,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
+       return 0;
+ }
++static struct ipc_ops sem_ops = {
++      .getnew         = newary,
++      .associate      = sem_security,
++      .more_checks    = sem_more_checks
++};
++
+ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
+ {
+       struct ipc_namespace *ns;
+-      struct ipc_ops sem_ops;
+       struct ipc_params sem_params;
+       ns = current->nsproxy->ipc_ns;
+@@ -471,10 +476,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
+       if (nsems < 0 || nsems > ns->sc_semmsl)
+               return -EINVAL;
+-      sem_ops.getnew = newary;
+-      sem_ops.associate = sem_security;
+-      sem_ops.more_checks = sem_more_checks;
+-
+       sem_params.key = key;
+       sem_params.flg = semflg;
+       sem_params.u.nsems = nsems;
+diff --git a/ipc/shm.c b/ipc/shm.c
+index 7e199fa..180a1ca 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
+ static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
+ #endif
++#ifdef CONFIG_GRKERNSEC
++extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++                         const time_t shm_createtime, const kuid_t cuid,
++                         const int shmid);
++extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++                         const time_t shm_createtime);
++#endif
++
+ void shm_init_ns(struct ipc_namespace *ns)
+ {
+       ns->shm_ctlmax = SHMMAX;
+@@ -531,6 +539,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+       shp->shm_lprid = 0;
+       shp->shm_atim = shp->shm_dtim = 0;
+       shp->shm_ctim = get_seconds();
++#ifdef CONFIG_GRKERNSEC
++      {
++              struct timespec timeval;
++              do_posix_clock_monotonic_gettime(&timeval);
++
++              shp->shm_createtime = timeval.tv_sec;
++      }
++#endif
+       shp->shm_segsz = size;
+       shp->shm_nattch = 0;
+       shp->shm_file = file;
+@@ -582,18 +598,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
+       return 0;
+ }
++static struct ipc_ops shm_ops = {
++      .getnew         = newseg,
++      .associate      = shm_security,
++      .more_checks    = shm_more_checks
++};
++
+ SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
+ {
+       struct ipc_namespace *ns;
+-      struct ipc_ops shm_ops;
+       struct ipc_params shm_params;
+       ns = current->nsproxy->ipc_ns;
+-      shm_ops.getnew = newseg;
+-      shm_ops.associate = shm_security;
+-      shm_ops.more_checks = shm_more_checks;
+-
+       shm_params.key = key;
+       shm_params.flg = shmflg;
+       shm_params.u.size = size;
+@@ -1014,6 +1031,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
+               f_mode = FMODE_READ | FMODE_WRITE;
+       }
+       if (shmflg & SHM_EXEC) {
++
++#ifdef CONFIG_PAX_MPROTECT
++              if (current->mm->pax_flags & MF_PAX_MPROTECT)
++                      goto out;
++#endif
++
+               prot |= PROT_EXEC;
+               acc_mode |= S_IXUGO;
+       }
+@@ -1037,9 +1060,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
+       if (err)
+               goto out_unlock;
++#ifdef CONFIG_GRKERNSEC
++      if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
++                           shp->shm_perm.cuid, shmid) ||
++          !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
++              err = -EACCES;
++              goto out_unlock;
++      }
++#endif
++
+       path = shp->shm_file->f_path;
+       path_get(&path);
+       shp->shm_nattch++;
++#ifdef CONFIG_GRKERNSEC
++      shp->shm_lapid = current->pid;
++#endif
+       size = i_size_read(path.dentry->d_inode);
+       shm_unlock(shp);
+diff --git a/kernel/acct.c b/kernel/acct.c
+index 8d6e145..33e0b1e 100644
+--- a/kernel/acct.c
++++ b/kernel/acct.c
+@@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
+        */
+       flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+       current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+-      file->f_op->write(file, (char *)&ac,
++      file->f_op->write(file, (char __force_user *)&ac,
+                              sizeof(acct_t), &file->f_pos);
+       current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
+       set_fs(fs);
+diff --git a/kernel/audit.c b/kernel/audit.c
+index 91e53d0..d9e3ec4 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -118,7 +118,7 @@ u32                audit_sig_sid = 0;
+    3) suppressed due to audit_rate_limit
+    4) suppressed due to audit_backlog_limit
+ */
+-static atomic_t    audit_lost = ATOMIC_INIT(0);
++static atomic_unchecked_t    audit_lost = ATOMIC_INIT(0);
+ /* The netlink socket. */
+ static struct sock *audit_sock;
+@@ -240,7 +240,7 @@ void audit_log_lost(const char *message)
+       unsigned long           now;
+       int                     print;
+-      atomic_inc(&audit_lost);
++      atomic_inc_unchecked(&audit_lost);
+       print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
+@@ -259,7 +259,7 @@ void audit_log_lost(const char *message)
+                       printk(KERN_WARNING
+                               "audit: audit_lost=%d audit_rate_limit=%d "
+                               "audit_backlog_limit=%d\n",
+-                              atomic_read(&audit_lost),
++                              atomic_read_unchecked(&audit_lost),
+                               audit_rate_limit,
+                               audit_backlog_limit);
+               audit_panic(message);
+@@ -664,7 +664,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+               status_set.pid           = audit_pid;
+               status_set.rate_limit    = audit_rate_limit;
+               status_set.backlog_limit = audit_backlog_limit;
+-              status_set.lost          = atomic_read(&audit_lost);
++              status_set.lost          = atomic_read_unchecked(&audit_lost);
+               status_set.backlog       = skb_queue_len(&audit_skb_queue);
+               audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
+                                &status_set, sizeof(status_set));
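
The kernel/audit.c hunks above switch audit_lost from atomic_t to atomic_unchecked_t. Under PaX's reference-counter hardening (PAX_REFCOUNT), ordinary atomic_t increments trap on overflow to stop refcount-overflow exploits; counters that are pure statistics and may legitimately wrap are moved to the *_unchecked variants so they are excluded from that check. A plain-C illustration of "checked" versus "unchecked" increments (the real kernel versions are arch-specific assembly):

#include <limits.h>
#include <stdio.h>

static int checked_inc(int *v)
{
        int out;

        if (__builtin_add_overflow(*v, 1, &out)) {
                fprintf(stderr, "refcount overflow caught\n");
                return -1;              /* the hardened kernel would trap here */
        }
        *v = out;
        return 0;
}

static void unchecked_inc(unsigned int *v)
{
        (*v)++;                         /* statistics counter: wrapping is harmless */
}

int main(void)
{
        int refcount = INT_MAX;
        unsigned int lost = 0;

        unchecked_inc(&lost);
        return checked_inc(&refcount);  /* reports the would-be overflow */
}
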
+diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
+index 6bd4a90..0ee9eff 100644
+--- a/kernel/auditfilter.c
++++ b/kernel/auditfilter.c
+@@ -423,7 +423,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
+               f->lsm_rule = NULL;
+               /* Support legacy tests for a valid loginuid */
+-              if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295)) {
++              if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295U)) {
+                       f->type = AUDIT_LOGINUID_SET;
+                       f->val = 0;
+               }
+diff --git a/kernel/auditsc.c b/kernel/auditsc.c
+index 3c8a601..3a416f6 100644
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -1956,7 +1956,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
+ }
+ /* global counter which is incremented every time something logs in */
+-static atomic_t session_id = ATOMIC_INIT(0);
++static atomic_unchecked_t session_id = ATOMIC_INIT(0);
+ /**
+  * audit_set_loginuid - set current task's audit_context loginuid
+@@ -1980,7 +1980,7 @@ int audit_set_loginuid(kuid_t loginuid)
+               return -EPERM;
+ #endif  /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
+-      sessionid = atomic_inc_return(&session_id);
++      sessionid = atomic_inc_return_unchecked(&session_id);
+       if (context && context->in_syscall) {
+               struct audit_buffer *ab;
+diff --git a/kernel/capability.c b/kernel/capability.c
+index f6c2ce5..982c0f9 100644
+--- a/kernel/capability.c
++++ b/kernel/capability.c
+@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
+                * before modification is attempted and the application
+                * fails.
+                */
++              if (tocopy > ARRAY_SIZE(kdata))
++                      return -EFAULT;
++
+               if (copy_to_user(dataptr, kdata, tocopy
+                                * sizeof(struct __user_cap_data_struct))) {
+                       return -EFAULT;
+@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
+       int ret;
+       rcu_read_lock();
+-      ret = security_capable(__task_cred(t), ns, cap);
++      ret = security_capable(__task_cred(t), ns, cap) == 0 &&
++              gr_task_is_capable(t, __task_cred(t), cap);
+       rcu_read_unlock();
+-      return (ret == 0);
++      return ret;
+ }
+ /**
+@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
+       int ret;
+       rcu_read_lock();
+-      ret = security_capable_noaudit(__task_cred(t), ns, cap);
++      ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
+       rcu_read_unlock();
+-      return (ret == 0);
++      return ret;
+ }
+ /**
+@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
+               BUG();
+       }
+-      if (security_capable(current_cred(), ns, cap) == 0) {
++      if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
+               current->flags |= PF_SUPERPRIV;
+               return true;
+       }
+@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
+ }
+ EXPORT_SYMBOL(ns_capable);
++bool ns_capable_nolog(struct user_namespace *ns, int cap)
++{
++      if (unlikely(!cap_valid(cap))) {
++              printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
++              BUG();
++      }
++
++      if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
++              current->flags |= PF_SUPERPRIV;
++              return true;
++      }
++      return false;
++}
++EXPORT_SYMBOL(ns_capable_nolog);
++
+ /**
+  * file_ns_capable - Determine if the file's opener had a capability in effect
+  * @file:  The file we want to check
+@@ -432,6 +451,12 @@ bool capable(int cap)
+ }
+ EXPORT_SYMBOL(capable);
++bool capable_nolog(int cap)
++{
++      return ns_capable_nolog(&init_user_ns, cap);
++}
++EXPORT_SYMBOL(capable_nolog);
++
+ /**
+  * nsown_capable - Check superior capability to one's own user_ns
+  * @cap: The capability in question
+@@ -464,3 +489,10 @@ bool inode_capable(const struct inode *inode, int cap)
+       return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
+ }
++
++bool inode_capable_nolog(const struct inode *inode, int cap)
++{
++      struct user_namespace *ns = current_user_ns();
++
++      return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
++}
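
In the kernel/capability.c hunks above, helpers that previously computed an LSM result and returned (ret == 0) are rewritten to fold that comparison in and AND it with an additional RBAC hook, returning the bool directly. A compact sketch of the control flow, with placeholder functions standing in for security_capable() and the gr_* hooks:

#include <stdbool.h>
#include <stdio.h>

/* 0 means "allowed", mirroring the LSM convention. */
static int lsm_capable(int cap)          { return cap == 7 ? 0 : -1; }
static bool extra_policy_allows(int cap) { (void)cap; return true; }

static bool has_capability_example(int cap)
{
        /* Both the stock check and the extra policy must agree. */
        return lsm_capable(cap) == 0 && extra_policy_allows(cap);
}

int main(void)
{
        printf("cap 7: %d, cap 8: %d\n",
               has_capability_example(7), has_capability_example(8));
        return 0;
}
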
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 2e9b387..61817b1 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -5398,7 +5398,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
+               struct css_set *cg = link->cg;
+               struct task_struct *task;
+               int count = 0;
+-              seq_printf(seq, "css_set %p\n", cg);
++              seq_printf(seq, "css_set %pK\n", cg);
+               list_for_each_entry(task, &cg->tasks, cg_list) {
+                       if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
+                               seq_puts(seq, "  ...\n");
+diff --git a/kernel/compat.c b/kernel/compat.c
+index 0a09e48..f44f3f0 100644
+--- a/kernel/compat.c
++++ b/kernel/compat.c
+@@ -13,6 +13,7 @@
+ #include <linux/linkage.h>
+ #include <linux/compat.h>
++#include <linux/module.h>
+ #include <linux/errno.h>
+ #include <linux/time.h>
+ #include <linux/signal.h>
+@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
+       mm_segment_t oldfs;
+       long ret;
+-      restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
++      restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       ret = hrtimer_nanosleep_restart(restart);
+@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       ret = hrtimer_nanosleep(&tu,
+-                              rmtp ? (struct timespec __user *)&rmt : NULL,
++                              rmtp ? (struct timespec __force_user *)&rmt : NULL,
+                               HRTIMER_MODE_REL, CLOCK_MONOTONIC);
+       set_fs(oldfs);
+@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
+       mm_segment_t old_fs = get_fs();
+       set_fs(KERNEL_DS);
+-      ret = sys_sigpending((old_sigset_t __user *) &s);
++      ret = sys_sigpending((old_sigset_t __force_user *) &s);
+       set_fs(old_fs);
+       if (ret == 0)
+               ret = put_user(s, set);
+@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
+       mm_segment_t old_fs = get_fs();
+       set_fs(KERNEL_DS);
+-      ret = sys_old_getrlimit(resource, &r);
++      ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
+       set_fs(old_fs);
+       if (!ret) {
+@@ -533,8 +534,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
+               set_fs (KERNEL_DS);
+               ret = sys_wait4(pid,
+                               (stat_addr ?
+-                               (unsigned int __user *) &status : NULL),
+-                              options, (struct rusage __user *) &r);
++                               (unsigned int __force_user *) &status : NULL),
++                              options, (struct rusage __force_user *) &r);
+               set_fs (old_fs);
+               if (ret > 0) {
+@@ -560,8 +561,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
+       memset(&info, 0, sizeof(info));
+       set_fs(KERNEL_DS);
+-      ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
+-                       uru ? (struct rusage __user *)&ru : NULL);
++      ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
++                       uru ? (struct rusage __force_user *)&ru : NULL);
+       set_fs(old_fs);
+       if ((ret < 0) || (info.si_signo == 0))
+@@ -695,8 +696,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       err = sys_timer_settime(timer_id, flags,
+-                              (struct itimerspec __user *) &newts,
+-                              (struct itimerspec __user *) &oldts);
++                              (struct itimerspec __force_user *) &newts,
++                              (struct itimerspec __force_user *) &oldts);
+       set_fs(oldfs);
+       if (!err && old && put_compat_itimerspec(old, &oldts))
+               return -EFAULT;
+@@ -713,7 +714,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       err = sys_timer_gettime(timer_id,
+-                              (struct itimerspec __user *) &ts);
++                              (struct itimerspec __force_user *) &ts);
+       set_fs(oldfs);
+       if (!err && put_compat_itimerspec(setting, &ts))
+               return -EFAULT;
+@@ -732,7 +733,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       err = sys_clock_settime(which_clock,
+-                              (struct timespec __user *) &ts);
++                              (struct timespec __force_user *) &ts);
+       set_fs(oldfs);
+       return err;
+ }
+@@ -747,7 +748,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       err = sys_clock_gettime(which_clock,
+-                              (struct timespec __user *) &ts);
++                              (struct timespec __force_user *) &ts);
+       set_fs(oldfs);
+       if (!err && put_compat_timespec(&ts, tp))
+               return -EFAULT;
+@@ -767,7 +768,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+-      ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
++      ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
+       set_fs(oldfs);
+       err = compat_put_timex(utp, &txc);
+@@ -787,7 +788,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       err = sys_clock_getres(which_clock,
+-                             (struct timespec __user *) &ts);
++                             (struct timespec __force_user *) &ts);
+       set_fs(oldfs);
+       if (!err && tp && put_compat_timespec(&ts, tp))
+               return -EFAULT;
+@@ -799,9 +800,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
+       long err;
+       mm_segment_t oldfs;
+       struct timespec tu;
+-      struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
++      struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
+-      restart->nanosleep.rmtp = (struct timespec __user *) &tu;
++      restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       err = clock_nanosleep_restart(restart);
+@@ -833,8 +834,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       err = sys_clock_nanosleep(which_clock, flags,
+-                                (struct timespec __user *) &in,
+-                                (struct timespec __user *) &out);
++                                (struct timespec __force_user *) &in,
++                                (struct timespec __force_user *) &out);
+       set_fs(oldfs);
+       if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
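
The kernel/compat.c hunks above follow one recurring shape: a compat syscall copies the 32-bit userspace argument into a kernel-stack variable, widens the address limit with set_fs(KERNEL_DS), calls the native syscall on that kernel pointer, and restores the old limit; the added __force_user casts merely tell sparse that this hand-off is intentional. A userspace stand-in for the bracket pattern (none of these helpers are the kernel's real API):

#include <stdio.h>

typedef int mm_segment_t;
#define USER_DS   0
#define KERNEL_DS 1

static mm_segment_t addr_limit = USER_DS;
static mm_segment_t get_fs(void)            { return addr_limit; }
static void         set_fs(mm_segment_t fs) { addr_limit = fs; }

/* Stand-in for a native syscall that normally expects a __user pointer. */
static long native_syscall(const int *arg)  { return *arg; }

static long compat_wrapper(int value32)
{
        int kernel_copy = value32;          /* translated/widened kernel copy */
        mm_segment_t oldfs = get_fs();
        long ret;

        set_fs(KERNEL_DS);                  /* let the syscall accept a kernel pointer */
        ret = native_syscall(&kernel_copy);
        set_fs(oldfs);                      /* always restore the old limit */
        return ret;
}

int main(void)
{
        printf("%ld\n", compat_wrapper(42));
        return 0;
}
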
+diff --git a/kernel/configs.c b/kernel/configs.c
+index c18b1f1..b9a0132 100644
+--- a/kernel/configs.c
++++ b/kernel/configs.c
+@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
+       struct proc_dir_entry *entry;
+       /* create the current config file */
++#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
++      entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
++                          &ikconfig_file_ops);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
++                          &ikconfig_file_ops);
++#endif
++#else
+       entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
+                           &ikconfig_file_ops);
++#endif
++
+       if (!entry)
+               return -ENOMEM;
+diff --git a/kernel/cred.c b/kernel/cred.c
+index e0573a4..3874e41 100644
+--- a/kernel/cred.c
++++ b/kernel/cred.c
+@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
+       validate_creds(cred);
+       alter_cred_subscribers(cred, -1);
+       put_cred(cred);
++
++#ifdef CONFIG_GRKERNSEC_SETXID
++      cred = (struct cred *) tsk->delayed_cred;
++      if (cred != NULL) {
++              tsk->delayed_cred = NULL;
++              validate_creds(cred);
++              alter_cred_subscribers(cred, -1);
++              put_cred(cred);
++      }
++#endif
+ }
+ /**
+@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
+  * Always returns 0 thus allowing this function to be tail-called at the end
+  * of, say, sys_setgid().
+  */
+-int commit_creds(struct cred *new)
++static int __commit_creds(struct cred *new)
+ {
+       struct task_struct *task = current;
+       const struct cred *old = task->real_cred;
+@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
+       get_cred(new); /* we will require a ref for the subj creds too */
++      gr_set_role_label(task, new->uid, new->gid);
++
+       /* dumpability changes */
+       if (!uid_eq(old->euid, new->euid) ||
+           !gid_eq(old->egid, new->egid) ||
+@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
+       put_cred(old);
+       return 0;
+ }
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern int set_user(struct cred *new);
++
++void gr_delayed_cred_worker(void)
++{
++      const struct cred *new = current->delayed_cred;
++      struct cred *ncred;
++
++      current->delayed_cred = NULL;
++
++      if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
++              // from doing get_cred on it when queueing this
++              put_cred(new);
++              return;
++      } else if (new == NULL)
++              return;
++
++      ncred = prepare_creds();
++      if (!ncred)
++              goto die;
++      // uids
++      ncred->uid = new->uid;
++      ncred->euid = new->euid;
++      ncred->suid = new->suid;
++      ncred->fsuid = new->fsuid;
++      // gids
++      ncred->gid = new->gid;
++      ncred->egid = new->egid;
++      ncred->sgid = new->sgid;
++      ncred->fsgid = new->fsgid;
++      // groups
++      if (set_groups(ncred, new->group_info) < 0) {
++              abort_creds(ncred);
++              goto die;
++      }
++      // caps
++      ncred->securebits = new->securebits;
++      ncred->cap_inheritable = new->cap_inheritable;
++      ncred->cap_permitted = new->cap_permitted;
++      ncred->cap_effective = new->cap_effective;
++      ncred->cap_bset = new->cap_bset;
++
++      if (set_user(ncred)) {
++              abort_creds(ncred);
++              goto die;
++      }
++
++      // from doing get_cred on it when queueing this
++      put_cred(new);
++
++      __commit_creds(ncred);
++      return;
++die:
++      // from doing get_cred on it when queueing this
++      put_cred(new);
++      do_group_exit(SIGKILL);
++}
++#endif
++
++int commit_creds(struct cred *new)
++{
++#ifdef CONFIG_GRKERNSEC_SETXID
++      int ret;
++      int schedule_it = 0;
++      struct task_struct *t;
++
++      /* we won't get called with tasklist_lock held for writing
++         and interrupts disabled as the cred struct in that case is
++         init_cred
++      */
++      if (grsec_enable_setxid && !current_is_single_threaded() &&
++          uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
++          !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
++              schedule_it = 1;
++      }
++      ret = __commit_creds(new);
++      if (schedule_it) {
++              rcu_read_lock();
++              read_lock(&tasklist_lock);
++              for (t = next_thread(current); t != current;
++                   t = next_thread(t)) {
++                      if (t->delayed_cred == NULL) {
++                              t->delayed_cred = get_cred(new);
++                              set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
++                              set_tsk_need_resched(t);
++                      }
++              }
++              read_unlock(&tasklist_lock);
++              rcu_read_unlock();
++      }
++      return ret;
++#else
++      return __commit_creds(new);
++#endif
++}
++
+ EXPORT_SYMBOL(commit_creds);
+ /**
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index 0506d44..2c20034 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -123,7 +123,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
+  */
+ static atomic_t                       masters_in_kgdb;
+ static atomic_t                       slaves_in_kgdb;
+-static atomic_t                       kgdb_break_tasklet_var;
++static atomic_unchecked_t     kgdb_break_tasklet_var;
+ atomic_t                      kgdb_setting_breakpoint;
+ struct task_struct            *kgdb_usethread;
+@@ -133,7 +133,7 @@ int                                kgdb_single_step;
+ static pid_t                  kgdb_sstep_pid;
+ /* to keep track of the CPU which is doing the single stepping*/
+-atomic_t                      kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
++atomic_unchecked_t            kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
+ /*
+  * If you are debugging a problem where roundup (the collection of
+@@ -541,7 +541,7 @@ return_normal:
+        * kernel will only try for the value of sstep_tries before
+        * giving up and continuing on.
+        */
+-      if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
++      if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
+           (kgdb_info[cpu].task &&
+            kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
+               atomic_set(&kgdb_active, -1);
+@@ -635,8 +635,8 @@ cpu_master_loop:
+       }
+ kgdb_restore:
+-      if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
+-              int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
++      if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
++              int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
+               if (kgdb_info[sstep_cpu].task)
+                       kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
+               else
+@@ -888,18 +888,18 @@ static void kgdb_unregister_callbacks(void)
+ static void kgdb_tasklet_bpt(unsigned long ing)
+ {
+       kgdb_breakpoint();
+-      atomic_set(&kgdb_break_tasklet_var, 0);
++      atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
+ }
+ static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
+ void kgdb_schedule_breakpoint(void)
+ {
+-      if (atomic_read(&kgdb_break_tasklet_var) ||
++      if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
+               atomic_read(&kgdb_active) != -1 ||
+               atomic_read(&kgdb_setting_breakpoint))
+               return;
+-      atomic_inc(&kgdb_break_tasklet_var);
++      atomic_inc_unchecked(&kgdb_break_tasklet_var);
+       tasklet_schedule(&kgdb_tasklet_breakpoint);
+ }
+ EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
+diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
+index 00eb8f7..d7e3244 100644
+--- a/kernel/debug/kdb/kdb_main.c
++++ b/kernel/debug/kdb/kdb_main.c
+@@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
+                       continue;
+               kdb_printf("%-20s%8u  0x%p ", mod->name,
+-                         mod->core_size, (void *)mod);
++                         mod->core_size_rx + mod->core_size_rw, (void *)mod);
+ #ifdef CONFIG_MODULE_UNLOAD
+               kdb_printf("%4ld ", module_refcount(mod));
+ #endif
+@@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
+                       kdb_printf(" (Loading)");
+               else
+                       kdb_printf(" (Live)");
+-              kdb_printf(" 0x%p", mod->module_core);
++              kdb_printf(" 0x%p 0x%p", mod->module_core_rx,  mod->module_core_rw);
+ #ifdef CONFIG_MODULE_UNLOAD
+               {
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index e76e495..cbfe63a 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -156,8 +156,15 @@ static struct srcu_struct pmus_srcu;
+  *   0 - disallow raw tracepoint access for unpriv
+  *   1 - disallow cpu events for unpriv
+  *   2 - disallow kernel profiling for unpriv
++ *   3 - disallow all unpriv perf event use
+  */
+-int sysctl_perf_event_paranoid __read_mostly = 1;
++#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
++int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
++#elif defined(CONFIG_GRKERNSEC_HIDESYM)
++int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
++#else
++int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
++#endif
+ /* Minimum for 512 kiB + 1 user control page */
+ int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
+@@ -184,7 +191,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
+       return 0;
+ }
+-static atomic64_t perf_event_id;
++static atomic64_unchecked_t perf_event_id;
+ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+                             enum event_type_t event_type);
+@@ -2747,7 +2754,7 @@ static void __perf_event_read(void *info)
+ static inline u64 perf_event_count(struct perf_event *event)
+ {
+-      return local64_read(&event->count) + atomic64_read(&event->child_count);
++      return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
+ }
+ static u64 perf_event_read(struct perf_event *event)
+@@ -3093,9 +3100,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
+       mutex_lock(&event->child_mutex);
+       total += perf_event_read(event);
+       *enabled += event->total_time_enabled +
+-                      atomic64_read(&event->child_total_time_enabled);
++                      atomic64_read_unchecked(&event->child_total_time_enabled);
+       *running += event->total_time_running +
+-                      atomic64_read(&event->child_total_time_running);
++                      atomic64_read_unchecked(&event->child_total_time_running);
+       list_for_each_entry(child, &event->child_list, child_list) {
+               total += perf_event_read(child);
+@@ -3481,10 +3488,10 @@ void perf_event_update_userpage(struct perf_event *event)
+               userpg->offset -= local64_read(&event->hw.prev_count);
+       userpg->time_enabled = enabled +
+-                      atomic64_read(&event->child_total_time_enabled);
++                      atomic64_read_unchecked(&event->child_total_time_enabled);
+       userpg->time_running = running +
+-                      atomic64_read(&event->child_total_time_running);
++                      atomic64_read_unchecked(&event->child_total_time_running);
+       arch_perf_update_userpage(userpg, now);
+@@ -4034,7 +4041,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
+               /* Data. */
+               sp = perf_user_stack_pointer(regs);
+-              rem = __output_copy_user(handle, (void *) sp, dump_size);
++              rem = __output_copy_user(handle, (void __user *) sp, dump_size);
+               dyn_size = dump_size - rem;
+               perf_output_skip(handle, rem);
+@@ -4122,11 +4129,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
+       values[n++] = perf_event_count(event);
+       if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+               values[n++] = enabled +
+-                      atomic64_read(&event->child_total_time_enabled);
++                      atomic64_read_unchecked(&event->child_total_time_enabled);
+       }
+       if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+               values[n++] = running +
+-                      atomic64_read(&event->child_total_time_running);
++                      atomic64_read_unchecked(&event->child_total_time_running);
+       }
+       if (read_format & PERF_FORMAT_ID)
+               values[n++] = primary_event_id(event);
+@@ -4835,12 +4842,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
+                * need to add enough zero bytes after the string to handle
+                * the 64bit alignment we do later.
+                */
+-              buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
++              buf = kzalloc(PATH_MAX, GFP_KERNEL);
+               if (!buf) {
+                       name = strncpy(tmp, "//enomem", sizeof(tmp));
+                       goto got_name;
+               }
+-              name = d_path(&file->f_path, buf, PATH_MAX);
++              name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
+               if (IS_ERR(name)) {
+                       name = strncpy(tmp, "//toolong", sizeof(tmp));
+                       goto got_name;
+@@ -6262,7 +6269,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+       event->parent           = parent_event;
+       event->ns               = get_pid_ns(task_active_pid_ns(current));
+-      event->id               = atomic64_inc_return(&perf_event_id);
++      event->id               = atomic64_inc_return_unchecked(&perf_event_id);
+       event->state            = PERF_EVENT_STATE_INACTIVE;
+@@ -6572,6 +6579,11 @@ SYSCALL_DEFINE5(perf_event_open,
+       if (flags & ~PERF_FLAG_ALL)
+               return -EINVAL;
++#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
++      if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
++              return -EACCES;
++#endif
++
+       err = perf_copy_attr(attr_uptr, &attr);
+       if (err)
+               return err;
+@@ -6904,10 +6916,10 @@ static void sync_child_event(struct perf_event *child_event,
+       /*
+        * Add back the child's count to the parent's count:
+        */
+-      atomic64_add(child_val, &parent_event->child_count);
+-      atomic64_add(child_event->total_time_enabled,
++      atomic64_add_unchecked(child_val, &parent_event->child_count);
++      atomic64_add_unchecked(child_event->total_time_enabled,
+                    &parent_event->child_total_time_enabled);
+-      atomic64_add(child_event->total_time_running,
++      atomic64_add_unchecked(child_event->total_time_running,
+                    &parent_event->child_total_time_running);
+       /*
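
The kernel/events/core.c hunks above extend perf_event_paranoid with a level 3 ("disallow all unpriv perf event use") and reject perf_event_open() early unless the caller holds CAP_SYS_ADMIN. A toy model of that gate, with stand-in values and helpers rather than the kernel's symbols:

#include <stdio.h>

#define EACCES_EXAMPLE 13

static int perf_paranoid_level = 3;     /* CONFIG_GRKERNSEC_PERF_HARDEN default */
static int has_cap_sys_admin   = 0;

static int perf_event_open_example(void)
{
        if (perf_paranoid_level > 2 && !has_cap_sys_admin)
                return -EACCES_EXAMPLE; /* unprivileged use refused outright */
        return 3;                       /* would otherwise return a new perf fd */
}

int main(void)
{
        printf("perf_event_open -> %d\n", perf_event_open_example());
        return 0;
}
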
+diff --git a/kernel/events/internal.h b/kernel/events/internal.h
+index ca65997..cc8cee4 100644
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
+       return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
+ }
+-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)                    \
++#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user)              \
+ static inline unsigned int                                            \
+ func_name(struct perf_output_handle *handle,                          \
+-        const void *buf, unsigned int len)                            \
++        const void user *buf, unsigned int len)                       \
+ {                                                                     \
+       unsigned long size, written;                                    \
+                                                                       \
+@@ -116,17 +116,17 @@ static inline int memcpy_common(void *dst, const void *src, size_t n)
+       return n;
+ }
+-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
++DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
+ #define MEMCPY_SKIP(dst, src, n) (n)
+-DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)
++DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP, )
+ #ifndef arch_perf_out_copy_user
+ #define arch_perf_out_copy_user __copy_from_user_inatomic
+ #endif
+-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
++DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
+ /* Callchain handling */
+ extern struct perf_callchain_entry *
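
The kernel/events/internal.h hunk above gives DEFINE_OUTPUT_COPY() an extra parameter that is spliced into the source-pointer type, so the same template can emit one copier for plain kernel pointers (pass nothing) and one for __user-annotated pointers (pass __user). A compilable userspace illustration of the macro trick, with the annotation reduced to an empty macro so it builds outside the kernel:

#include <stdio.h>
#include <string.h>

#define __user_example          /* expands to the sparse attribute in the kernel */

#define DEFINE_COPY_EXAMPLE(name, copy_expr, user)                      \
static size_t name(void *dst, const void user *src, size_t len)         \
{                                                                       \
        return copy_expr(dst, src, len);                                \
}

static size_t memcpy_wrap(void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);
        return len;
}

DEFINE_COPY_EXAMPLE(copy_kernel_example, memcpy_wrap, )                 /* kernel source */
DEFINE_COPY_EXAMPLE(copy_user_example,   memcpy_wrap, __user_example)   /* "user" source */

int main(void)
{
        char dst[8];

        copy_kernel_example(dst, "abc", 4);
        copy_user_example(dst, "xyz", 4);
        printf("%s\n", dst);
        return 0;
}
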
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 7bb73f9..d7978ed 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -172,6 +172,10 @@ void release_task(struct task_struct * p)
+       struct task_struct *leader;
+       int zap_leader;
+ repeat:
++#ifdef CONFIG_NET
++      gr_del_task_from_ip_table(p);
++#endif
++
+       /* don't need to get the RCU readlock here - the process is dead and
+        * can't be modifying its own credentials. But shut RCU-lockdep up */
+       rcu_read_lock();
+@@ -340,7 +344,7 @@ int allow_signal(int sig)
+        * know it'll be handled, so that they don't get converted to
+        * SIGKILL or just silently dropped.
+        */
+-      current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
++      current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+       return 0;
+@@ -709,6 +713,8 @@ void do_exit(long code)
+       struct task_struct *tsk = current;
+       int group_dead;
++      set_fs(USER_DS);
++
+       profile_task_exit(tsk);
+       WARN_ON(blk_needs_flush_plug(tsk));
+@@ -725,7 +731,6 @@ void do_exit(long code)
+        * mm_release()->clear_child_tid() from writing to a user-controlled
+        * kernel address.
+        */
+-      set_fs(USER_DS);
+       ptrace_event(PTRACE_EVENT_EXIT, code);
+@@ -784,6 +789,9 @@ void do_exit(long code)
+       tsk->exit_code = code;
+       taskstats_exit(tsk, group_dead);
++      gr_acl_handle_psacct(tsk, code);
++      gr_acl_handle_exit();
++
+       exit_mm(tsk);
+       if (group_dead)
+@@ -905,7 +913,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
+  * Take down every thread in the group.  This is called by fatal signals
+  * as well as by sys_exit_group (below).
+  */
+-void
++__noreturn void
+ do_group_exit(int exit_code)
+ {
+       struct signal_struct *sig = current->signal;
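
Besides moving set_fs(USER_DS) to the very top of do_exit(), the kernel/exit.c hunks above annotate do_group_exit() as __noreturn. Marking a function that never returns lets the compiler prune dead code after its call sites and avoids spurious "missing return" warnings in callers. A short sketch of the attribute (the kernel's __noreturn effectively expands to the GCC attribute used here):

#include <stdio.h>
#include <stdlib.h>

__attribute__((noreturn))
static void die_example(int code)
{
        fprintf(stderr, "exiting with %d\n", code);
        exit(code);
}

static int checked_op(int v)
{
        if (v < 0)
                die_example(1);
        return v * 2;   /* the compiler knows die_example() never comes back */
}

int main(void)
{
        return checked_op(21) == 42 ? 0 : 1;
}
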
+diff --git a/kernel/fork.c b/kernel/fork.c
+index ffbc090..08ceeee 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
+       *stackend = STACK_END_MAGIC;    /* for overflow detection */
+ #ifdef CONFIG_CC_STACKPROTECTOR
+-      tsk->stack_canary = get_random_int();
++      tsk->stack_canary = pax_get_random_long();
+ #endif
+       /*
+@@ -345,13 +345,81 @@ free_tsk:
+ }
+ #ifdef CONFIG_MMU
++static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
++{
++      struct vm_area_struct *tmp;
++      unsigned long charge;
++      struct mempolicy *pol;
++      struct file *file;
++
++      charge = 0;
++      if (mpnt->vm_flags & VM_ACCOUNT) {
++              unsigned long len = vma_pages(mpnt);
++
++              if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
++                      goto fail_nomem;
++              charge = len;
++      }
++      tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
++      if (!tmp)
++              goto fail_nomem;
++      *tmp = *mpnt;
++      tmp->vm_mm = mm;
++      INIT_LIST_HEAD(&tmp->anon_vma_chain);
++      pol = mpol_dup(vma_policy(mpnt));
++      if (IS_ERR(pol))
++              goto fail_nomem_policy;
++      vma_set_policy(tmp, pol);
++      if (anon_vma_fork(tmp, mpnt))
++              goto fail_nomem_anon_vma_fork;
++      tmp->vm_flags &= ~VM_LOCKED;
++      tmp->vm_next = tmp->vm_prev = NULL;
++      tmp->vm_mirror = NULL;
++      file = tmp->vm_file;
++      if (file) {
++              struct inode *inode = file_inode(file);
++              struct address_space *mapping = file->f_mapping;
++
++              get_file(file);
++              if (tmp->vm_flags & VM_DENYWRITE)
++                      atomic_dec(&inode->i_writecount);
++              mutex_lock(&mapping->i_mmap_mutex);
++              if (tmp->vm_flags & VM_SHARED)
++                      mapping->i_mmap_writable++;
++              flush_dcache_mmap_lock(mapping);
++              /* insert tmp into the share list, just after mpnt */
++              if (unlikely(tmp->vm_flags & VM_NONLINEAR))
++                      vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
++              else
++                      vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
++              flush_dcache_mmap_unlock(mapping);
++              mutex_unlock(&mapping->i_mmap_mutex);
++      }
++
++      /*
++       * Clear hugetlb-related page reserves for children. This only
++       * affects MAP_PRIVATE mappings. Faults generated by the child
++       * are not guaranteed to succeed, even if read-only
++       */
++      if (is_vm_hugetlb_page(tmp))
++              reset_vma_resv_huge_pages(tmp);
++
++      return tmp;
++
++fail_nomem_anon_vma_fork:
++      mpol_put(pol);
++fail_nomem_policy:
++      kmem_cache_free(vm_area_cachep, tmp);
++fail_nomem:
++      vm_unacct_memory(charge);
++      return NULL;
++}
++
+ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+ {
+       struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
+       struct rb_node **rb_link, *rb_parent;
+       int retval;
+-      unsigned long charge;
+-      struct mempolicy *pol;
+       uprobe_start_dup_mmap();
+       down_write(&oldmm->mmap_sem);
+@@ -365,8 +433,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+       mm->locked_vm = 0;
+       mm->mmap = NULL;
+       mm->mmap_cache = NULL;
+-      mm->free_area_cache = oldmm->mmap_base;
+-      mm->cached_hole_size = ~0UL;
++      mm->free_area_cache = oldmm->free_area_cache;
++      mm->cached_hole_size = oldmm->cached_hole_size;
+       mm->map_count = 0;
+       cpumask_clear(mm_cpumask(mm));
+       mm->mm_rb = RB_ROOT;
+@@ -382,57 +450,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+       prev = NULL;
+       for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
+-              struct file *file;
+-
+               if (mpnt->vm_flags & VM_DONTCOPY) {
+                       vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
+                                                       -vma_pages(mpnt));
+                       continue;
+               }
+-              charge = 0;
+-              if (mpnt->vm_flags & VM_ACCOUNT) {
+-                      unsigned long len = vma_pages(mpnt);
+-
+-                      if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
+-                              goto fail_nomem;
+-                      charge = len;
+-              }
+-              tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+-              if (!tmp)
+-                      goto fail_nomem;
+-              *tmp = *mpnt;
+-              INIT_LIST_HEAD(&tmp->anon_vma_chain);
+-              pol = mpol_dup(vma_policy(mpnt));
+-              retval = PTR_ERR(pol);
+-              if (IS_ERR(pol))
+-                      goto fail_nomem_policy;
+-              vma_set_policy(tmp, pol);
+-              tmp->vm_mm = mm;
+-              if (anon_vma_fork(tmp, mpnt))
+-                      goto fail_nomem_anon_vma_fork;
+-              tmp->vm_flags &= ~VM_LOCKED;
+-              tmp->vm_next = tmp->vm_prev = NULL;
+-              file = tmp->vm_file;
+-              if (file) {
+-                      struct inode *inode = file_inode(file);
+-                      struct address_space *mapping = file->f_mapping;
+-
+-                      get_file(file);
+-                      if (tmp->vm_flags & VM_DENYWRITE)
+-                              atomic_dec(&inode->i_writecount);
+-                      mutex_lock(&mapping->i_mmap_mutex);
+-                      if (tmp->vm_flags & VM_SHARED)
+-                              mapping->i_mmap_writable++;
+-                      flush_dcache_mmap_lock(mapping);
+-                      /* insert tmp into the share list, just after mpnt */
+-                      if (unlikely(tmp->vm_flags & VM_NONLINEAR))
+-                              vma_nonlinear_insert(tmp,
+-                                              &mapping->i_mmap_nonlinear);
+-                      else
+-                              vma_interval_tree_insert_after(tmp, mpnt,
+-                                                      &mapping->i_mmap);
+-                      flush_dcache_mmap_unlock(mapping);
+-                      mutex_unlock(&mapping->i_mmap_mutex);
++              tmp = dup_vma(mm, oldmm, mpnt);
++              if (!tmp) {
++                      retval = -ENOMEM;
++                      goto out;
+               }
+               /*
+@@ -464,6 +490,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+               if (retval)
+                       goto out;
+       }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
++              struct vm_area_struct *mpnt_m;
++
++              for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
++                      BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
++
++                      if (!mpnt->vm_mirror)
++                              continue;
++
++                      if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
++                              BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
++                              mpnt->vm_mirror = mpnt_m;
++                      } else {
++                              BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
++                              mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
++                              mpnt_m->vm_mirror->vm_mirror = mpnt_m;
++                              mpnt->vm_mirror->vm_mirror = mpnt;
++                      }
++              }
++              BUG_ON(mpnt_m);
++      }
++#endif
++
+       /* a new mm has just been created */
+       arch_dup_mmap(oldmm, mm);
+       retval = 0;
+@@ -473,14 +524,6 @@ out:
+       up_write(&oldmm->mmap_sem);
+       uprobe_end_dup_mmap();
+       return retval;
+-fail_nomem_anon_vma_fork:
+-      mpol_put(pol);
+-fail_nomem_policy:
+-      kmem_cache_free(vm_area_cachep, tmp);
+-fail_nomem:
+-      retval = -ENOMEM;
+-      vm_unacct_memory(charge);
+-      goto out;
+ }
+ static inline int mm_alloc_pgd(struct mm_struct *mm)
+@@ -695,8 +738,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+               return ERR_PTR(err);
+       mm = get_task_mm(task);
+-      if (mm && mm != current->mm &&
+-                      !ptrace_may_access(task, mode)) {
++      if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
++                (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
+               mmput(mm);
+               mm = ERR_PTR(-EACCES);
+       }
+@@ -918,13 +961,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
+                       spin_unlock(&fs->lock);
+                       return -EAGAIN;
+               }
+-              fs->users++;
++              atomic_inc(&fs->users);
+               spin_unlock(&fs->lock);
+               return 0;
+       }
+       tsk->fs = copy_fs_struct(fs);
+       if (!tsk->fs)
+               return -ENOMEM;
++      /* Carry through gr_chroot_dentry and is_chrooted instead
++         of recomputing it here.  Already copied when the task struct
++         is duplicated.  This allows pivot_root to not be treated as
++         a chroot
++      */
++      //gr_set_chroot_entries(tsk, &tsk->fs->root);
++
+       return 0;
+ }
+@@ -1197,10 +1247,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+       DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
+ #endif
+       retval = -EAGAIN;
++
++      gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
++
+       if (atomic_read(&p->real_cred->user->processes) >=
+                       task_rlimit(p, RLIMIT_NPROC)) {
+-              if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
+-                  p->real_cred->user != INIT_USER)
++              if (p->real_cred->user != INIT_USER &&
++                  !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
+                       goto bad_fork_free;
+       }
+       current->flags &= ~PF_NPROC_EXCEEDED;
+@@ -1446,6 +1499,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+               goto bad_fork_free_pid;
+       }
++      /* synchronizes with gr_set_acls()
++         we need to call this past the point of no return for fork()
++      */
++      gr_copy_label(p);
++
+       if (clone_flags & CLONE_THREAD) {
+               current->signal->nr_threads++;
+               atomic_inc(&current->signal->live);
+@@ -1529,6 +1587,8 @@ bad_fork_cleanup_count:
+ bad_fork_free:
+       free_task(p);
+ fork_out:
++      gr_log_forkfail(retval);
++
+       return ERR_PTR(retval);
+ }
+@@ -1613,6 +1673,8 @@ long do_fork(unsigned long clone_flags,
+               if (clone_flags & CLONE_PARENT_SETTID)
+                       put_user(nr, parent_tidptr);
++              gr_handle_brute_check();
++
+               if (clone_flags & CLONE_VFORK) {
+                       p->vfork_done = &vfork;
+                       init_completion(&vfork);
+@@ -1729,7 +1791,7 @@ void __init proc_caches_init(void)
+       mm_cachep = kmem_cache_create("mm_struct",
+                       sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
+-      vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
++      vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
+       mmap_init();
+       nsproxy_cache_init();
+ }
+@@ -1769,7 +1831,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+               return 0;
+       /* don't need lock here; in the worst case we'll do useless copy */
+-      if (fs->users == 1)
++      if (atomic_read(&fs->users) == 1)
+               return 0;
+       *new_fsp = copy_fs_struct(fs);
+@@ -1881,7 +1943,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+                       fs = current->fs;
+                       spin_lock(&fs->lock);
+                       current->fs = new_fs;
+-                      if (--fs->users)
++                      gr_set_chroot_entries(current, &current->fs->root);
++                      if (atomic_dec_return(&fs->users))
+                               new_fs = NULL;
+                       else
+                               new_fs = fs;
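
The kernel/fork.c hunks above hoist the per-VMA copy out of dup_mmap() into a dup_vma() helper that returns NULL on failure, so the caller only has to map that to -ENOMEM; they also turn fs_struct's users count into an atomic_t. A minimal userspace sketch of the first shape, with hypothetical names and plain libc calls rather than kernel APIs:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct node {
        int value;
        struct node *next;
};

/* Clone a single node, or return NULL on allocation failure (the dup_vma() shape). */
static struct node *dup_node(const struct node *src)
{
        struct node *tmp = malloc(sizeof(*tmp));

        if (!tmp)
                return NULL;
        memcpy(tmp, src, sizeof(*tmp));
        tmp->next = NULL;               /* links are rebuilt by the caller */
        return tmp;
}

/* The caller's loop stays flat: one NULL check, one error code (the dup_mmap() shape).
 * Cleanup of a partially built list is omitted in this sketch. */
static int dup_list(struct node **dst, const struct node *src)
{
        struct node **pprev = dst;

        for (; src; src = src->next) {
                struct node *tmp = dup_node(src);

                if (!tmp)
                        return -ENOMEM;
                *pprev = tmp;
                pprev = &tmp->next;
        }
        *pprev = NULL;
        return 0;
}
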
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 49dacfb..5c6b450 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -54,6 +54,7 @@
+ #include <linux/mount.h>
+ #include <linux/pagemap.h>
+ #include <linux/syscalls.h>
++#include <linux/ptrace.h>
+ #include <linux/signal.h>
+ #include <linux/export.h>
+ #include <linux/magic.h>
+@@ -242,6 +243,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
+       struct page *page, *page_head;
+       int err, ro = 0;
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
++              return -EFAULT;
++#endif
++
+       /*
+        * The futex address must be "naturally" aligned.
+        */
+@@ -2733,6 +2739,7 @@ static int __init futex_init(void)
+ {
+       u32 curval;
+       int i;
++      mm_segment_t oldfs;
+       /*
+        * This will fail and we want it. Some arch implementations do
+@@ -2744,8 +2751,11 @@ static int __init futex_init(void)
+        * implementation, the non-functional ones will return
+        * -ENOSYS.
+        */
++      oldfs = get_fs();
++      set_fs(USER_DS);
+       if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
+               futex_cmpxchg_enabled = 1;
++      set_fs(oldfs);
+       for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
+               plist_head_init(&futex_queues[i].chain);
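
The futex_init() hunk wraps the cmpxchg_futex_value_locked() probe in a get_fs()/set_fs(USER_DS) pair, so the deliberately failing probe runs under user address limits and the previous limit is always restored. A userspace analogue of that save/probe/restore shape (the mode names and the probe itself are stand-ins, not kernel interfaces):

#include <stdbool.h>

enum addr_limit { LIMIT_KERNEL, LIMIT_USER };

static enum addr_limit current_limit = LIMIT_KERNEL;

static enum addr_limit get_limit(void)          { return current_limit; }
static void set_limit(enum addr_limit limit)    { current_limit = limit; }

/* Stand-in for the cmpxchg probe: only meaningful under user limits. */
static bool probe_under_user_limit(void)        { return current_limit == LIMIT_USER; }

static bool detect_feature(void)
{
        enum addr_limit old = get_limit();      /* oldfs = get_fs();  */
        bool enabled;

        set_limit(LIMIT_USER);                  /* set_fs(USER_DS);   */
        enabled = probe_under_user_limit();
        set_limit(old);                         /* set_fs(oldfs);     */
        return enabled;
}
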
+diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
+index f9f44fd..29885e4 100644
+--- a/kernel/futex_compat.c
++++ b/kernel/futex_compat.c
+@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
+       return 0;
+ }
+-static void __user *futex_uaddr(struct robust_list __user *entry,
++static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
+                               compat_long_t futex_offset)
+ {
+       compat_uptr_t base = ptr_to_compat(entry);
+diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
+index 9b22d03..6295b62 100644
+--- a/kernel/gcov/base.c
++++ b/kernel/gcov/base.c
+@@ -102,11 +102,6 @@ void gcov_enable_events(void)
+ }
+ #ifdef CONFIG_MODULES
+-static inline int within(void *addr, void *start, unsigned long size)
+-{
+-      return ((addr >= start) && (addr < start + size));
+-}
+-
+ /* Update list and generate events when modules are unloaded. */
+ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
+                               void *data)
+@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
+       prev = NULL;
+       /* Remove entries located in module from linked list. */
+       for (info = gcov_info_head; info; info = info->next) {
+-              if (within(info, mod->module_core, mod->core_size)) {
++              if (within_module_core_rw((unsigned long)info, mod)) {
+                       if (prev)
+                               prev->next = info->next;
+                       else
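
The gcov hunk drops the file-local within() helper in favour of within_module_core_rw(), which matches the rx/rw split of module memory introduced in the kernel/module.c hunks further down (the gcov_info records are writable data, so they can only live in the rw half). For reference, the range test the removed helper performed is just a half-open bounds check:

#include <stdbool.h>
#include <stddef.h>

/* Half-open range check: start <= addr < start + size. */
static bool within(const void *addr, const void *start, size_t size)
{
        const char *a = addr, *s = start;

        return a >= s && a < s + size;
}
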
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index 2288fbd..0f3941f 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -1435,7 +1435,7 @@ void hrtimer_peek_ahead_timers(void)
+       local_irq_restore(flags);
+ }
+-static void run_hrtimer_softirq(struct softirq_action *h)
++static void run_hrtimer_softirq(void)
+ {
+       hrtimer_peek_ahead_timers();
+ }
+@@ -1770,7 +1770,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata hrtimers_nb = {
++static struct notifier_block hrtimers_nb = {
+       .notifier_call = hrtimer_cpu_notify,
+ };
+diff --git a/kernel/irq_work.c b/kernel/irq_work.c
+index 55fcce6..0e4cf34 100644
+--- a/kernel/irq_work.c
++++ b/kernel/irq_work.c
+@@ -189,12 +189,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block cpu_notify;
++static struct notifier_block cpu_notify = {
++      .notifier_call = irq_work_cpu_notify,
++      .priority = 0,
++};
+ static __init int irq_work_init_cpu_notifier(void)
+ {
+-      cpu_notify.notifier_call = irq_work_cpu_notify;
+-      cpu_notify.priority = 0;
+       register_cpu_notifier(&cpu_notify);
+       return 0;
+ }
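
The irq_work hunk replaces runtime assignment of the notifier fields with a static designated initializer, which fits the constification theme running through the rest of this patch: a structure that is complete at compile time can be placed in read-only data. The same pattern in plain C (names are illustrative):

#include <stdio.h>

struct notifier_sketch {
        int (*notifier_call)(void *data);
        int priority;
};

static int demo_notify(void *data)
{
        (void)data;
        return 0;
}

/* Fully initialized at compile time; nothing left to patch at registration. */
static const struct notifier_sketch cpu_notify_sketch = {
        .notifier_call  = demo_notify,
        .priority       = 0,
};

int main(void)
{
        printf("priority %d, handler set: %s\n",
               cpu_notify_sketch.priority,
               cpu_notify_sketch.notifier_call ? "yes" : "no");
        return 0;
}
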
+diff --git a/kernel/jump_label.c b/kernel/jump_label.c
+index 60f48fa..7f3a770 100644
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -13,6 +13,7 @@
+ #include <linux/sort.h>
+ #include <linux/err.h>
+ #include <linux/static_key.h>
++#include <linux/mm.h>
+ #ifdef HAVE_JUMP_LABEL
+@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
+       size = (((unsigned long)stop - (unsigned long)start)
+                                       / sizeof(struct jump_entry));
++      pax_open_kernel();
+       sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
++      pax_close_kernel();
+ }
+ static void jump_label_update(struct static_key *key, int enable);
+@@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
+       struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
+       struct jump_entry *iter;
++      pax_open_kernel();
+       for (iter = iter_start; iter < iter_stop; iter++) {
+               if (within_module_init(iter->code, mod))
+                       iter->code = 0;
+       }
++      pax_close_kernel();
+ }
+ static int
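
The jump_label hunks bracket writes to the jump-entry table with pax_open_kernel()/pax_close_kernel(), so the table stays read-only except for the brief window in which it is sorted or invalidated. A userspace analogue of that idea using mprotect() on a page-aligned buffer (a sketch only; the kernel-side mechanism is different):

#include <stddef.h>
#include <string.h>
#include <sys/mman.h>

/*
 * 'page' must be page-aligned and 'pagesz' a multiple of the page size
 * (e.g. obtained from mmap()).  Open the write window, patch, close it again.
 */
static int update_sealed_page(void *page, size_t pagesz,
                              const void *src, size_t len)
{
        if (mprotect(page, pagesz, PROT_READ | PROT_WRITE))     /* "open"  */
                return -1;
        memcpy(page, src, len);
        return mprotect(page, pagesz, PROT_READ);               /* "close" */
}
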
+diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
+index 3127ad5..159d880 100644
+--- a/kernel/kallsyms.c
++++ b/kernel/kallsyms.c
+@@ -11,6 +11,9 @@
+  *      Changed the compression method from stem compression to "table lookup"
+  *      compression (see scripts/kallsyms.c for a more complete description)
+  */
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++#define __INCLUDED_BY_HIDESYM 1
++#endif
+ #include <linux/kallsyms.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
+@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
+ static inline int is_kernel_inittext(unsigned long addr)
+ {
++      if (system_state != SYSTEM_BOOTING)
++              return 0;
++
+       if (addr >= (unsigned long)_sinittext
+           && addr <= (unsigned long)_einittext)
+               return 1;
+       return 0;
+ }
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++#ifdef CONFIG_MODULES
++static inline int is_module_text(unsigned long addr)
++{
++      if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
++              return 1;
++
++      addr = ktla_ktva(addr);
++      return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
++}
++#else
++static inline int is_module_text(unsigned long addr)
++{
++      return 0;
++}
++#endif
++#endif
++
+ static inline int is_kernel_text(unsigned long addr)
+ {
+       if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
+@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
+ static inline int is_kernel(unsigned long addr)
+ {
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++      if (is_kernel_text(addr) || is_kernel_inittext(addr))
++              return 1;
++
++      if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
++#else
+       if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
++#endif
++
+               return 1;
+       return in_gate_area_no_mm(addr);
+ }
+ static int is_ksym_addr(unsigned long addr)
+ {
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++      if (is_module_text(addr))
++              return 0;
++#endif
++
+       if (all_var)
+               return is_kernel(addr);
+@@ -480,7 +519,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
+ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
+ {
+-      iter->name[0] = '\0';
+       iter->nameoff = get_symbol_offset(new_pos);
+       iter->pos = new_pos;
+ }
+@@ -528,6 +566,11 @@ static int s_show(struct seq_file *m, void *p)
+ {
+       struct kallsym_iter *iter = m->private;
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++      if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
++              return 0;
++#endif
++
+       /* Some debugging symbols have no name.  Ignore them. */
+       if (!iter->name[0])
+               return 0;
+@@ -541,6 +584,7 @@ static int s_show(struct seq_file *m, void *p)
+                */
+               type = iter->exported ? toupper(iter->type) :
+                                       tolower(iter->type);
++
+               seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
+                          type, iter->name, iter->module_name);
+       } else
+@@ -566,7 +610,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
+       struct kallsym_iter *iter;
+       int ret;
+-      iter = kmalloc(sizeof(*iter), GFP_KERNEL);
++      iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+       if (!iter)
+               return -ENOMEM;
+       reset_iter(iter, 0);
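
In the kallsyms hunks, reset_iter() no longer clears iter->name[0], so the iterator has to come from zeroed memory; the open path therefore switches from kmalloc() to kzalloc(). The userspace equivalent of that pairing is calloc() instead of malloc() whenever a "not filled in yet" sentinel is read before the first fill:

#include <stdlib.h>

struct sym_iter_sketch {
        char name[64];          /* "" means: nothing resolved yet */
        unsigned long pos;
};

static struct sym_iter_sketch *iter_open(void)
{
        /* calloc, not malloc: name[0] == '\0' must hold before the first fill */
        return calloc(1, sizeof(struct sym_iter_sketch));
}

static int iter_show(const struct sym_iter_sketch *iter)
{
        if (!iter->name[0])     /* skip entries that were never filled in */
                return 0;
        return 1;
}
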
+diff --git a/kernel/kcmp.c b/kernel/kcmp.c
+index e30ac0f..3528cac 100644
+--- a/kernel/kcmp.c
++++ b/kernel/kcmp.c
+@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
+       struct task_struct *task1, *task2;
+       int ret;
++#ifdef CONFIG_GRKERNSEC
++      return -ENOSYS;
++#endif
++
+       rcu_read_lock();
+       /*
+diff --git a/kernel/kexec.c b/kernel/kexec.c
+index 59f7b55..4022f65 100644
+--- a/kernel/kexec.c
++++ b/kernel/kexec.c
+@@ -1041,7 +1041,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
+                               unsigned long flags)
+ {
+       struct compat_kexec_segment in;
+-      struct kexec_segment out, __user *ksegments;
++      struct kexec_segment out;
++      struct kexec_segment __user *ksegments;
+       unsigned long i, result;
+       /* Don't allow clients that don't understand the native
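
The kexec hunk only splits a combined declaration, apparently because qualifiers and pointer declarators bind per variable, so "struct kexec_segment out, __user *ksegments;" is easy to misread and awkward for annotation checkers. The generic C pitfall it avoids:

static void declarator_demo(void)
{
        int *p, q;      /* p is "int *", q is plain int                 */

        int *r;         /* split form: each line states its own type,   */
        int  s;         /* which is why the kexec declaration was split */

        (void)p; (void)q; (void)r; (void)s;
}
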
+diff --git a/kernel/kmod.c b/kernel/kmod.c
+index 8241906..d625f2c 100644
+--- a/kernel/kmod.c
++++ b/kernel/kmod.c
+@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
+       kfree(info->argv);
+ }
+-static int call_modprobe(char *module_name, int wait)
++static int call_modprobe(char *module_name, char *module_param, int wait)
+ {
+       struct subprocess_info *info;
+       static char *envp[] = {
+@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
+               NULL
+       };
+-      char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
++      char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
+       if (!argv)
+               goto out;
+@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
+       argv[1] = "-q";
+       argv[2] = "--";
+       argv[3] = module_name;  /* check free_modprobe_argv() */
+-      argv[4] = NULL;
++      argv[4] = module_param;
++      argv[5] = NULL;
+       info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
+                                        NULL, free_modprobe_argv, NULL);
+@@ -129,9 +130,8 @@ out:
+  * If module auto-loading support is disabled then this function
+  * becomes a no-operation.
+  */
+-int __request_module(bool wait, const char *fmt, ...)
++static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
+ {
+-      va_list args;
+       char module_name[MODULE_NAME_LEN];
+       unsigned int max_modprobes;
+       int ret;
+@@ -147,9 +147,7 @@ int __request_module(bool wait, const char *fmt, ...)
+        */
+       WARN_ON_ONCE(wait && current_is_async());
+-      va_start(args, fmt);
+-      ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
+-      va_end(args);
++      ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
+       if (ret >= MODULE_NAME_LEN)
+               return -ENAMETOOLONG;
+@@ -157,6 +155,20 @@ int __request_module(bool wait, const char *fmt, ...)
+       if (ret)
+               return ret;
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++      if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
++              /* hack to workaround consolekit/udisks stupidity */
++              read_lock(&tasklist_lock);
++              if (!strcmp(current->comm, "mount") &&
++                  current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
++                      read_unlock(&tasklist_lock);
++                      printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
++                      return -EPERM;
++              }
++              read_unlock(&tasklist_lock);
++      }
++#endif
++
+       /* If modprobe needs a service that is in a module, we get a recursive
+        * loop.  Limit the number of running kmod threads to max_threads/2 or
+        * MAX_KMOD_CONCURRENT, whichever is the smaller.  A cleaner method
+@@ -185,11 +197,52 @@ int __request_module(bool wait, const char *fmt, ...)
+       trace_module_request(module_name, wait, _RET_IP_);
+-      ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
++      ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
+       atomic_dec(&kmod_concurrent);
+       return ret;
+ }
++
++int ___request_module(bool wait, char *module_param, const char *fmt, ...)
++{
++      va_list args;
++      int ret;
++
++      va_start(args, fmt);
++      ret = ____request_module(wait, module_param, fmt, args);
++      va_end(args);
++
++      return ret;
++}
++
++int __request_module(bool wait, const char *fmt, ...)
++{
++      va_list args;
++      int ret;
++
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++      if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
++              char module_param[MODULE_NAME_LEN];
++
++              memset(module_param, 0, sizeof(module_param));
++
++              snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
++
++              va_start(args, fmt);
++              ret = ____request_module(wait, module_param, fmt, args);
++              va_end(args);
++
++              return ret;
++      }
++#endif
++
++      va_start(args, fmt);
++      ret = ____request_module(wait, NULL, fmt, args);
++      va_end(args);
++
++      return ret;
++}
++
+ EXPORT_SYMBOL(__request_module);
+ #endif /* CONFIG_MODULES */
+@@ -300,7 +353,7 @@ static int wait_for_helper(void *data)
+                *
+                * Thus the __user pointer cast is valid here.
+                */
+-              sys_wait4(pid, (int __user *)&ret, 0, NULL);
++              sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
+               /*
+                * If ret is 0, either ____call_usermodehelper failed and the
+@@ -651,7 +704,7 @@ EXPORT_SYMBOL(call_usermodehelper);
+ static int proc_cap_handler(struct ctl_table *table, int write,
+                        void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table t;
++      ctl_table_no_const t;
+       unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
+       kernel_cap_t new_cap;
+       int err, i;
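
The kmod hunks restructure __request_module() into a va_list-taking ____request_module() plus thin varargs wrappers, so the MODHARDEN path can pass an extra module_param while sharing the formatting logic. This is the standard printf()/vprintf() split; a self-contained sketch with illustrative names:

#include <stdarg.h>
#include <stdio.h>

/* Core worker: takes a va_list so wrappers can add their own fixed arguments. */
static int request_thing(const char *tag, const char *fmt, va_list ap)
{
        char name[64];

        vsnprintf(name, sizeof(name), fmt, ap);
        return printf("[%s] %s\n", tag ? tag : "-", name);
}

/* Wrapper with an extra parameter, mirroring ___request_module(). */
static int request_thing_tagged(const char *tag, const char *fmt, ...)
{
        va_list args;
        int ret;

        va_start(args, fmt);
        ret = request_thing(tag, fmt, args);
        va_end(args);
        return ret;
}

/* Plain wrapper, mirroring __request_module(). */
static int request_thing_plain(const char *fmt, ...)
{
        va_list args;
        int ret;

        va_start(args, fmt);
        ret = request_thing(NULL, fmt, args);
        va_end(args);
        return ret;
}

int main(void)
{
        request_thing_plain("fs-%s", "ext4");
        request_thing_tagged("harden_uid_1000", "net-pf-%d", 10);
        return 0;
}
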
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index bddf3b2..233bf40 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -31,6 +31,9 @@
+  *            <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
+  *            <prasanna@in.ibm.com> added function-return probes.
+  */
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++#define __INCLUDED_BY_HIDESYM 1
++#endif
+ #include <linux/kprobes.h>
+ #include <linux/hash.h>
+ #include <linux/init.h>
+@@ -185,7 +188,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
+        * kernel image and loaded module images reside. This is required
+        * so x86_64 can correctly handle the %rip-relative fixups.
+        */
+-      kip->insns = module_alloc(PAGE_SIZE);
++      kip->insns = module_alloc_exec(PAGE_SIZE);
+       if (!kip->insns) {
+               kfree(kip);
+               return NULL;
+@@ -225,7 +228,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
+                */
+               if (!list_is_singular(&kip->list)) {
+                       list_del(&kip->list);
+-                      module_free(NULL, kip->insns);
++                      module_free_exec(NULL, kip->insns);
+                       kfree(kip);
+               }
+               return 1;
+@@ -2083,7 +2086,7 @@ static int __init init_kprobes(void)
+ {
+       int i, err = 0;
+       unsigned long offset = 0, size = 0;
+-      char *modname, namebuf[128];
++      char *modname, namebuf[KSYM_NAME_LEN];
+       const char *symbol_name;
+       void *addr;
+       struct kprobe_blackpoint *kb;
+@@ -2168,11 +2171,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
+               kprobe_type = "k";
+       if (sym)
+-              seq_printf(pi, "%p  %s  %s+0x%x  %s ",
++              seq_printf(pi, "%pK  %s  %s+0x%x  %s ",
+                       p->addr, kprobe_type, sym, offset,
+                       (modname ? modname : " "));
+       else
+-              seq_printf(pi, "%p  %s  %p ",
++              seq_printf(pi, "%pK  %s  %pK ",
+                       p->addr, kprobe_type, p->addr);
+       if (!pp)
+@@ -2209,7 +2212,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
+       const char *sym = NULL;
+       unsigned int i = *(loff_t *) v;
+       unsigned long offset = 0;
+-      char *modname, namebuf[128];
++      char *modname, namebuf[KSYM_NAME_LEN];
+       head = &kprobe_table[i];
+       preempt_disable();
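
The kprobes hunks take instruction slots from module_alloc_exec()/module_free_exec() and hide slot addresses behind %pK. As a loose userspace illustration of keeping the writable and executable phases of such a slot separate (mmap/mprotect stand in here; this is not how the kernel-side allocators work):

#include <stddef.h>
#include <string.h>
#include <sys/mman.h>

/* Stage the bytes while the buffer is writable, then drop write permission
 * before ever treating the buffer as code. */
static void *alloc_exec_slot(const void *code, size_t len)
{
        void *slot = mmap(NULL, len, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (slot == MAP_FAILED)
                return NULL;
        memcpy(slot, code, len);
        if (mprotect(slot, len, PROT_READ | PROT_EXEC)) {
                munmap(slot, len);
                return NULL;
        }
        return slot;
}
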
+diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
+index 6ada93c..dce7d5d 100644
+--- a/kernel/ksysfs.c
++++ b/kernel/ksysfs.c
+@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
+ {
+       if (count+1 > UEVENT_HELPER_PATH_LEN)
+               return -ENOENT;
++      if (!capable(CAP_SYS_ADMIN))
++              return -EPERM;
+       memcpy(uevent_helper, buf, count);
+       uevent_helper[count] = '\0';
+       if (count && uevent_helper[count-1] == '\n')
+@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
+       return count;
+ }
+-static struct bin_attribute notes_attr = {
++static bin_attribute_no_const notes_attr __read_only = {
+       .attr = {
+               .name = "notes",
+               .mode = S_IRUGO,
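
The ksysfs hunk adds a capability check before accepting a write to uevent_helper. The general shape, a privilege check ahead of a configuration write, in a userspace sketch where geteuid() stands in for capable(CAP_SYS_ADMIN):

#include <errno.h>
#include <string.h>
#include <unistd.h>

static char helper_path[256];

static int store_helper(const char *buf, size_t count)
{
        if (count + 1 > sizeof(helper_path))
                return -ENOENT;
        if (geteuid() != 0)             /* stand-in for capable(CAP_SYS_ADMIN) */
                return -EPERM;
        memcpy(helper_path, buf, count);
        helper_path[count] = '\0';
        if (count && helper_path[count - 1] == '\n')
                helper_path[count - 1] = '\0';
        return (int)count;
}
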
+diff --git a/kernel/lockdep.c b/kernel/lockdep.c
+index 1f3186b..bb7dbc6 100644
+--- a/kernel/lockdep.c
++++ b/kernel/lockdep.c
+@@ -596,6 +596,10 @@ static int static_obj(void *obj)
+                     end   = (unsigned long) &_end,
+                     addr  = (unsigned long) obj;
++#ifdef CONFIG_PAX_KERNEXEC
++      start = ktla_ktva(start);
++#endif
++
+       /*
+        * static variable?
+        */
+@@ -736,6 +740,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
+       if (!static_obj(lock->key)) {
+               debug_locks_off();
+               printk("INFO: trying to register non-static key.\n");
++              printk("lock:%pS key:%pS.\n", lock, lock->key);
+               printk("the code is fine but needs lockdep annotation.\n");
+               printk("turning off the locking correctness validator.\n");
+               dump_stack();
+@@ -3080,7 +3085,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+               if (!class)
+                       return 0;
+       }
+-      atomic_inc((atomic_t *)&class->ops);
++      atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
+       if (very_verbose(class)) {
+               printk("\nacquire class [%p] %s", class->key, class->name);
+               if (class->name_version > 1)
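
static_obj() decides whether a lock key lives in static storage by comparing its address against the kernel's section bounds; the hunk above adjusts the start bound with ktla_ktva() under KERNEXEC and prints the offending lock before the validator is turned off. The same "is this address inside my static image?" test can be sketched in userspace with the historical etext/edata/end linker symbols (glibc/Linux specific, and only a rough illustration):

#include <stdbool.h>
#include <stdio.h>

extern char etext, edata, end;  /* end-of-text, end-of-data, end-of-bss (see end(3)) */

static bool is_static_object(const void *obj)
{
        const char *addr = obj;

        /* roughly: initialized and zero-initialized data lie between the
         * end of text and the end of bss */
        return addr >= &etext && addr < &end;
}

static int some_static_key;

int main(void)
{
        int on_stack;

        printf("static key: %d, stack variable: %d\n",
               is_static_object(&some_static_key),
               is_static_object(&on_stack));
        return 0;
}
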
+diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
+index b2c71c5..7b88d63 100644
+--- a/kernel/lockdep_proc.c
++++ b/kernel/lockdep_proc.c
+@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
+               return 0;
+       }
+-      seq_printf(m, "%p", class->key);
++      seq_printf(m, "%pK", class->key);
+ #ifdef CONFIG_DEBUG_LOCKDEP
+       seq_printf(m, " OPS:%8ld", class->ops);
+ #endif
+@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
+       list_for_each_entry(entry, &class->locks_after, entry) {
+               if (entry->distance == 1) {
+-                      seq_printf(m, " -> [%p] ", entry->class->key);
++                      seq_printf(m, " -> [%pK] ", entry->class->key);
+                       print_name(m, entry->class);
+                       seq_puts(m, "\n");
+               }
+@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
+               if (!class->key)
+                       continue;
+-              seq_printf(m, "[%p] ", class->key);
++              seq_printf(m, "[%pK] ", class->key);
+               print_name(m, class);
+               seq_puts(m, "\n");
+       }
+@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
+               if (!i)
+                       seq_line(m, '-', 40-namelen, namelen);
+-              snprintf(ip, sizeof(ip), "[<%p>]",
++              snprintf(ip, sizeof(ip), "[<%pK>]",
+                               (void *)class->contention_point[i]);
+               seq_printf(m, "%40s %14lu %29s %pS\n",
+                          name, stats->contention_point[i],
+@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
+               if (!i)
+                       seq_line(m, '-', 40-namelen, namelen);
+-              snprintf(ip, sizeof(ip), "[<%p>]",
++              snprintf(ip, sizeof(ip), "[<%pK>]",
+                               (void *)class->contending_point[i]);
+               seq_printf(m, "%40s %14lu %29s %pS\n",
+                          name, stats->contending_point[i],
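
These hunks (like the kprobes ones above) switch seq_printf() from %p to %pK, which prints the real pointer only to sufficiently privileged readers and a zeroed value otherwise, depending on kptr_restrict. A userspace sketch of that output policy (the kernel's actual rules involve kptr_restrict and CAP_SYSLOG; geteuid() is just a stand-in):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Print a pointer for privileged readers, a zeroed placeholder otherwise. */
static void print_guarded_pointer(FILE *out, const void *p)
{
        bool privileged = (geteuid() == 0);

        if (privileged)
                fprintf(out, "%016lx", (unsigned long)(uintptr_t)p);
        else
                fprintf(out, "%016lx", 0UL);
}

int main(void)
{
        int object;

        print_guarded_pointer(stdout, &object);
        putchar('\n');
        return 0;
}
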
+diff --git a/kernel/module.c b/kernel/module.c
+index fa53db8..6f17200 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -61,6 +61,7 @@
+ #include <linux/pfn.h>
+ #include <linux/bsearch.h>
+ #include <linux/fips.h>
++#include <linux/grsecurity.h>
+ #include <uapi/linux/module.h>
+ #include "module-internal.h"
+@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
+ /* Bounds of module allocation, for speeding __module_address.
+  * Protected by module_mutex. */
+-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
++static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
++static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
+ int register_module_notifier(struct notifier_block * nb)
+ {
+@@ -323,7 +325,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
+               return true;
+       list_for_each_entry_rcu(mod, &modules, list) {
+-              struct symsearch arr[] = {
++              struct symsearch modarr[] = {
+                       { mod->syms, mod->syms + mod->num_syms, mod->crcs,
+                         NOT_GPL_ONLY, false },
+                       { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
+@@ -348,7 +350,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
+-              if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
++              if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
+                       return true;
+       }
+       return false;
+@@ -485,7 +487,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
+ static int percpu_modalloc(struct module *mod,
+                          unsigned long size, unsigned long align)
+ {
+-      if (align > PAGE_SIZE) {
++      if (align-1 >= PAGE_SIZE) {
+               printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
+                      mod->name, align, PAGE_SIZE);
+               align = PAGE_SIZE;
+@@ -1089,7 +1091,7 @@ struct module_attribute module_uevent =
+ static ssize_t show_coresize(struct module_attribute *mattr,
+                            struct module_kobject *mk, char *buffer)
+ {
+-      return sprintf(buffer, "%u\n", mk->mod->core_size);
++      return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
+ }
+ static struct module_attribute modinfo_coresize =
+@@ -1098,7 +1100,7 @@ static struct module_attribute modinfo_coresize =
+ static ssize_t show_initsize(struct module_attribute *mattr,
+                            struct module_kobject *mk, char *buffer)
+ {
+-      return sprintf(buffer, "%u\n", mk->mod->init_size);
++      return sprintf(buffer, "%u\n", mk->mod->init_size_rx +  mk->mod->init_size_rw);
+ }
+ static struct module_attribute modinfo_initsize =
+@@ -1313,7 +1315,7 @@ resolve_symbol_wait(struct module *mod,
+  */
+ #ifdef CONFIG_SYSFS
+-#ifdef CONFIG_KALLSYMS
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ static inline bool sect_empty(const Elf_Shdr *sect)
+ {
+       return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
+@@ -1453,7 +1455,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
+ {
+       unsigned int notes, loaded, i;
+       struct module_notes_attrs *notes_attrs;
+-      struct bin_attribute *nattr;
++      bin_attribute_no_const *nattr;
+       /* failed to create section attributes, so can't create notes */
+       if (!mod->sect_attrs)
+@@ -1565,7 +1567,7 @@ static void del_usage_links(struct module *mod)
+ static int module_add_modinfo_attrs(struct module *mod)
+ {
+       struct module_attribute *attr;
+-      struct module_attribute *temp_attr;
++      module_attribute_no_const *temp_attr;
+       int error = 0;
+       int i;
+@@ -1779,21 +1781,21 @@ static void set_section_ro_nx(void *base,
+ static void unset_module_core_ro_nx(struct module *mod)
+ {
+-      set_page_attributes(mod->module_core + mod->core_text_size,
+-              mod->module_core + mod->core_size,
++      set_page_attributes(mod->module_core_rw,
++              mod->module_core_rw + mod->core_size_rw,
+               set_memory_x);
+-      set_page_attributes(mod->module_core,
+-              mod->module_core + mod->core_ro_size,
++      set_page_attributes(mod->module_core_rx,
++              mod->module_core_rx + mod->core_size_rx,
+               set_memory_rw);
+ }
+ static void unset_module_init_ro_nx(struct module *mod)
+ {
+-      set_page_attributes(mod->module_init + mod->init_text_size,
+-              mod->module_init + mod->init_size,
++      set_page_attributes(mod->module_init_rw,
++              mod->module_init_rw + mod->init_size_rw,
+               set_memory_x);
+-      set_page_attributes(mod->module_init,
+-              mod->module_init + mod->init_ro_size,
++      set_page_attributes(mod->module_init_rx,
++              mod->module_init_rx + mod->init_size_rx,
+               set_memory_rw);
+ }
+@@ -1806,14 +1808,14 @@ void set_all_modules_text_rw(void)
+       list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
+-              if ((mod->module_core) && (mod->core_text_size)) {
+-                      set_page_attributes(mod->module_core,
+-                                              mod->module_core + mod->core_text_size,
++              if ((mod->module_core_rx) && (mod->core_size_rx)) {
++                      set_page_attributes(mod->module_core_rx,
++                                              mod->module_core_rx + mod->core_size_rx,
+                                               set_memory_rw);
+               }
+-              if ((mod->module_init) && (mod->init_text_size)) {
+-                      set_page_attributes(mod->module_init,
+-                                              mod->module_init + mod->init_text_size,
++              if ((mod->module_init_rx) && (mod->init_size_rx)) {
++                      set_page_attributes(mod->module_init_rx,
++                                              mod->module_init_rx + mod->init_size_rx,
+                                               set_memory_rw);
+               }
+       }
+@@ -1829,14 +1831,14 @@ void set_all_modules_text_ro(void)
+       list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
+-              if ((mod->module_core) && (mod->core_text_size)) {
+-                      set_page_attributes(mod->module_core,
+-                                              mod->module_core + mod->core_text_size,
++              if ((mod->module_core_rx) && (mod->core_size_rx)) {
++                      set_page_attributes(mod->module_core_rx,
++                                              mod->module_core_rx + mod->core_size_rx,
+                                               set_memory_ro);
+               }
+-              if ((mod->module_init) && (mod->init_text_size)) {
+-                      set_page_attributes(mod->module_init,
+-                                              mod->module_init + mod->init_text_size,
++              if ((mod->module_init_rx) && (mod->init_size_rx)) {
++                      set_page_attributes(mod->module_init_rx,
++                                              mod->module_init_rx + mod->init_size_rx,
+                                               set_memory_ro);
+               }
+       }
+@@ -1887,16 +1889,19 @@ static void free_module(struct module *mod)
+       /* This may be NULL, but that's OK */
+       unset_module_init_ro_nx(mod);
+-      module_free(mod, mod->module_init);
++      module_free(mod, mod->module_init_rw);
++      module_free_exec(mod, mod->module_init_rx);
+       kfree(mod->args);
+       percpu_modfree(mod);
+       /* Free lock-classes: */
+-      lockdep_free_key_range(mod->module_core, mod->core_size);
++      lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
++      lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
+       /* Finally, free the core (containing the module structure) */
+       unset_module_core_ro_nx(mod);
+-      module_free(mod, mod->module_core);
++      module_free_exec(mod, mod->module_core_rx);
++      module_free(mod, mod->module_core_rw);
+ #ifdef CONFIG_MPU
+       update_protections(current->mm);
+@@ -1966,9 +1971,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
+       int ret = 0;
+       const struct kernel_symbol *ksym;
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++      int is_fs_load = 0;
++      int register_filesystem_found = 0;
++      char *p;
++
++      p = strstr(mod->args, "grsec_modharden_fs");
++      if (p) {
++              char *endptr = p + sizeof("grsec_modharden_fs") - 1;
++              /* copy \0 as well */
++              memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
++              is_fs_load = 1;
++      }
++#endif
++
+       for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
+               const char *name = info->strtab + sym[i].st_name;
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++              /* it's a real shame this will never get ripped and copied
++                 upstream! ;(
++              */
++              if (is_fs_load && !strcmp(name, "register_filesystem"))
++                      register_filesystem_found = 1;
++#endif
++
+               switch (sym[i].st_shndx) {
+               case SHN_COMMON:
+                       /* We compiled with -fno-common.  These are not
+@@ -1989,7 +2016,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
+                       ksym = resolve_symbol_wait(mod, info, name);
+                       /* Ok if resolved.  */
+                       if (ksym && !IS_ERR(ksym)) {
++                              pax_open_kernel();
+                               sym[i].st_value = ksym->value;
++                              pax_close_kernel();
+                               break;
+                       }
+@@ -2008,11 +2037,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
+                               secbase = (unsigned long)mod_percpu(mod);
+                       else
+                               secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
++                      pax_open_kernel();
+                       sym[i].st_value += secbase;
++                      pax_close_kernel();
+                       break;
+               }
+       }
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++      if (is_fs_load && !register_filesystem_found) {
++              printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
++              ret = -EPERM;
++      }
++#endif
++
+       return ret;
+ }
+@@ -2096,22 +2134,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
+                           || s->sh_entsize != ~0UL
+                           || strstarts(sname, ".init"))
+                               continue;
+-                      s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
++                      if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
++                              s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
++                      else
++                              s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
+                       pr_debug("\t%s\n", sname);
+               }
+-              switch (m) {
+-              case 0: /* executable */
+-                      mod->core_size = debug_align(mod->core_size);
+-                      mod->core_text_size = mod->core_size;
+-                      break;
+-              case 1: /* RO: text and ro-data */
+-                      mod->core_size = debug_align(mod->core_size);
+-                      mod->core_ro_size = mod->core_size;
+-                      break;
+-              case 3: /* whole core */
+-                      mod->core_size = debug_align(mod->core_size);
+-                      break;
+-              }
+       }
+       pr_debug("Init section allocation order:\n");
+@@ -2125,23 +2153,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
+                           || s->sh_entsize != ~0UL
+                           || !strstarts(sname, ".init"))
+                               continue;
+-                      s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
+-                                       | INIT_OFFSET_MASK);
++                      if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
++                              s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
++                      else
++                              s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
++                      s->sh_entsize |= INIT_OFFSET_MASK;
+                       pr_debug("\t%s\n", sname);
+               }
+-              switch (m) {
+-              case 0: /* executable */
+-                      mod->init_size = debug_align(mod->init_size);
+-                      mod->init_text_size = mod->init_size;
+-                      break;
+-              case 1: /* RO: text and ro-data */
+-                      mod->init_size = debug_align(mod->init_size);
+-                      mod->init_ro_size = mod->init_size;
+-                      break;
+-              case 3: /* whole init */
+-                      mod->init_size = debug_align(mod->init_size);
+-                      break;
+-              }
+       }
+ }
+@@ -2314,7 +2332,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
+       /* Put symbol section at end of init part of module. */
+       symsect->sh_flags |= SHF_ALLOC;
+-      symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
++      symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
+                                        info->index.sym) | INIT_OFFSET_MASK;
+       pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
+@@ -2331,13 +2349,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
+       }
+       /* Append room for core symbols at end of core part. */
+-      info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
+-      info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
+-      mod->core_size += strtab_size;
++      info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
++      info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
++      mod->core_size_rx += strtab_size;
+       /* Put string table section at end of init part of module. */
+       strsect->sh_flags |= SHF_ALLOC;
+-      strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
++      strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
+                                        info->index.str) | INIT_OFFSET_MASK;
+       pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
+ }
+@@ -2355,12 +2373,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
+       /* Make sure we get permanent strtab: don't use info->strtab. */
+       mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
++      pax_open_kernel();
++
+       /* Set types up while we still have access to sections. */
+       for (i = 0; i < mod->num_symtab; i++)
+               mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
+-      mod->core_symtab = dst = mod->module_core + info->symoffs;
+-      mod->core_strtab = s = mod->module_core + info->stroffs;
++      mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
++      mod->core_strtab = s = mod->module_core_rx + info->stroffs;
+       src = mod->symtab;
+       for (ndst = i = 0; i < mod->num_symtab; i++) {
+               if (i == 0 ||
+@@ -2372,6 +2392,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
+               }
+       }
+       mod->core_num_syms = ndst;
++
++      pax_close_kernel();
+ }
+ #else
+ static inline void layout_symtab(struct module *mod, struct load_info *info)
+@@ -2405,17 +2427,33 @@ void * __weak module_alloc(unsigned long size)
+       return vmalloc_exec(size);
+ }
+-static void *module_alloc_update_bounds(unsigned long size)
++static void *module_alloc_update_bounds_rw(unsigned long size)
+ {
+       void *ret = module_alloc(size);
+       if (ret) {
+               mutex_lock(&module_mutex);
+               /* Update module bounds. */
+-              if ((unsigned long)ret < module_addr_min)
+-                      module_addr_min = (unsigned long)ret;
+-              if ((unsigned long)ret + size > module_addr_max)
+-                      module_addr_max = (unsigned long)ret + size;
++              if ((unsigned long)ret < module_addr_min_rw)
++                      module_addr_min_rw = (unsigned long)ret;
++              if ((unsigned long)ret + size > module_addr_max_rw)
++                      module_addr_max_rw = (unsigned long)ret + size;
++              mutex_unlock(&module_mutex);
++      }
++      return ret;
++}
++
++static void *module_alloc_update_bounds_rx(unsigned long size)
++{
++      void *ret = module_alloc_exec(size);
++
++      if (ret) {
++              mutex_lock(&module_mutex);
++              /* Update module bounds. */
++              if ((unsigned long)ret < module_addr_min_rx)
++                      module_addr_min_rx = (unsigned long)ret;
++              if ((unsigned long)ret + size > module_addr_max_rx)
++                      module_addr_max_rx = (unsigned long)ret + size;
+               mutex_unlock(&module_mutex);
+       }
+       return ret;
+@@ -2691,8 +2729,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
+ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
+ {
+       const char *modmagic = get_modinfo(info, "vermagic");
++      const char *license = get_modinfo(info, "license");
+       int err;
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++      if (!license || !license_is_gpl_compatible(license))
++              return -ENOEXEC;
++#endif
++
+       if (flags & MODULE_INIT_IGNORE_VERMAGIC)
+               modmagic = NULL;
+@@ -2718,7 +2762,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
+       }
+       /* Set up license info based on the info section */
+-      set_license(mod, get_modinfo(info, "license"));
++      set_license(mod, license);
+       return 0;
+ }
+@@ -2799,7 +2843,7 @@ static int move_module(struct module *mod, struct load_info *info)
+       void *ptr;
+       /* Do the allocs. */
+-      ptr = module_alloc_update_bounds(mod->core_size);
++      ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
+       /*
+        * The pointer to this block is stored in the module structure
+        * which is inside the block. Just mark it as not being a
+@@ -2809,11 +2853,11 @@ static int move_module(struct module *mod, struct load_info *info)
+       if (!ptr)
+               return -ENOMEM;
+-      memset(ptr, 0, mod->core_size);
+-      mod->module_core = ptr;
++      memset(ptr, 0, mod->core_size_rw);
++      mod->module_core_rw = ptr;
+-      if (mod->init_size) {
+-              ptr = module_alloc_update_bounds(mod->init_size);
++      if (mod->init_size_rw) {
++              ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
+               /*
+                * The pointer to this block is stored in the module structure
+                * which is inside the block. This block doesn't need to be
+@@ -2822,13 +2866,45 @@ static int move_module(struct module *mod, struct load_info *info)
+                */
+               kmemleak_ignore(ptr);
+               if (!ptr) {
+-                      module_free(mod, mod->module_core);
++                      module_free(mod, mod->module_core_rw);
+                       return -ENOMEM;
+               }
+-              memset(ptr, 0, mod->init_size);
+-              mod->module_init = ptr;
++              memset(ptr, 0, mod->init_size_rw);
++              mod->module_init_rw = ptr;
+       } else
+-              mod->module_init = NULL;
++              mod->module_init_rw = NULL;
++
++      ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
++      kmemleak_not_leak(ptr);
++      if (!ptr) {
++              if (mod->module_init_rw)
++                      module_free(mod, mod->module_init_rw);
++              module_free(mod, mod->module_core_rw);
++              return -ENOMEM;
++      }
++
++      pax_open_kernel();
++      memset(ptr, 0, mod->core_size_rx);
++      pax_close_kernel();
++      mod->module_core_rx = ptr;
++
++      if (mod->init_size_rx) {
++              ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
++              kmemleak_ignore(ptr);
++              if (!ptr && mod->init_size_rx) {
++                      module_free_exec(mod, mod->module_core_rx);
++                      if (mod->module_init_rw)
++                              module_free(mod, mod->module_init_rw);
++                      module_free(mod, mod->module_core_rw);
++                      return -ENOMEM;
++              }
++
++              pax_open_kernel();
++              memset(ptr, 0, mod->init_size_rx);
++              pax_close_kernel();
++              mod->module_init_rx = ptr;
++      } else
++              mod->module_init_rx = NULL;
+       /* Transfer each section which specifies SHF_ALLOC */
+       pr_debug("final section addresses:\n");
+@@ -2839,16 +2915,45 @@ static int move_module(struct module *mod, struct load_info *info)
+               if (!(shdr->sh_flags & SHF_ALLOC))
+                       continue;
+-              if (shdr->sh_entsize & INIT_OFFSET_MASK)
+-                      dest = mod->module_init
+-                              + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
+-              else
+-                      dest = mod->module_core + shdr->sh_entsize;
++              if (shdr->sh_entsize & INIT_OFFSET_MASK) {
++                      if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
++                              dest = mod->module_init_rw
++                                      + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
++                      else
++                              dest = mod->module_init_rx
++                                      + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
++              } else {
++                      if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
++                              dest = mod->module_core_rw + shdr->sh_entsize;
++                      else
++                              dest = mod->module_core_rx + shdr->sh_entsize;
++              }
++
++              if (shdr->sh_type != SHT_NOBITS) {
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_64
++                      if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
++                              set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
++#endif
++                      if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
++                              pax_open_kernel();
++                              memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
++                              pax_close_kernel();
++                      } else
++#endif
+-              if (shdr->sh_type != SHT_NOBITS)
+                       memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
++              }
+               /* Update sh_addr to point to copy in image. */
+-              shdr->sh_addr = (unsigned long)dest;
++
++#ifdef CONFIG_PAX_KERNEXEC
++              if (shdr->sh_flags & SHF_EXECINSTR)
++                      shdr->sh_addr = ktva_ktla((unsigned long)dest);
++              else
++#endif
++
++                      shdr->sh_addr = (unsigned long)dest;
+               pr_debug("\t0x%lx %s\n",
+                        (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
+       }
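
The move_module() hunk above is the heart of the kernel/module.c changes: instead of one core allocation, the loader now carves the module into an rx image (code and read-only data, allocated with module_alloc_exec() and written through pax_open_kernel()) and an rw image (writable data), with section offsets recorded in core_size_rx/core_size_rw accordingly. A userspace sketch of that split using two mappings (purely illustrative; the kernel-side allocators and permission flips are different, and cleanup on partial failure is omitted):

#include <stddef.h>
#include <string.h>
#include <sys/mman.h>

struct image_sketch {
        void *rx;               /* code + rodata, sealed after the copy */
        void *rw;               /* writable data, stays writable        */
        size_t rx_size, rw_size;
};

static int load_image(struct image_sketch *img,
                      const void *text, size_t text_len,
                      const void *data, size_t data_len)
{
        img->rx_size = text_len;
        img->rw_size = data_len;

        img->rx = mmap(NULL, text_len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        img->rw = mmap(NULL, data_len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (img->rx == MAP_FAILED || img->rw == MAP_FAILED)
                return -1;

        memcpy(img->rx, text, text_len);        /* the "pax_open_kernel() window" */
        memcpy(img->rw, data, data_len);

        /* seal the rx half: never writable and executable at the same time */
        return mprotect(img->rx, text_len, PROT_READ | PROT_EXEC);
}
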
+@@ -2905,12 +3010,12 @@ static void flush_module_icache(const struct module *mod)
+        * Do it before processing of module parameters, so the module
+        * can provide parameter accessor functions of its own.
+        */
+-      if (mod->module_init)
+-              flush_icache_range((unsigned long)mod->module_init,
+-                                 (unsigned long)mod->module_init
+-                                 + mod->init_size);
+-      flush_icache_range((unsigned long)mod->module_core,
+-                         (unsigned long)mod->module_core + mod->core_size);
++      if (mod->module_init_rx)
++              flush_icache_range((unsigned long)mod->module_init_rx,
++                                 (unsigned long)mod->module_init_rx
++                                 + mod->init_size_rx);
++      flush_icache_range((unsigned long)mod->module_core_rx,
++                         (unsigned long)mod->module_core_rx + mod->core_size_rx);
+       set_fs(old_fs);
+ }
+@@ -2977,8 +3082,10 @@ static int alloc_module_percpu(struct module *mod, struct load_info *info)
+ static void module_deallocate(struct module *mod, struct load_info *info)
+ {
+       percpu_modfree(mod);
+-      module_free(mod, mod->module_init);
+-      module_free(mod, mod->module_core);
++      module_free_exec(mod, mod->module_init_rx);
++      module_free_exec(mod, mod->module_core_rx);
++      module_free(mod, mod->module_init_rw);
++      module_free(mod, mod->module_core_rw);
+ }
+ int __weak module_finalize(const Elf_Ehdr *hdr,
+@@ -2991,7 +3098,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
+ static int post_relocation(struct module *mod, const struct load_info *info)
+ {
+       /* Sort exception table now relocations are done. */
++      pax_open_kernel();
+       sort_extable(mod->extable, mod->extable + mod->num_exentries);
++      pax_close_kernel();
+       /* Copy relocated percpu area over. */
+       percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
+@@ -3045,16 +3154,16 @@ static int do_init_module(struct module *mod)
+                       MODULE_STATE_COMING, mod);
+       /* Set RO and NX regions for core */
+-      set_section_ro_nx(mod->module_core,
+-                              mod->core_text_size,
+-                              mod->core_ro_size,
+-                              mod->core_size);
++      set_section_ro_nx(mod->module_core_rx,
++                              mod->core_size_rx,
++                              mod->core_size_rx,
++                              mod->core_size_rx);
+       /* Set RO and NX regions for init */
+-      set_section_ro_nx(mod->module_init,
+-                              mod->init_text_size,
+-                              mod->init_ro_size,
+-                              mod->init_size);
++      set_section_ro_nx(mod->module_init_rx,
++                              mod->init_size_rx,
++                              mod->init_size_rx,
++                              mod->init_size_rx);
+       do_mod_ctors(mod);
+       /* Start the module */
+@@ -3116,11 +3225,12 @@ static int do_init_module(struct module *mod)
+       mod->strtab = mod->core_strtab;
+ #endif
+       unset_module_init_ro_nx(mod);
+-      module_free(mod, mod->module_init);
+-      mod->module_init = NULL;
+-      mod->init_size = 0;
+-      mod->init_ro_size = 0;
+-      mod->init_text_size = 0;
++      module_free(mod, mod->module_init_rw);
++      module_free_exec(mod, mod->module_init_rx);
++      mod->module_init_rw = NULL;
++      mod->module_init_rx = NULL;
++      mod->init_size_rw = 0;
++      mod->init_size_rx = 0;
+       mutex_unlock(&module_mutex);
+       wake_up_all(&module_wq);
+@@ -3252,9 +3362,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
+       if (err)
+               goto free_unload;
++      /* Now copy in args */
++      mod->args = strndup_user(uargs, ~0UL >> 1);
++      if (IS_ERR(mod->args)) {
++              err = PTR_ERR(mod->args);
++              goto free_unload;
++      }
++
+       /* Set up MODINFO_ATTR fields */
+       setup_modinfo(mod, info);
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++      {
++              char *p, *p2;
++
++              if (strstr(mod->args, "grsec_modharden_netdev")) {
++                      printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
++                      err = -EPERM;
++                      goto free_modinfo;
++              } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
++                      p += sizeof("grsec_modharden_normal") - 1;
++                      p2 = strstr(p, "_");
++                      if (p2) {
++                              *p2 = '\0';
++                              printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
++                              *p2 = '_';
++                      }
++                      err = -EPERM;
++                      goto free_modinfo;
++              }
++      }
++#endif
++
+       /* Fix up syms, so that st_value is a pointer to location. */
+       err = simplify_symbols(mod, info);
+       if (err < 0)
+@@ -3270,13 +3409,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
+       flush_module_icache(mod);
+-      /* Now copy in args */
+-      mod->args = strndup_user(uargs, ~0UL >> 1);
+-      if (IS_ERR(mod->args)) {
+-              err = PTR_ERR(mod->args);
+-              goto free_arch_cleanup;
+-      }
+-
+       dynamic_debug_setup(info->debug, info->num_debug);
+       /* Finally it's fully formed, ready to start executing. */
+@@ -3311,11 +3443,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
+  ddebug_cleanup:
+       dynamic_debug_remove(info->debug);
+       synchronize_sched();
+-      kfree(mod->args);
+- free_arch_cleanup:
+       module_arch_cleanup(mod);
+  free_modinfo:
+       free_modinfo(mod);
++      kfree(mod->args);
+  free_unload:
+       module_unload_free(mod);
+  unlink_mod:
+@@ -3398,10 +3529,16 @@ static const char *get_ksymbol(struct module *mod,
+       unsigned long nextval;
+       /* At worse, next value is at end of module */
+-      if (within_module_init(addr, mod))
+-              nextval = (unsigned long)mod->module_init+mod->init_text_size;
++      if (within_module_init_rx(addr, mod))
++              nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
++      else if (within_module_init_rw(addr, mod))
++              nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
++      else if (within_module_core_rx(addr, mod))
++              nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
++      else if (within_module_core_rw(addr, mod))
++              nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
+       else
+-              nextval = (unsigned long)mod->module_core+mod->core_text_size;
++              return NULL;
+       /* Scan for closest preceding symbol, and next symbol. (ELF
+          starts real symbols at 1). */
+@@ -3654,7 +3791,7 @@ static int m_show(struct seq_file *m, void *p)
+               return 0;
+       seq_printf(m, "%s %u",
+-                 mod->name, mod->init_size + mod->core_size);
++                 mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
+       print_unload_info(m, mod);
+       /* Informative for users. */
+@@ -3663,7 +3800,7 @@ static int m_show(struct seq_file *m, void *p)
+                  mod->state == MODULE_STATE_COMING ? "Loading":
+                  "Live");
+       /* Used by oprofile and other similar tools. */
+-      seq_printf(m, " 0x%pK", mod->module_core);
++      seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
+       /* Taints info */
+       if (mod->taints)
+@@ -3699,7 +3836,17 @@ static const struct file_operations proc_modules_operations = {
+ static int __init proc_modules_init(void)
+ {
++#ifndef CONFIG_GRKERNSEC_HIDESYM
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++      proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
++#else
+       proc_create("modules", 0, NULL, &proc_modules_operations);
++#endif
++#else
++      proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
++#endif
+       return 0;
+ }
+ module_init(proc_modules_init);
+@@ -3760,14 +3907,14 @@ struct module *__module_address(unsigned long addr)
+ {
+       struct module *mod;
+-      if (addr < module_addr_min || addr > module_addr_max)
++      if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
++          (addr < module_addr_min_rw || addr > module_addr_max_rw))
+               return NULL;
+       list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
+-              if (within_module_core(addr, mod)
+-                  || within_module_init(addr, mod))
++              if (within_module_init(addr, mod) || within_module_core(addr, mod))
+                       return mod;
+       }
+       return NULL;
+@@ -3802,11 +3949,20 @@ bool is_module_text_address(unsigned long addr)
+  */
+ struct module *__module_text_address(unsigned long addr)
+ {
+-      struct module *mod = __module_address(addr);
++      struct module *mod;
++
++#ifdef CONFIG_X86_32
++      addr = ktla_ktva(addr);
++#endif
++
++      if (addr < module_addr_min_rx || addr > module_addr_max_rx)
++              return NULL;
++
++      mod = __module_address(addr);
++
+       if (mod) {
+               /* Make sure it's within the text section. */
+-              if (!within(addr, mod->module_init, mod->init_text_size)
+-                  && !within(addr, mod->module_core, mod->core_text_size))
++              if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
+                       mod = NULL;
+       }
+       return mod;
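Editor's note: the kernel/module.c hunks above replace the single module_core/module_init mappings with separate RX (text and rodata) and RW (data) regions, so every address-range check and size computation has to consult both halves. The following stand-alone sketch only models that split in user space; the struct and helper names (mod_layout, within_region, is_text_addr) are invented simplifications, not the patch's or the kernel's API.

/* Simplified user-space model of the RX/RW module split shown above.
 * Names are illustrative only, not kernel API. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mod_layout {
        uintptr_t core_rx; size_t core_size_rx; /* text + rodata, mapped R-X */
        uintptr_t core_rw; size_t core_size_rw; /* data + bss,    mapped RW- */
};

static bool within_region(uintptr_t addr, uintptr_t base, size_t size)
{
        return addr >= base && addr < base + size;
}

/* A text-address check only consults the RX half, mirroring the reworked
 * __module_text_address() in the hunk above. */
static bool is_text_addr(const struct mod_layout *m, uintptr_t addr)
{
        return within_region(addr, m->core_rx, m->core_size_rx);
}

int main(void)
{
        struct mod_layout m = { .core_rx = 0x1000, .core_size_rx = 0x800,
                                .core_rw = 0x4000, .core_size_rw = 0x400 };
        printf("0x1100 in text: %d\n", is_text_addr(&m, 0x1100)); /* 1 */
        printf("0x4100 in text: %d\n", is_text_addr(&m, 0x4100)); /* 0 */
        return 0;
}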
+diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
+index 7e3443f..b2a1e6b 100644
+--- a/kernel/mutex-debug.c
++++ b/kernel/mutex-debug.c
+@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
+ }
+ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+-                          struct thread_info *ti)
++                          struct task_struct *task)
+ {
+       SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
+       /* Mark the current thread as blocked on the lock: */
+-      ti->task->blocked_on = waiter;
++      task->blocked_on = waiter;
+ }
+ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+-                       struct thread_info *ti)
++                       struct task_struct *task)
+ {
+       DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
+-      DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
+-      DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
+-      ti->task->blocked_on = NULL;
++      DEBUG_LOCKS_WARN_ON(waiter->task != task);
++      DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
++      task->blocked_on = NULL;
+       list_del_init(&waiter->list);
+       waiter->task = NULL;
+diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
+index 0799fd3..d06ae3b 100644
+--- a/kernel/mutex-debug.h
++++ b/kernel/mutex-debug.h
+@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
+ extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
+ extern void debug_mutex_add_waiter(struct mutex *lock,
+                                  struct mutex_waiter *waiter,
+-                                 struct thread_info *ti);
++                                 struct task_struct *task);
+ extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+-                              struct thread_info *ti);
++                              struct task_struct *task);
+ extern void debug_mutex_unlock(struct mutex *lock);
+ extern void debug_mutex_init(struct mutex *lock, const char *name,
+                            struct lock_class_key *key);
+diff --git a/kernel/mutex.c b/kernel/mutex.c
+index ad53a66..f1bf8bc 100644
+--- a/kernel/mutex.c
++++ b/kernel/mutex.c
+@@ -134,7 +134,7 @@ void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
+               node->locked = 1;
+               return;
+       }
+-      ACCESS_ONCE(prev->next) = node;
++      ACCESS_ONCE_RW(prev->next) = node;
+       smp_wmb();
+       /* Wait until the lock holder passes the lock down */
+       while (!ACCESS_ONCE(node->locked))
+@@ -155,7 +155,7 @@ static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
+               while (!(next = ACCESS_ONCE(node->next)))
+                       arch_mutex_cpu_relax();
+       }
+-      ACCESS_ONCE(next->locked) = 1;
++      ACCESS_ONCE_RW(next->locked) = 1;
+       smp_wmb();
+ }
+@@ -341,7 +341,7 @@ slowpath:
+       spin_lock_mutex(&lock->wait_lock, flags);
+       debug_mutex_lock_common(lock, &waiter);
+-      debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
++      debug_mutex_add_waiter(lock, &waiter, task);
+       /* add waiting tasks to the end of the waitqueue (FIFO): */
+       list_add_tail(&waiter.list, &lock->wait_list);
+@@ -371,8 +371,7 @@ slowpath:
+                * TASK_UNINTERRUPTIBLE case.)
+                */
+               if (unlikely(signal_pending_state(state, task))) {
+-                      mutex_remove_waiter(lock, &waiter,
+-                                          task_thread_info(task));
++                      mutex_remove_waiter(lock, &waiter, task);
+                       mutex_release(&lock->dep_map, 1, ip);
+                       spin_unlock_mutex(&lock->wait_lock, flags);
+@@ -391,7 +390,7 @@ slowpath:
+ done:
+       lock_acquired(&lock->dep_map, ip);
+       /* got the lock - rejoice! */
+-      mutex_remove_waiter(lock, &waiter, current_thread_info());
++      mutex_remove_waiter(lock, &waiter, task);
+       mutex_set_owner(lock);
+       /* set it to 0 if there are no waiters left: */
+diff --git a/kernel/notifier.c b/kernel/notifier.c
+index 2d5cc4c..d9ea600 100644
+--- a/kernel/notifier.c
++++ b/kernel/notifier.c
+@@ -5,6 +5,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/vmalloc.h>
+ #include <linux/reboot.h>
++#include <linux/mm.h>
+ /*
+  *    Notifier list for kernel code which wants to be called
+@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
+       while ((*nl) != NULL) {
+               if (n->priority > (*nl)->priority)
+                       break;
+-              nl = &((*nl)->next);
++              nl = (struct notifier_block **)&((*nl)->next);
+       }
+-      n->next = *nl;
++      pax_open_kernel();
++      *(const void **)&n->next = *nl;
+       rcu_assign_pointer(*nl, n);
++      pax_close_kernel();
+       return 0;
+ }
+@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
+                       return 0;
+               if (n->priority > (*nl)->priority)
+                       break;
+-              nl = &((*nl)->next);
++              nl = (struct notifier_block **)&((*nl)->next);
+       }
+-      n->next = *nl;
++      pax_open_kernel();
++      *(const void **)&n->next = *nl;
+       rcu_assign_pointer(*nl, n);
++      pax_close_kernel();
+       return 0;
+ }
+@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
+ {
+       while ((*nl) != NULL) {
+               if ((*nl) == n) {
++                      pax_open_kernel();
+                       rcu_assign_pointer(*nl, n->next);
++                      pax_close_kernel();
+                       return 0;
+               }
+-              nl = &((*nl)->next);
++              nl = (struct notifier_block **)&((*nl)->next);
+       }
+       return -ENOENT;
+ }
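Editor's note: the kernel/notifier.c hunk above brackets writes to the notifier chain with pax_open_kernel()/pax_close_kernel(), briefly lifting the write protection on otherwise read-only kernel data. As a rough user-space analogy only, the snippet below uses POSIX mprotect() to open and close a similar write window on a read-only page; it is not the kernel mechanism itself.

/* User-space analogy for the open/close write window used above. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        /* One page that we treat as "read-only kernel data". */
        char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;
        strcpy(p, "initial");
        mprotect(p, page, PROT_READ);              /* normally read-only  */

        mprotect(p, page, PROT_READ | PROT_WRITE); /* "open" the window   */
        strcpy(p, "updated");                      /* the protected write */
        mprotect(p, page, PROT_READ);              /* "close" it again    */

        printf("%s\n", p);
        munmap(p, page);
        return 0;
}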
+diff --git a/kernel/panic.c b/kernel/panic.c
+index 167ec09..0dda5f9 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -400,7 +400,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
+                                unsigned taint, struct slowpath_args *args)
+ {
+       printk(KERN_WARNING "------------[ cut here ]------------\n");
+-      printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
++      printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
+       if (args)
+               vprintk(args->fmt, args->args);
+@@ -453,7 +453,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
+  */
+ void __stack_chk_fail(void)
+ {
+-      panic("stack-protector: Kernel stack is corrupted in: %p\n",
++      dump_stack();
++      panic("stack-protector: Kernel stack is corrupted in: %pA\n",
+               __builtin_return_address(0));
+ }
+ EXPORT_SYMBOL(__stack_chk_fail);
+diff --git a/kernel/pid.c b/kernel/pid.c
+index 0db3e79..95b9dc2 100644
+--- a/kernel/pid.c
++++ b/kernel/pid.c
+@@ -33,6 +33,7 @@
+ #include <linux/rculist.h>
+ #include <linux/bootmem.h>
+ #include <linux/hash.h>
++#include <linux/security.h>
+ #include <linux/pid_namespace.h>
+ #include <linux/init_task.h>
+ #include <linux/syscalls.h>
+@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
+ int pid_max = PID_MAX_DEFAULT;
+-#define RESERVED_PIDS         300
++#define RESERVED_PIDS         500
+ int pid_max_min = RESERVED_PIDS + 1;
+ int pid_max_max = PID_MAX_LIMIT;
+@@ -442,10 +443,18 @@ EXPORT_SYMBOL(pid_task);
+  */
+ struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
+ {
++      struct task_struct *task;
++
+       rcu_lockdep_assert(rcu_read_lock_held(),
+                          "find_task_by_pid_ns() needs rcu_read_lock()"
+                          " protection");
+-      return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
++
++      task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
++
++      if (gr_pid_is_chrooted(task))
++              return NULL;
++
++      return task;
+ }
+ struct task_struct *find_task_by_vpid(pid_t vnr)
+@@ -453,6 +462,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
+       return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
+ }
++struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
++{
++      rcu_lockdep_assert(rcu_read_lock_held(),
++                         "find_task_by_pid_ns() needs rcu_read_lock()"
++                         " protection");
++      return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
++}
++
+ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
+ {
+       struct pid *pid;
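Editor's note: with the kernel/pid.c change above, find_task_by_pid_ns() hides tasks that gr_pid_is_chrooted() flags, while the new find_task_by_vpid_unrestricted() keeps an unfiltered lookup for callers that need one. The toy model below only shows that filtered-versus-unrestricted lookup shape; the task table and the chrooted flag are invented for illustration.

/* Toy model of a filtered vs. unrestricted task lookup; not kernel code. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task { int pid; bool chrooted; };

static struct task tasks[] = { { 1, false }, { 100, true } };

static struct task *find_task(int pid)          /* unrestricted lookup */
{
        for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
                if (tasks[i].pid == pid)
                        return &tasks[i];
        return NULL;
}

/* Filtered lookup: hide tasks the caller should not see, as the patched
 * find_task_by_pid_ns() does for chrooted tasks. */
static struct task *find_task_filtered(int pid)
{
        struct task *t = find_task(pid);
        return (t && t->chrooted) ? NULL : t;
}

int main(void)
{
        printf("pid 100 filtered:     %p\n", (void *)find_task_filtered(100));
        printf("pid 100 unrestricted: %p\n", (void *)find_task(100));
        return 0;
}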
+diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
+index 6917e8e..9909aeb 100644
+--- a/kernel/pid_namespace.c
++++ b/kernel/pid_namespace.c
+@@ -247,7 +247,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
+               void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+       struct pid_namespace *pid_ns = task_active_pid_ns(current);
+-      struct ctl_table tmp = *table;
++      ctl_table_no_const tmp = *table;
+       if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
+               return -EPERM;
+diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
+index 42670e9..8719c2f 100644
+--- a/kernel/posix-cpu-timers.c
++++ b/kernel/posix-cpu-timers.c
+@@ -1636,14 +1636,14 @@ struct k_clock clock_posix_cpu = {
+ static __init int init_posix_cpu_timers(void)
+ {
+-      struct k_clock process = {
++      static struct k_clock process = {
+               .clock_getres   = process_cpu_clock_getres,
+               .clock_get      = process_cpu_clock_get,
+               .timer_create   = process_cpu_timer_create,
+               .nsleep         = process_cpu_nsleep,
+               .nsleep_restart = process_cpu_nsleep_restart,
+       };
+-      struct k_clock thread = {
++      static struct k_clock thread = {
+               .clock_getres   = thread_cpu_clock_getres,
+               .clock_get      = thread_cpu_clock_get,
+               .timer_create   = thread_cpu_timer_create,
+diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
+index 424c2d4..679242f 100644
+--- a/kernel/posix-timers.c
++++ b/kernel/posix-timers.c
+@@ -43,6 +43,7 @@
+ #include <linux/hash.h>
+ #include <linux/posix-clock.h>
+ #include <linux/posix-timers.h>
++#include <linux/grsecurity.h>
+ #include <linux/syscalls.h>
+ #include <linux/wait.h>
+ #include <linux/workqueue.h>
+@@ -122,7 +123,7 @@ static DEFINE_SPINLOCK(hash_lock);
+  *        which we beg off on and pass to do_sys_settimeofday().
+  */
+-static struct k_clock posix_clocks[MAX_CLOCKS];
++static struct k_clock *posix_clocks[MAX_CLOCKS];
+ /*
+  * These ones are defined below.
+@@ -275,7 +276,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
+  */
+ static __init int init_posix_timers(void)
+ {
+-      struct k_clock clock_realtime = {
++      static struct k_clock clock_realtime = {
+               .clock_getres   = hrtimer_get_res,
+               .clock_get      = posix_clock_realtime_get,
+               .clock_set      = posix_clock_realtime_set,
+@@ -287,7 +288,7 @@ static __init int init_posix_timers(void)
+               .timer_get      = common_timer_get,
+               .timer_del      = common_timer_del,
+       };
+-      struct k_clock clock_monotonic = {
++      static struct k_clock clock_monotonic = {
+               .clock_getres   = hrtimer_get_res,
+               .clock_get      = posix_ktime_get_ts,
+               .nsleep         = common_nsleep,
+@@ -297,19 +298,19 @@ static __init int init_posix_timers(void)
+               .timer_get      = common_timer_get,
+               .timer_del      = common_timer_del,
+       };
+-      struct k_clock clock_monotonic_raw = {
++      static struct k_clock clock_monotonic_raw = {
+               .clock_getres   = hrtimer_get_res,
+               .clock_get      = posix_get_monotonic_raw,
+       };
+-      struct k_clock clock_realtime_coarse = {
++      static struct k_clock clock_realtime_coarse = {
+               .clock_getres   = posix_get_coarse_res,
+               .clock_get      = posix_get_realtime_coarse,
+       };
+-      struct k_clock clock_monotonic_coarse = {
++      static struct k_clock clock_monotonic_coarse = {
+               .clock_getres   = posix_get_coarse_res,
+               .clock_get      = posix_get_monotonic_coarse,
+       };
+-      struct k_clock clock_tai = {
++      static struct k_clock clock_tai = {
+               .clock_getres   = hrtimer_get_res,
+               .clock_get      = posix_get_tai,
+               .nsleep         = common_nsleep,
+@@ -319,7 +320,7 @@ static __init int init_posix_timers(void)
+               .timer_get      = common_timer_get,
+               .timer_del      = common_timer_del,
+       };
+-      struct k_clock clock_boottime = {
++      static struct k_clock clock_boottime = {
+               .clock_getres   = hrtimer_get_res,
+               .clock_get      = posix_get_boottime,
+               .nsleep         = common_nsleep,
+@@ -531,7 +532,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
+               return;
+       }
+-      posix_clocks[clock_id] = *new_clock;
++      posix_clocks[clock_id] = new_clock;
+ }
+ EXPORT_SYMBOL_GPL(posix_timers_register_clock);
+@@ -577,9 +578,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
+               return (id & CLOCKFD_MASK) == CLOCKFD ?
+                       &clock_posix_dynamic : &clock_posix_cpu;
+-      if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
++      if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
+               return NULL;
+-      return &posix_clocks[id];
++      return posix_clocks[id];
+ }
+ static int common_timer_create(struct k_itimer *new_timer)
+@@ -597,7 +598,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
+       struct k_clock *kc = clockid_to_kclock(which_clock);
+       struct k_itimer *new_timer;
+       int error, new_timer_id;
+-      sigevent_t event;
++      sigevent_t event = { };
+       int it_id_set = IT_ID_NOT_SET;
+       if (!kc)
+@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
+       if (copy_from_user(&new_tp, tp, sizeof (*tp)))
+               return -EFAULT;
++      /* Only the CLOCK_REALTIME clock can be set; all other clocks have
++         their clock_set fptr set to a nosettime dummy function.
++         CLOCK_REALTIME has a NULL clock_set fptr, which causes it to call
++         common_clock_set, which in turn calls do_sys_settimeofday, which
++         we hook.
++      */
++
+       return kc->clock_set(which_clock, &new_tp);
+ }
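Editor's note: the kernel/posix-timers.c hunks above turn posix_clocks[] from an array of struct k_clock copies into an array of pointers to statically allocated descriptors, so registration stores a pointer and lookup gains a NULL check before dereferencing. A minimal stand-alone model of that pointer-table pattern follows; the names (clock_ops, register_clock, lookup_clock) are invented for the sketch.

/* Minimal model of the pointer-table change above: registration stores a
 * pointer to a static descriptor instead of copying the struct. */
#include <stddef.h>
#include <stdio.h>

struct clock_ops { int (*getres)(void); };

#define MAX_CLK 4
static const struct clock_ops *clock_table[MAX_CLK];

static void register_clock(int id, const struct clock_ops *ops)
{
        if (id >= 0 && id < MAX_CLK)
                clock_table[id] = ops;          /* pointer, not a struct copy */
}

static const struct clock_ops *lookup_clock(int id)
{
        if (id < 0 || id >= MAX_CLK || !clock_table[id] || !clock_table[id]->getres)
                return NULL;                    /* mirrors the NULL check added above */
        return clock_table[id];
}

static int dummy_getres(void) { return 1; }

int main(void)
{
        static const struct clock_ops realtime = { .getres = dummy_getres };
        register_clock(0, &realtime);
        printf("clock 0 %s\n", lookup_clock(0) ? "found" : "missing");
        printf("clock 1 %s\n", lookup_clock(1) ? "found" : "missing");
        return 0;
}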
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 98088e0..aaf95c0 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
+       u64 elapsed_csecs64;
+       unsigned int elapsed_csecs;
+       bool wakeup = false;
++      bool timedout = false;
+       do_gettimeofday(&start);
+@@ -43,13 +44,20 @@ static int try_to_freeze_tasks(bool user_only)
+       while (true) {
+               todo = 0;
++              if (time_after(jiffies, end_time))
++                      timedout = true;
+               read_lock(&tasklist_lock);
+               do_each_thread(g, p) {
+                       if (p == current || !freeze_task(p))
+                               continue;
+-                      if (!freezer_should_skip(p))
++                      if (!freezer_should_skip(p)) {
+                               todo++;
++                              if (timedout) {
++                                      printk(KERN_ERR "Task refusing to freeze:\n");
++                                      sched_show_task(p);
++                              }
++                      }
+               } while_each_thread(g, p);
+               read_unlock(&tasklist_lock);
+@@ -58,7 +66,7 @@ static int try_to_freeze_tasks(bool user_only)
+                       todo += wq_busy;
+               }
+-              if (!todo || time_after(jiffies, end_time))
++              if (!todo || timedout)
+                       break;
+               if (pm_wakeup_pending()) {
+diff --git a/kernel/printk.c b/kernel/printk.c
+index d37d45c..ab918b3 100644
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -390,6 +390,11 @@ static int check_syslog_permissions(int type, bool from_file)
+       if (from_file && type != SYSLOG_ACTION_OPEN)
+               return 0;
++#ifdef CONFIG_GRKERNSEC_DMESG
++      if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
++              return -EPERM;
++#endif
++
+       if (syslog_action_restricted(type)) {
+               if (capable(CAP_SYSLOG))
+                       return 0;
+diff --git a/kernel/profile.c b/kernel/profile.c
+index 0bf4007..6234708 100644
+--- a/kernel/profile.c
++++ b/kernel/profile.c
+@@ -37,7 +37,7 @@ struct profile_hit {
+ #define NR_PROFILE_HIT                (PAGE_SIZE/sizeof(struct profile_hit))
+ #define NR_PROFILE_GRP                (NR_PROFILE_HIT/PROFILE_GRPSZ)
+-static atomic_t *prof_buffer;
++static atomic_unchecked_t *prof_buffer;
+ static unsigned long prof_len, prof_shift;
+ int prof_on __read_mostly;
+@@ -260,7 +260,7 @@ static void profile_flip_buffers(void)
+                                       hits[i].pc = 0;
+                               continue;
+                       }
+-                      atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
++                      atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
+                       hits[i].hits = hits[i].pc = 0;
+               }
+       }
+@@ -321,9 +321,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
+        * Add the current hit(s) and flush the write-queue out
+        * to the global buffer:
+        */
+-      atomic_add(nr_hits, &prof_buffer[pc]);
++      atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
+       for (i = 0; i < NR_PROFILE_HIT; ++i) {
+-              atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
++              atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
+               hits[i].pc = hits[i].hits = 0;
+       }
+ out:
+@@ -398,7 +398,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
+ {
+       unsigned long pc;
+       pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
+-      atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
++      atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
+ }
+ #endif /* !CONFIG_SMP */
+@@ -494,7 +494,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+                       return -EFAULT;
+               buf++; p++; count--; read++;
+       }
+-      pnt = (char *)prof_buffer + p - sizeof(atomic_t);
++      pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
+       if (copy_to_user(buf, (void *)pnt, count))
+               return -EFAULT;
+       read += count;
+@@ -525,7 +525,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
+       }
+ #endif
+       profile_discard_flip_buffers();
+-      memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
++      memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
+       return count;
+ }
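Editor's note: the kernel/profile.c hunk above converts the profiling hit counters from atomic_t to atomic_unchecked_t, i.e. counters whose wrap-around is harmless are exempted from overflow checking. The sketch below only illustrates that checked-versus-unchecked distinction, using GCC/Clang __builtin_add_overflow as a stand-in for the instrumented atomics; it is an analogy, not the PaX implementation.

/* Checked vs. unchecked counter increments, as a user-space analogy. */
#include <limits.h>
#include <stdio.h>

/* "Checked" add: detect signed overflow instead of letting it wrap. */
static int checked_add(int *v, int inc)
{
        int res;
        if (__builtin_add_overflow(*v, inc, &res)) {
                fprintf(stderr, "overflow detected, value saturated\n");
                *v = INT_MAX;
                return -1;
        }
        *v = res;
        return 0;
}

/* "Unchecked" add: plain wrap-around, acceptable for pure statistics such
 * as the profiling hit counters converted above. */
static void unchecked_add(unsigned int *v, unsigned int inc)
{
        *v += inc;
}

int main(void)
{
        int refs = INT_MAX;
        unsigned int hits = 0xffffffffu;
        checked_add(&refs, 1);      /* reports overflow    */
        unchecked_add(&hits, 1);    /* silently wraps to 0 */
        printf("refs=%d hits=%u\n", refs, hits);
        return 0;
}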
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 335a7ae..3bbbceb 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -326,7 +326,7 @@ static int ptrace_attach(struct task_struct *task, long request,
+       if (seize)
+               flags |= PT_SEIZED;
+       rcu_read_lock();
+-      if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
++      if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
+               flags |= PT_PTRACE_CAP;
+       rcu_read_unlock();
+       task->ptrace = flags;
+@@ -537,7 +537,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
+                               break;
+                       return -EIO;
+               }
+-              if (copy_to_user(dst, buf, retval))
++              if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
+                       return -EFAULT;
+               copied += retval;
+               src += retval;
+@@ -805,7 +805,7 @@ int ptrace_request(struct task_struct *child, long request,
+       bool seized = child->ptrace & PT_SEIZED;
+       int ret = -EIO;
+       siginfo_t siginfo, *si;
+-      void __user *datavp = (void __user *) data;
++      void __user *datavp = (__force void __user *) data;
+       unsigned long __user *datalp = datavp;
+       unsigned long flags;
+@@ -1011,14 +1011,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
+               goto out;
+       }
++      if (gr_handle_ptrace(child, request)) {
++              ret = -EPERM;
++              goto out_put_task_struct;
++      }
++
+       if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
+               ret = ptrace_attach(child, request, addr, data);
+               /*
+                * Some architectures need to do book-keeping after
+                * a ptrace attach.
+                */
+-              if (!ret)
++              if (!ret) {
+                       arch_ptrace_attach(child);
++                      gr_audit_ptrace(child);
++              }
+               goto out_put_task_struct;
+       }
+@@ -1046,7 +1053,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
+       copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
+       if (copied != sizeof(tmp))
+               return -EIO;
+-      return put_user(tmp, (unsigned long __user *)data);
++      return put_user(tmp, (__force unsigned long __user *)data);
+ }
+ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
+@@ -1140,7 +1147,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
+ }
+ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+-                                compat_long_t addr, compat_long_t data)
++                                compat_ulong_t addr, compat_ulong_t data)
+ {
+       struct task_struct *child;
+       long ret;
+@@ -1156,14 +1163,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+               goto out;
+       }
++      if (gr_handle_ptrace(child, request)) {
++              ret = -EPERM;
++              goto out_put_task_struct;
++      }
++
+       if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
+               ret = ptrace_attach(child, request, addr, data);
+               /*
+                * Some architectures need to do book-keeping after
+                * a ptrace attach.
+                */
+-              if (!ret)
++              if (!ret) {
+                       arch_ptrace_attach(child);
++                      gr_audit_ptrace(child);
++              }
+               goto out_put_task_struct;
+       }
+diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
+index 48ab703..07561d4 100644
+--- a/kernel/rcupdate.c
++++ b/kernel/rcupdate.c
+@@ -439,10 +439,10 @@ int rcu_jiffies_till_stall_check(void)
+        * for CONFIG_RCU_CPU_STALL_TIMEOUT.
+        */
+       if (till_stall_check < 3) {
+-              ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
++              ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
+               till_stall_check = 3;
+       } else if (till_stall_check > 300) {
+-              ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
++              ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
+               till_stall_check = 300;
+       }
+       return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
+diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
+index a0714a5..2ab5e34 100644
+--- a/kernel/rcutiny.c
++++ b/kernel/rcutiny.c
+@@ -46,7 +46,7 @@
+ struct rcu_ctrlblk;
+ static void invoke_rcu_callbacks(void);
+ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
+-static void rcu_process_callbacks(struct softirq_action *unused);
++static void rcu_process_callbacks(void);
+ static void __call_rcu(struct rcu_head *head,
+                      void (*func)(struct rcu_head *rcu),
+                      struct rcu_ctrlblk *rcp);
+@@ -312,7 +312,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
+                                     rcu_is_callbacks_kthread()));
+ }
+-static void rcu_process_callbacks(struct softirq_action *unused)
++static void rcu_process_callbacks(void)
+ {
+       __rcu_process_callbacks(&rcu_sched_ctrlblk);
+       __rcu_process_callbacks(&rcu_bh_ctrlblk);
+diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
+index 8a23300..4255818 100644
+--- a/kernel/rcutiny_plugin.h
++++ b/kernel/rcutiny_plugin.h
+@@ -945,7 +945,7 @@ static int rcu_kthread(void *arg)
+               have_rcu_kthread_work = morework;
+               local_irq_restore(flags);
+               if (work)
+-                      rcu_process_callbacks(NULL);
++                      rcu_process_callbacks();
+               schedule_timeout_interruptible(1); /* Leave CPU for others. */
+       }
+diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
+index e1f3a8c..42c94a2 100644
+--- a/kernel/rcutorture.c
++++ b/kernel/rcutorture.c
+@@ -164,12 +164,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
+       { 0 };
+ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
+       { 0 };
+-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
+-static atomic_t n_rcu_torture_alloc;
+-static atomic_t n_rcu_torture_alloc_fail;
+-static atomic_t n_rcu_torture_free;
+-static atomic_t n_rcu_torture_mberror;
+-static atomic_t n_rcu_torture_error;
++static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
++static atomic_unchecked_t n_rcu_torture_alloc;
++static atomic_unchecked_t n_rcu_torture_alloc_fail;
++static atomic_unchecked_t n_rcu_torture_free;
++static atomic_unchecked_t n_rcu_torture_mberror;
++static atomic_unchecked_t n_rcu_torture_error;
+ static long n_rcu_torture_barrier_error;
+ static long n_rcu_torture_boost_ktrerror;
+ static long n_rcu_torture_boost_rterror;
+@@ -287,11 +287,11 @@ rcu_torture_alloc(void)
+       spin_lock_bh(&rcu_torture_lock);
+       if (list_empty(&rcu_torture_freelist)) {
+-              atomic_inc(&n_rcu_torture_alloc_fail);
++              atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
+               spin_unlock_bh(&rcu_torture_lock);
+               return NULL;
+       }
+-      atomic_inc(&n_rcu_torture_alloc);
++      atomic_inc_unchecked(&n_rcu_torture_alloc);
+       p = rcu_torture_freelist.next;
+       list_del_init(p);
+       spin_unlock_bh(&rcu_torture_lock);
+@@ -304,7 +304,7 @@ rcu_torture_alloc(void)
+ static void
+ rcu_torture_free(struct rcu_torture *p)
+ {
+-      atomic_inc(&n_rcu_torture_free);
++      atomic_inc_unchecked(&n_rcu_torture_free);
+       spin_lock_bh(&rcu_torture_lock);
+       list_add_tail(&p->rtort_free, &rcu_torture_freelist);
+       spin_unlock_bh(&rcu_torture_lock);
+@@ -424,7 +424,7 @@ rcu_torture_cb(struct rcu_head *p)
+       i = rp->rtort_pipe_count;
+       if (i > RCU_TORTURE_PIPE_LEN)
+               i = RCU_TORTURE_PIPE_LEN;
+-      atomic_inc(&rcu_torture_wcount[i]);
++      atomic_inc_unchecked(&rcu_torture_wcount[i]);
+       if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
+               rp->rtort_mbtest = 0;
+               rcu_torture_free(rp);
+@@ -472,7 +472,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
+               i = rp->rtort_pipe_count;
+               if (i > RCU_TORTURE_PIPE_LEN)
+                       i = RCU_TORTURE_PIPE_LEN;
+-              atomic_inc(&rcu_torture_wcount[i]);
++              atomic_inc_unchecked(&rcu_torture_wcount[i]);
+               if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
+                       rp->rtort_mbtest = 0;
+                       list_del(&rp->rtort_free);
+@@ -990,7 +990,7 @@ rcu_torture_writer(void *arg)
+                       i = old_rp->rtort_pipe_count;
+                       if (i > RCU_TORTURE_PIPE_LEN)
+                               i = RCU_TORTURE_PIPE_LEN;
+-                      atomic_inc(&rcu_torture_wcount[i]);
++                      atomic_inc_unchecked(&rcu_torture_wcount[i]);
+                       old_rp->rtort_pipe_count++;
+                       cur_ops->deferred_free(old_rp);
+               }
+@@ -1076,7 +1076,7 @@ static void rcu_torture_timer(unsigned long unused)
+               return;
+       }
+       if (p->rtort_mbtest == 0)
+-              atomic_inc(&n_rcu_torture_mberror);
++              atomic_inc_unchecked(&n_rcu_torture_mberror);
+       spin_lock(&rand_lock);
+       cur_ops->read_delay(&rand);
+       n_rcu_torture_timers++;
+@@ -1146,7 +1146,7 @@ rcu_torture_reader(void *arg)
+                       continue;
+               }
+               if (p->rtort_mbtest == 0)
+-                      atomic_inc(&n_rcu_torture_mberror);
++                      atomic_inc_unchecked(&n_rcu_torture_mberror);
+               cur_ops->read_delay(&rand);
+               preempt_disable();
+               pipe_count = p->rtort_pipe_count;
+@@ -1209,11 +1209,11 @@ rcu_torture_printk(char *page)
+                      rcu_torture_current,
+                      rcu_torture_current_version,
+                      list_empty(&rcu_torture_freelist),
+-                     atomic_read(&n_rcu_torture_alloc),
+-                     atomic_read(&n_rcu_torture_alloc_fail),
+-                     atomic_read(&n_rcu_torture_free));
++                     atomic_read_unchecked(&n_rcu_torture_alloc),
++                     atomic_read_unchecked(&n_rcu_torture_alloc_fail),
++                     atomic_read_unchecked(&n_rcu_torture_free));
+       cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
+-                     atomic_read(&n_rcu_torture_mberror),
++                     atomic_read_unchecked(&n_rcu_torture_mberror),
+                      n_rcu_torture_boost_ktrerror,
+                      n_rcu_torture_boost_rterror);
+       cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
+@@ -1232,14 +1232,14 @@ rcu_torture_printk(char *page)
+                      n_barrier_attempts,
+                      n_rcu_torture_barrier_error);
+       cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
+-      if (atomic_read(&n_rcu_torture_mberror) != 0 ||
++      if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
+           n_rcu_torture_barrier_error != 0 ||
+           n_rcu_torture_boost_ktrerror != 0 ||
+           n_rcu_torture_boost_rterror != 0 ||
+           n_rcu_torture_boost_failure != 0 ||
+           i > 1) {
+               cnt += sprintf(&page[cnt], "!!! ");
+-              atomic_inc(&n_rcu_torture_error);
++              atomic_inc_unchecked(&n_rcu_torture_error);
+               WARN_ON_ONCE(1);
+       }
+       cnt += sprintf(&page[cnt], "Reader Pipe: ");
+@@ -1253,7 +1253,7 @@ rcu_torture_printk(char *page)
+       cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
+       for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
+               cnt += sprintf(&page[cnt], " %d",
+-                             atomic_read(&rcu_torture_wcount[i]));
++                             atomic_read_unchecked(&rcu_torture_wcount[i]));
+       }
+       cnt += sprintf(&page[cnt], "\n");
+       if (cur_ops->stats)
+@@ -1962,7 +1962,7 @@ rcu_torture_cleanup(void)
+       rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
+-      if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
++      if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
+               rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
+       else if (n_online_successes != n_online_attempts ||
+                n_offline_successes != n_offline_attempts)
+@@ -2031,18 +2031,18 @@ rcu_torture_init(void)
+       rcu_torture_current = NULL;
+       rcu_torture_current_version = 0;
+-      atomic_set(&n_rcu_torture_alloc, 0);
+-      atomic_set(&n_rcu_torture_alloc_fail, 0);
+-      atomic_set(&n_rcu_torture_free, 0);
+-      atomic_set(&n_rcu_torture_mberror, 0);
+-      atomic_set(&n_rcu_torture_error, 0);
++      atomic_set_unchecked(&n_rcu_torture_alloc, 0);
++      atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
++      atomic_set_unchecked(&n_rcu_torture_free, 0);
++      atomic_set_unchecked(&n_rcu_torture_mberror, 0);
++      atomic_set_unchecked(&n_rcu_torture_error, 0);
+       n_rcu_torture_barrier_error = 0;
+       n_rcu_torture_boost_ktrerror = 0;
+       n_rcu_torture_boost_rterror = 0;
+       n_rcu_torture_boost_failure = 0;
+       n_rcu_torture_boosts = 0;
+       for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
+-              atomic_set(&rcu_torture_wcount[i], 0);
++              atomic_set_unchecked(&rcu_torture_wcount[i], 0);
+       for_each_possible_cpu(cpu) {
+               for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
+                       per_cpu(rcu_torture_count, cpu)[i] = 0;
+diff --git a/kernel/rcutree.c b/kernel/rcutree.c
+index 3538001..e379e0b 100644
+--- a/kernel/rcutree.c
++++ b/kernel/rcutree.c
+@@ -358,9 +358,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
+       rcu_prepare_for_idle(smp_processor_id());
+       /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+       smp_mb__before_atomic_inc();  /* See above. */
+-      atomic_inc(&rdtp->dynticks);
++      atomic_inc_unchecked(&rdtp->dynticks);
+       smp_mb__after_atomic_inc();  /* Force ordering with next sojourn. */
+-      WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
++      WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
+       /*
+        * It is illegal to enter an extended quiescent state while
+@@ -496,10 +496,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
+                              int user)
+ {
+       smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
+-      atomic_inc(&rdtp->dynticks);
++      atomic_inc_unchecked(&rdtp->dynticks);
+       /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+       smp_mb__after_atomic_inc();  /* See above. */
+-      WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
++      WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
+       rcu_cleanup_after_idle(smp_processor_id());
+       trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
+       if (!user && !is_idle_task(current)) {
+@@ -638,14 +638,14 @@ void rcu_nmi_enter(void)
+       struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+       if (rdtp->dynticks_nmi_nesting == 0 &&
+-          (atomic_read(&rdtp->dynticks) & 0x1))
++          (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
+               return;
+       rdtp->dynticks_nmi_nesting++;
+       smp_mb__before_atomic_inc();  /* Force delay from prior write. */
+-      atomic_inc(&rdtp->dynticks);
++      atomic_inc_unchecked(&rdtp->dynticks);
+       /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+       smp_mb__after_atomic_inc();  /* See above. */
+-      WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
++      WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
+ }
+ /**
+@@ -664,9 +664,9 @@ void rcu_nmi_exit(void)
+               return;
+       /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+       smp_mb__before_atomic_inc();  /* See above. */
+-      atomic_inc(&rdtp->dynticks);
++      atomic_inc_unchecked(&rdtp->dynticks);
+       smp_mb__after_atomic_inc();  /* Force delay to next write. */
+-      WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
++      WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
+ }
+ /**
+@@ -680,7 +680,7 @@ int rcu_is_cpu_idle(void)
+       int ret;
+       preempt_disable();
+-      ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
++      ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
+       preempt_enable();
+       return ret;
+ }
+@@ -748,7 +748,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
+  */
+ static int dyntick_save_progress_counter(struct rcu_data *rdp)
+ {
+-      rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
++      rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
+       return (rdp->dynticks_snap & 0x1) == 0;
+ }
+@@ -763,7 +763,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
+       unsigned int curr;
+       unsigned int snap;
+-      curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
++      curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
+       snap = (unsigned int)rdp->dynticks_snap;
+       /*
+@@ -1440,9 +1440,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
+               rdp = this_cpu_ptr(rsp->rda);
+               rcu_preempt_check_blocked_tasks(rnp);
+               rnp->qsmask = rnp->qsmaskinit;
+-              ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
++              ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
+               WARN_ON_ONCE(rnp->completed != rsp->completed);
+-              ACCESS_ONCE(rnp->completed) = rsp->completed;
++              ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
+               if (rnp == rdp->mynode)
+                       rcu_start_gp_per_cpu(rsp, rnp, rdp);
+               rcu_preempt_boost_start_gp(rnp);
+@@ -1524,7 +1524,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
+        */
+       rcu_for_each_node_breadth_first(rsp, rnp) {
+               raw_spin_lock_irq(&rnp->lock);
+-              ACCESS_ONCE(rnp->completed) = rsp->gpnum;
++              ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
+               rdp = this_cpu_ptr(rsp->rda);
+               if (rnp == rdp->mynode)
+                       __rcu_process_gp_end(rsp, rnp, rdp);
+@@ -1855,7 +1855,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
+               rsp->qlen += rdp->qlen;
+               rdp->n_cbs_orphaned += rdp->qlen;
+               rdp->qlen_lazy = 0;
+-              ACCESS_ONCE(rdp->qlen) = 0;
++              ACCESS_ONCE_RW(rdp->qlen) = 0;
+       }
+       /*
+@@ -2101,7 +2101,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
+       }
+       smp_mb(); /* List handling before counting for rcu_barrier(). */
+       rdp->qlen_lazy -= count_lazy;
+-      ACCESS_ONCE(rdp->qlen) -= count;
++      ACCESS_ONCE_RW(rdp->qlen) -= count;
+       rdp->n_cbs_invoked += count;
+       /* Reinstate batch limit if we have worked down the excess. */
+@@ -2295,7 +2295,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
+ /*
+  * Do RCU core processing for the current CPU.
+  */
+-static void rcu_process_callbacks(struct softirq_action *unused)
++static void rcu_process_callbacks(void)
+ {
+       struct rcu_state *rsp;
+@@ -2419,7 +2419,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
+               local_irq_restore(flags);
+               return;
+       }
+-      ACCESS_ONCE(rdp->qlen)++;
++      ACCESS_ONCE_RW(rdp->qlen)++;
+       if (lazy)
+               rdp->qlen_lazy++;
+       else
+@@ -2628,11 +2628,11 @@ void synchronize_sched_expedited(void)
+        * counter wrap on a 32-bit system.  Quite a few more CPUs would of
+        * course be required on a 64-bit system.
+        */
+-      if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
++      if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
+                        (ulong)atomic_long_read(&rsp->expedited_done) +
+                        ULONG_MAX / 8)) {
+               synchronize_sched();
+-              atomic_long_inc(&rsp->expedited_wrap);
++              atomic_long_inc_unchecked(&rsp->expedited_wrap);
+               return;
+       }
+@@ -2640,7 +2640,7 @@ void synchronize_sched_expedited(void)
+        * Take a ticket.  Note that atomic_inc_return() implies a
+        * full memory barrier.
+        */
+-      snap = atomic_long_inc_return(&rsp->expedited_start);
++      snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
+       firstsnap = snap;
+       get_online_cpus();
+       WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
+@@ -2653,14 +2653,14 @@ void synchronize_sched_expedited(void)
+                            synchronize_sched_expedited_cpu_stop,
+                            NULL) == -EAGAIN) {
+               put_online_cpus();
+-              atomic_long_inc(&rsp->expedited_tryfail);
++              atomic_long_inc_unchecked(&rsp->expedited_tryfail);
+               /* Check to see if someone else did our work for us. */
+               s = atomic_long_read(&rsp->expedited_done);
+               if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
+                       /* ensure test happens before caller kfree */
+                       smp_mb__before_atomic_inc(); /* ^^^ */
+-                      atomic_long_inc(&rsp->expedited_workdone1);
++                      atomic_long_inc_unchecked(&rsp->expedited_workdone1);
+                       return;
+               }
+@@ -2669,7 +2669,7 @@ void synchronize_sched_expedited(void)
+                       udelay(trycount * num_online_cpus());
+               } else {
+                       wait_rcu_gp(call_rcu_sched);
+-                      atomic_long_inc(&rsp->expedited_normal);
++                      atomic_long_inc_unchecked(&rsp->expedited_normal);
+                       return;
+               }
+@@ -2678,7 +2678,7 @@ void synchronize_sched_expedited(void)
+               if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
+                       /* ensure test happens before caller kfree */
+                       smp_mb__before_atomic_inc(); /* ^^^ */
+-                      atomic_long_inc(&rsp->expedited_workdone2);
++                      atomic_long_inc_unchecked(&rsp->expedited_workdone2);
+                       return;
+               }
+@@ -2690,10 +2690,10 @@ void synchronize_sched_expedited(void)
+                * period works for us.
+                */
+               get_online_cpus();
+-              snap = atomic_long_read(&rsp->expedited_start);
++              snap = atomic_long_read_unchecked(&rsp->expedited_start);
+               smp_mb(); /* ensure read is before try_stop_cpus(). */
+       }
+-      atomic_long_inc(&rsp->expedited_stoppedcpus);
++      atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
+       /*
+        * Everyone up to our most recent fetch is covered by our grace
+@@ -2702,16 +2702,16 @@ void synchronize_sched_expedited(void)
+        * than we did already did their update.
+        */
+       do {
+-              atomic_long_inc(&rsp->expedited_done_tries);
++              atomic_long_inc_unchecked(&rsp->expedited_done_tries);
+               s = atomic_long_read(&rsp->expedited_done);
+               if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
+                       /* ensure test happens before caller kfree */
+                       smp_mb__before_atomic_inc(); /* ^^^ */
+-                      atomic_long_inc(&rsp->expedited_done_lost);
++                      atomic_long_inc_unchecked(&rsp->expedited_done_lost);
+                       break;
+               }
+       } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
+-      atomic_long_inc(&rsp->expedited_done_exit);
++      atomic_long_inc_unchecked(&rsp->expedited_done_exit);
+       put_online_cpus();
+ }
+@@ -2893,7 +2893,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
+        * ACCESS_ONCE() to prevent the compiler from speculating
+        * the increment to precede the early-exit check.
+        */
+-      ACCESS_ONCE(rsp->n_barrier_done)++;
++      ACCESS_ONCE_RW(rsp->n_barrier_done)++;
+       WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
+       _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
+       smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
+@@ -2943,7 +2943,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
+       /* Increment ->n_barrier_done to prevent duplicate work. */
+       smp_mb(); /* Keep increment after above mechanism. */
+-      ACCESS_ONCE(rsp->n_barrier_done)++;
++      ACCESS_ONCE_RW(rsp->n_barrier_done)++;
+       WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
+       _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
+       smp_mb(); /* Keep increment before caller's subsequent code. */
+@@ -2988,10 +2988,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
+       rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
+       init_callback_list(rdp);
+       rdp->qlen_lazy = 0;
+-      ACCESS_ONCE(rdp->qlen) = 0;
++      ACCESS_ONCE_RW(rdp->qlen) = 0;
+       rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
+       WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
+-      WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
++      WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
+       rdp->cpu = cpu;
+       rdp->rsp = rsp;
+       rcu_boot_init_nocb_percpu_data(rdp);
+@@ -3024,8 +3024,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
+       rdp->blimit = blimit;
+       init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
+       rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
+-      atomic_set(&rdp->dynticks->dynticks,
+-                 (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
++      atomic_set_unchecked(&rdp->dynticks->dynticks,
++                 (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
+       raw_spin_unlock(&rnp->lock);            /* irqs remain disabled. */
+       /* Add CPU to rcu_node bitmasks. */
+@@ -3120,7 +3120,7 @@ static int __init rcu_spawn_gp_kthread(void)
+       struct task_struct *t;
+       for_each_rcu_flavor(rsp) {
+-              t = kthread_run(rcu_gp_kthread, rsp, rsp->name);
++              t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
+               BUG_ON(IS_ERR(t));
+               rnp = rcu_get_root(rsp);
+               raw_spin_lock_irqsave(&rnp->lock, flags);
+diff --git a/kernel/rcutree.h b/kernel/rcutree.h
+index 4df5034..5ee93f2 100644
+--- a/kernel/rcutree.h
++++ b/kernel/rcutree.h
+@@ -87,7 +87,7 @@ struct rcu_dynticks {
+       long long dynticks_nesting; /* Track irq/process nesting level. */
+                                   /* Process level is worth LLONG_MAX/2. */
+       int dynticks_nmi_nesting;   /* Track NMI nesting level. */
+-      atomic_t dynticks;          /* Even value for idle, else odd. */
++      atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
+ #ifdef CONFIG_RCU_FAST_NO_HZ
+       bool all_lazy;              /* Are all CPU's CBs lazy? */
+       unsigned long nonlazy_posted;
+@@ -414,17 +414,17 @@ struct rcu_state {
+                                               /*  _rcu_barrier(). */
+       /* End of fields guarded by barrier_mutex. */
+-      atomic_long_t expedited_start;          /* Starting ticket. */
+-      atomic_long_t expedited_done;           /* Done ticket. */
+-      atomic_long_t expedited_wrap;           /* # near-wrap incidents. */
+-      atomic_long_t expedited_tryfail;        /* # acquisition failures. */
+-      atomic_long_t expedited_workdone1;      /* # done by others #1. */
+-      atomic_long_t expedited_workdone2;      /* # done by others #2. */
+-      atomic_long_t expedited_normal;         /* # fallbacks to normal. */
+-      atomic_long_t expedited_stoppedcpus;    /* # successful stop_cpus. */
+-      atomic_long_t expedited_done_tries;     /* # tries to update _done. */
+-      atomic_long_t expedited_done_lost;      /* # times beaten to _done. */
+-      atomic_long_t expedited_done_exit;      /* # times exited _done loop. */
++      atomic_long_unchecked_t expedited_start;        /* Starting ticket. */
++      atomic_long_t expedited_done;                   /* Done ticket. */
++      atomic_long_unchecked_t expedited_wrap;         /* # near-wrap incidents. */
++      atomic_long_unchecked_t expedited_tryfail;      /* # acquisition failures. */
++      atomic_long_unchecked_t expedited_workdone1;    /* # done by others #1. */
++      atomic_long_unchecked_t expedited_workdone2;    /* # done by others #2. */
++      atomic_long_unchecked_t expedited_normal;       /* # fallbacks to normal. */
++      atomic_long_unchecked_t expedited_stoppedcpus;  /* # successful stop_cpus. */
++      atomic_long_unchecked_t expedited_done_tries;   /* # tries to update _done. */
++      atomic_long_unchecked_t expedited_done_lost;    /* # times beaten to _done. */
++      atomic_long_unchecked_t expedited_done_exit;    /* # times exited _done loop. */
+       unsigned long jiffies_force_qs;         /* Time at which to invoke */
+                                               /*  force_quiescent_state(). */
+diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
+index 3db5a37..b395fb35 100644
+--- a/kernel/rcutree_plugin.h
++++ b/kernel/rcutree_plugin.h
+@@ -903,7 +903,7 @@ void synchronize_rcu_expedited(void)
+       /* Clean up and exit. */
+       smp_mb(); /* ensure expedited GP seen before counter increment. */
+-      ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
++      ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
+ unlock_mb_ret:
+       mutex_unlock(&sync_rcu_preempt_exp_mutex);
+ mb_ret:
+@@ -1451,7 +1451,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+       free_cpumask_var(cm);
+ }
+-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
++static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
+       .store                  = &rcu_cpu_kthread_task,
+       .thread_should_run      = rcu_cpu_kthread_should_run,
+       .thread_fn              = rcu_cpu_kthread,
+@@ -1916,7 +1916,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
+       print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
+       printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
+              cpu, ticks_value, ticks_title,
+-             atomic_read(&rdtp->dynticks) & 0xfff,
++             atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
+              rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
+              rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
+              fast_no_hz);
+@@ -2079,7 +2079,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
+       /* Enqueue the callback on the nocb list and update counts. */
+       old_rhpp = xchg(&rdp->nocb_tail, rhtp);
+-      ACCESS_ONCE(*old_rhpp) = rhp;
++      ACCESS_ONCE_RW(*old_rhpp) = rhp;
+       atomic_long_add(rhcount, &rdp->nocb_q_count);
+       atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
+@@ -2219,12 +2219,12 @@ static int rcu_nocb_kthread(void *arg)
+                * Extract queued callbacks, update counts, and wait
+                * for a grace period to elapse.
+                */
+-              ACCESS_ONCE(rdp->nocb_head) = NULL;
++              ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
+               tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
+               c = atomic_long_xchg(&rdp->nocb_q_count, 0);
+               cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
+-              ACCESS_ONCE(rdp->nocb_p_count) += c;
+-              ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
++              ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
++              ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
+               rcu_nocb_wait_gp(rdp);
+               /* Each pass through the following loop invokes a callback. */
+@@ -2246,8 +2246,8 @@ static int rcu_nocb_kthread(void *arg)
+                       list = next;
+               }
+               trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
+-              ACCESS_ONCE(rdp->nocb_p_count) -= c;
+-              ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
++              ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
++              ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
+               rdp->n_nocbs_invoked += c;
+       }
+       return 0;
+@@ -2274,7 +2274,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
+               t = kthread_run(rcu_nocb_kthread, rdp,
+                               "rcuo%c/%d", rsp->abbr, cpu);
+               BUG_ON(IS_ERR(t));
+-              ACCESS_ONCE(rdp->nocb_kthread) = t;
++              ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
+       }
+ }
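
Several hunks above swap ACCESS_ONCE() for ACCESS_ONCE_RW() on the write side only. A rough reconstruction of the split, assuming macro bodies along these lines (an illustration, not copied from the patch): the read form goes through a const-qualified volatile lvalue, so assigning through it fails to compile, and any code that really needs to write must spell out the _RW variant.

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(volatile const __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

static unsigned long nocb_p_count;

int main(void)
{
        ACCESS_ONCE_RW(nocb_p_count) += 3;       /* writes need the _RW form      */
        /* ACCESS_ONCE(nocb_p_count) += 3;          would not compile: const      */
        printf("count = %lu\n", ACCESS_ONCE(nocb_p_count));  /* reads stay as-is */
        return 0;
}
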
+diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
+index cf6c174..a8f4b50 100644
+--- a/kernel/rcutree_trace.c
++++ b/kernel/rcutree_trace.c
+@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
+                  ulong2long(rdp->completed), ulong2long(rdp->gpnum),
+                  rdp->passed_quiesce, rdp->qs_pending);
+       seq_printf(m, " dt=%d/%llx/%d df=%lu",
+-                 atomic_read(&rdp->dynticks->dynticks),
++                 atomic_read_unchecked(&rdp->dynticks->dynticks),
+                  rdp->dynticks->dynticks_nesting,
+                  rdp->dynticks->dynticks_nmi_nesting,
+                  rdp->dynticks_fqs);
+@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
+       struct rcu_state *rsp = (struct rcu_state *)m->private;
+       seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
+-                 atomic_long_read(&rsp->expedited_start),
++                 atomic_long_read_unchecked(&rsp->expedited_start),
+                  atomic_long_read(&rsp->expedited_done),
+-                 atomic_long_read(&rsp->expedited_wrap),
+-                 atomic_long_read(&rsp->expedited_tryfail),
+-                 atomic_long_read(&rsp->expedited_workdone1),
+-                 atomic_long_read(&rsp->expedited_workdone2),
+-                 atomic_long_read(&rsp->expedited_normal),
+-                 atomic_long_read(&rsp->expedited_stoppedcpus),
+-                 atomic_long_read(&rsp->expedited_done_tries),
+-                 atomic_long_read(&rsp->expedited_done_lost),
+-                 atomic_long_read(&rsp->expedited_done_exit));
++                 atomic_long_read_unchecked(&rsp->expedited_wrap),
++                 atomic_long_read_unchecked(&rsp->expedited_tryfail),
++                 atomic_long_read_unchecked(&rsp->expedited_workdone1),
++                 atomic_long_read_unchecked(&rsp->expedited_workdone2),
++                 atomic_long_read_unchecked(&rsp->expedited_normal),
++                 atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
++                 atomic_long_read_unchecked(&rsp->expedited_done_tries),
++                 atomic_long_read_unchecked(&rsp->expedited_done_lost),
++                 atomic_long_read_unchecked(&rsp->expedited_done_exit));
+       return 0;
+ }
+diff --git a/kernel/resource.c b/kernel/resource.c
+index d738698..5f8e60a 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -152,8 +152,18 @@ static const struct file_operations proc_iomem_operations = {
+ static int __init ioresources_init(void)
+ {
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++      proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
++      proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
++      proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
++#endif
++#else
+       proc_create("ioports", 0, NULL, &proc_ioports_operations);
+       proc_create("iomem", 0, NULL, &proc_iomem_operations);
++#endif
+       return 0;
+ }
+ __initcall(ioresources_init);
+diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
+index 1d96dd0..994ff19 100644
+--- a/kernel/rtmutex-tester.c
++++ b/kernel/rtmutex-tester.c
+@@ -22,7 +22,7 @@
+ #define MAX_RT_TEST_MUTEXES   8
+ static spinlock_t rttest_lock;
+-static atomic_t rttest_event;
++static atomic_unchecked_t rttest_event;
+ struct test_thread_data {
+       int                     opcode;
+@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
+       case RTTEST_LOCKCONT:
+               td->mutexes[td->opdata] = 1;
+-              td->event = atomic_add_return(1, &rttest_event);
++              td->event = atomic_add_return_unchecked(1, &rttest_event);
+               return 0;
+       case RTTEST_RESET:
+@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
+               return 0;
+       case RTTEST_RESETEVENT:
+-              atomic_set(&rttest_event, 0);
++              atomic_set_unchecked(&rttest_event, 0);
+               return 0;
+       default:
+@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
+                       return ret;
+               td->mutexes[id] = 1;
+-              td->event = atomic_add_return(1, &rttest_event);
++              td->event = atomic_add_return_unchecked(1, &rttest_event);
+               rt_mutex_lock(&mutexes[id]);
+-              td->event = atomic_add_return(1, &rttest_event);
++              td->event = atomic_add_return_unchecked(1, &rttest_event);
+               td->mutexes[id] = 4;
+               return 0;
+@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
+                       return ret;
+               td->mutexes[id] = 1;
+-              td->event = atomic_add_return(1, &rttest_event);
++              td->event = atomic_add_return_unchecked(1, &rttest_event);
+               ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
+-              td->event = atomic_add_return(1, &rttest_event);
++              td->event = atomic_add_return_unchecked(1, &rttest_event);
+               td->mutexes[id] = ret ? 0 : 4;
+               return ret ? -EINTR : 0;
+@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
+               if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
+                       return ret;
+-              td->event = atomic_add_return(1, &rttest_event);
++              td->event = atomic_add_return_unchecked(1, &rttest_event);
+               rt_mutex_unlock(&mutexes[id]);
+-              td->event = atomic_add_return(1, &rttest_event);
++              td->event = atomic_add_return_unchecked(1, &rttest_event);
+               td->mutexes[id] = 0;
+               return 0;
+@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
+                       break;
+               td->mutexes[dat] = 2;
+-              td->event = atomic_add_return(1, &rttest_event);
++              td->event = atomic_add_return_unchecked(1, &rttest_event);
+               break;
+       default:
+@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
+                       return;
+               td->mutexes[dat] = 3;
+-              td->event = atomic_add_return(1, &rttest_event);
++              td->event = atomic_add_return_unchecked(1, &rttest_event);
+               break;
+       case RTTEST_LOCKNOWAIT:
+@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
+                       return;
+               td->mutexes[dat] = 1;
+-              td->event = atomic_add_return(1, &rttest_event);
++              td->event = atomic_add_return_unchecked(1, &rttest_event);
+               return;
+       default:
+diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
+index 64de5f8..7735e12 100644
+--- a/kernel/sched/auto_group.c
++++ b/kernel/sched/auto_group.c
+@@ -11,7 +11,7 @@
+ unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
+ static struct autogroup autogroup_default;
+-static atomic_t autogroup_seq_nr;
++static atomic_unchecked_t autogroup_seq_nr;
+ void __init autogroup_init(struct task_struct *init_task)
+ {
+@@ -81,7 +81,7 @@ static inline struct autogroup *autogroup_create(void)
+       kref_init(&ag->kref);
+       init_rwsem(&ag->lock);
+-      ag->id = atomic_inc_return(&autogroup_seq_nr);
++      ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
+       ag->tg = tg;
+ #ifdef CONFIG_RT_GROUP_SCHED
+       /*
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index e8b3350..d83d44e 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3440,7 +3440,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
+  * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+  * positive (at least 1, or number of jiffies left till timeout) if completed.
+  */
+-long __sched
++long __sched __intentional_overflow(-1)
+ wait_for_completion_interruptible_timeout(struct completion *x,
+                                         unsigned long timeout)
+ {
+@@ -3457,7 +3457,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
+  *
+  * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+  */
+-int __sched wait_for_completion_killable(struct completion *x)
++int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
+ {
+       long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
+       if (t == -ERESTARTSYS)
+@@ -3478,7 +3478,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
+  * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+  * positive (at least 1, or number of jiffies left till timeout) if completed.
+  */
+-long __sched
++long __sched __intentional_overflow(-1)
+ wait_for_completion_killable_timeout(struct completion *x,
+                                    unsigned long timeout)
+ {
+@@ -3704,6 +3704,8 @@ int can_nice(const struct task_struct *p, const int nice)
+       /* convert nice value [19,-20] to rlimit style value [1,40] */
+       int nice_rlim = 20 - nice;
++      gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
++
+       return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
+               capable(CAP_SYS_NICE));
+ }
+@@ -3737,7 +3739,8 @@ SYSCALL_DEFINE1(nice, int, increment)
+       if (nice > 19)
+               nice = 19;
+-      if (increment < 0 && !can_nice(current, nice))
++      if (increment < 0 && (!can_nice(current, nice) ||
++                            gr_handle_chroot_nice()))
+               return -EPERM;
+       retval = security_task_setnice(current, nice);
+@@ -3891,6 +3894,7 @@ recheck:
+                       unsigned long rlim_rtprio =
+                                       task_rlimit(p, RLIMIT_RTPRIO);
++                       gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
+                       /* can't set/change the rt policy */
+                       if (policy != p->policy && !rlim_rtprio)
+                               return -EPERM;
+@@ -4988,7 +4992,7 @@ static void migrate_tasks(unsigned int dead_cpu)
+ #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
+-static struct ctl_table sd_ctl_dir[] = {
++static ctl_table_no_const sd_ctl_dir[] __read_only = {
+       {
+               .procname       = "sched_domain",
+               .mode           = 0555,
+@@ -5005,17 +5009,17 @@ static struct ctl_table sd_ctl_root[] = {
+       {}
+ };
+-static struct ctl_table *sd_alloc_ctl_entry(int n)
++static ctl_table_no_const *sd_alloc_ctl_entry(int n)
+ {
+-      struct ctl_table *entry =
++      ctl_table_no_const *entry =
+               kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
+       return entry;
+ }
+-static void sd_free_ctl_entry(struct ctl_table **tablep)
++static void sd_free_ctl_entry(ctl_table_no_const *tablep)
+ {
+-      struct ctl_table *entry;
++      ctl_table_no_const *entry;
+       /*
+        * In the intermediate directories, both the child directory and
+@@ -5023,22 +5027,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
+        * will always be set. In the lowest directory the names are
+        * static strings and all have proc handlers.
+        */
+-      for (entry = *tablep; entry->mode; entry++) {
+-              if (entry->child)
+-                      sd_free_ctl_entry(&entry->child);
++      for (entry = tablep; entry->mode; entry++) {
++              if (entry->child) {
++                      sd_free_ctl_entry(entry->child);
++                      pax_open_kernel();
++                      entry->child = NULL;
++                      pax_close_kernel();
++              }
+               if (entry->proc_handler == NULL)
+                       kfree(entry->procname);
+       }
+-      kfree(*tablep);
+-      *tablep = NULL;
++      kfree(tablep);
+ }
+ static int min_load_idx = 0;
+ static int max_load_idx = CPU_LOAD_IDX_MAX-1;
+ static void
+-set_table_entry(struct ctl_table *entry,
++set_table_entry(ctl_table_no_const *entry,
+               const char *procname, void *data, int maxlen,
+               umode_t mode, proc_handler *proc_handler,
+               bool load_idx)
+@@ -5058,7 +5065,7 @@ set_table_entry(struct ctl_table *entry,
+ static struct ctl_table *
+ sd_alloc_ctl_domain_table(struct sched_domain *sd)
+ {
+-      struct ctl_table *table = sd_alloc_ctl_entry(13);
++      ctl_table_no_const *table = sd_alloc_ctl_entry(13);
+       if (table == NULL)
+               return NULL;
+@@ -5093,9 +5100,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
+       return table;
+ }
+-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
++static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
+ {
+-      struct ctl_table *entry, *table;
++      ctl_table_no_const *entry, *table;
+       struct sched_domain *sd;
+       int domain_num = 0, i;
+       char buf[32];
+@@ -5122,11 +5129,13 @@ static struct ctl_table_header *sd_sysctl_header;
+ static void register_sched_domain_sysctl(void)
+ {
+       int i, cpu_num = num_possible_cpus();
+-      struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
++      ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
+       char buf[32];
+       WARN_ON(sd_ctl_dir[0].child);
++      pax_open_kernel();
+       sd_ctl_dir[0].child = entry;
++      pax_close_kernel();
+       if (entry == NULL)
+               return;
+@@ -5149,8 +5158,12 @@ static void unregister_sched_domain_sysctl(void)
+       if (sd_sysctl_header)
+               unregister_sysctl_table(sd_sysctl_header);
+       sd_sysctl_header = NULL;
+-      if (sd_ctl_dir[0].child)
+-              sd_free_ctl_entry(&sd_ctl_dir[0].child);
++      if (sd_ctl_dir[0].child) {
++              sd_free_ctl_entry(sd_ctl_dir[0].child);
++              pax_open_kernel();
++              sd_ctl_dir[0].child = NULL;
++              pax_close_kernel();
++      }
+ }
+ #else
+ static void register_sched_domain_sysctl(void)
+@@ -5249,7 +5262,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
+  * happens before everything else.  This has to be lower priority than
+  * the notifier in the perf_event subsystem, though.
+  */
+-static struct notifier_block __cpuinitdata migration_notifier = {
++static struct notifier_block migration_notifier = {
+       .notifier_call = migration_call,
+       .priority = CPU_PRI_MIGRATION,
+ };
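
The sched/core.c hunks above mark sd_ctl_dir __read_only, move the helpers to ctl_table_no_const, and bracket the one sanctioned write to sd_ctl_dir[0].child with pax_open_kernel()/pax_close_kernel(). A userspace analogue of that pattern using mprotect(), offered as an illustration only; the in-kernel mechanism works differently:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct ctl_entry {
        const char *procname;
        void *child;
};

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        struct ctl_entry *dir = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (dir == MAP_FAILED)
                return 1;

        dir[0].procname = "sched_domain";
        dir[0].child = NULL;
        mprotect(dir, pagesz, PROT_READ);               /* read-only steady state */

        mprotect(dir, pagesz, PROT_READ | PROT_WRITE);  /* "open kernel" window    */
        dir[0].child = &dir[1];                         /* the one sanctioned write */
        mprotect(dir, pagesz, PROT_READ);               /* close the window         */

        printf("child set: %p\n", dir[0].child);
        munmap(dir, pagesz);
        return 0;
}
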
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 03b73be..9422b9f 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -831,7 +831,7 @@ void task_numa_fault(int node, int pages, bool migrated)
+ static void reset_ptenuma_scan(struct task_struct *p)
+ {
+-      ACCESS_ONCE(p->mm->numa_scan_seq)++;
++      ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
+       p->mm->numa_scan_offset = 0;
+ }
+@@ -5687,7 +5687,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
+  * run_rebalance_domains is triggered when needed from the scheduler tick.
+  * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
+  */
+-static void run_rebalance_domains(struct softirq_action *h)
++static void run_rebalance_domains(void)
+ {
+       int this_cpu = smp_processor_id();
+       struct rq *this_rq = cpu_rq(this_cpu);
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index ce39224d..0e09343 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1009,7 +1009,7 @@ struct sched_class {
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+       void (*task_move_group) (struct task_struct *p, int on_rq);
+ #endif
+-};
++} __do_const;
+ #define sched_class_highest (&stop_sched_class)
+ #define for_each_class(class) \
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 113411b..20d0a99 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -51,12 +51,12 @@ static struct kmem_cache *sigqueue_cachep;
+ int print_fatal_signals __read_mostly;
+-static void __user *sig_handler(struct task_struct *t, int sig)
++static __sighandler_t sig_handler(struct task_struct *t, int sig)
+ {
+       return t->sighand->action[sig - 1].sa.sa_handler;
+ }
+-static int sig_handler_ignored(void __user *handler, int sig)
++static int sig_handler_ignored(__sighandler_t handler, int sig)
+ {
+       /* Is it explicitly or implicitly ignored? */
+       return handler == SIG_IGN ||
+@@ -65,7 +65,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
+ static int sig_task_ignored(struct task_struct *t, int sig, bool force)
+ {
+-      void __user *handler;
++      __sighandler_t handler;
+       handler = sig_handler(t, sig);
+@@ -369,6 +369,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
+       atomic_inc(&user->sigpending);
+       rcu_read_unlock();
++      if (!override_rlimit)
++              gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
++
+       if (override_rlimit ||
+           atomic_read(&user->sigpending) <=
+                       task_rlimit(t, RLIMIT_SIGPENDING)) {
+@@ -496,7 +499,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
+ int unhandled_signal(struct task_struct *tsk, int sig)
+ {
+-      void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
++      __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
+       if (is_global_init(tsk))
+               return 1;
+       if (handler != SIG_IGN && handler != SIG_DFL)
+@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
+               }
+       }
++      /* allow glibc communication via tgkill to other threads in our
++         thread group */
++      if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
++           sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
++          && gr_handle_signal(t, sig))
++              return -EPERM;
++
+       return security_task_kill(t, info, sig, 0);
+ }
+@@ -1199,7 +1209,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+       return send_signal(sig, info, p, 1);
+ }
+-static int
++int
+ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+ {
+       return send_signal(sig, info, t, 0);
+@@ -1236,6 +1246,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+       unsigned long int flags;
+       int ret, blocked, ignored;
+       struct k_sigaction *action;
++      int is_unhandled = 0;
+       spin_lock_irqsave(&t->sighand->siglock, flags);
+       action = &t->sighand->action[sig-1];
+@@ -1250,9 +1261,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+       }
+       if (action->sa.sa_handler == SIG_DFL)
+               t->signal->flags &= ~SIGNAL_UNKILLABLE;
++      if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
++              is_unhandled = 1;
+       ret = specific_send_sig_info(sig, info, t);
+       spin_unlock_irqrestore(&t->sighand->siglock, flags);
++      /* only deal with unhandled signals, java etc trigger SIGSEGV during
++         normal operation */
++      if (is_unhandled) {
++              gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
++              gr_handle_crash(t, sig);
++      }
++
+       return ret;
+ }
+@@ -1319,8 +1339,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+       ret = check_kill_permission(sig, info, p);
+       rcu_read_unlock();
+-      if (!ret && sig)
++      if (!ret && sig) {
+               ret = do_send_sig_info(sig, info, p, true);
++              if (!ret)
++                      gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
++      }
+       return ret;
+ }
+@@ -2926,7 +2949,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
+       int error = -ESRCH;
+       rcu_read_lock();
+-      p = find_task_by_vpid(pid);
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++      /* allow glibc communication via tgkill to other threads in our
++         thread group */
++      if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
++          sig == (SIGRTMIN+1) && tgid == info->si_pid)            
++              p = find_task_by_vpid_unrestricted(pid);
++      else
++#endif
++              p = find_task_by_vpid(pid);
+       if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
+               error = check_kill_permission(sig, info, p);
+               /*
+@@ -3219,6 +3250,16 @@ int __save_altstack(stack_t __user *uss, unsigned long sp)
+               __put_user(t->sas_ss_size, &uss->ss_size);
+ }
++#ifdef CONFIG_X86
++void __save_altstack_ex(stack_t __user *uss, unsigned long sp)
++{
++      struct task_struct *t = current;
++      put_user_ex((void __user *)t->sas_ss_sp, &uss->ss_sp);
++      put_user_ex(sas_ss_flags(sp), &uss->ss_flags);
++      put_user_ex(t->sas_ss_size, &uss->ss_size);
++}
++#endif
++
+ #ifdef CONFIG_COMPAT
+ COMPAT_SYSCALL_DEFINE2(sigaltstack,
+                       const compat_stack_t __user *, uss_ptr,
+@@ -3240,8 +3281,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
+       }
+       seg = get_fs();
+       set_fs(KERNEL_DS);
+-      ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
+-                           (stack_t __force __user *) &uoss,
++      ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
++                           (stack_t __force_user *) &uoss,
+                            compat_user_stack_pointer());
+       set_fs(seg);
+       if (ret >= 0 && uoss_ptr)  {
+@@ -3268,6 +3309,16 @@ int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
+               __put_user(sas_ss_flags(sp), &uss->ss_flags) |
+               __put_user(t->sas_ss_size, &uss->ss_size);
+ }
++
++#ifdef CONFIG_X86
++void __compat_save_altstack_ex(compat_stack_t __user *uss, unsigned long sp)
++{
++      struct task_struct *t = current;
++      put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp);
++      put_user_ex(sas_ss_flags(sp), &uss->ss_flags);
++      put_user_ex(t->sas_ss_size, &uss->ss_size);
++}
++#endif
+ #endif
+ #ifdef __ARCH_WANT_SYS_SIGPENDING
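
The check_kill_permission() hunk above exempts tgkill() with SIGRTMIN+1 inside the sender's own thread group from gr_handle_signal(), per its comment about glibc. A small sketch of the userspace pattern that exemption caters to (simplified; glibc's internal setxid signalling is more involved than this single-threaded demo):

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static void rt_handler(int sig) { (void)sig; }

int main(void)
{
        pid_t tgid = getpid();
        pid_t tid  = (pid_t)syscall(SYS_gettid);   /* main thread: tid == tgid */

        signal(SIGRTMIN + 1, rt_handler);
        if (syscall(SYS_tgkill, tgid, tid, SIGRTMIN + 1) != 0)
                perror("tgkill");
        else
                printf("delivered SIGRTMIN+1 within the thread group\n");
        return 0;
}
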
+diff --git a/kernel/smp.c b/kernel/smp.c
+index 4dba0f7..fe9f773 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -73,7 +73,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
++static struct notifier_block hotplug_cfd_notifier = {
+       .notifier_call          = hotplug_cfd,
+ };
+diff --git a/kernel/smpboot.c b/kernel/smpboot.c
+index 02fc5c9..e54c335 100644
+--- a/kernel/smpboot.c
++++ b/kernel/smpboot.c
+@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
+               }
+               smpboot_unpark_thread(plug_thread, cpu);
+       }
+-      list_add(&plug_thread->list, &hotplug_threads);
++      pax_list_add(&plug_thread->list, &hotplug_threads);
+ out:
+       mutex_unlock(&smpboot_threads_lock);
+       return ret;
+@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
+ {
+       get_online_cpus();
+       mutex_lock(&smpboot_threads_lock);
+-      list_del(&plug_thread->list);
++      pax_list_del(&plug_thread->list);
+       smpboot_destroy_threads(plug_thread);
+       mutex_unlock(&smpboot_threads_lock);
+       put_online_cpus();
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 3d6833f..da6d93d 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
+ EXPORT_SYMBOL(irq_stat);
+ #endif
+-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
++static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
+ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+-char *softirq_to_name[NR_SOFTIRQS] = {
++const char * const softirq_to_name[NR_SOFTIRQS] = {
+       "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
+       "TASKLET", "SCHED", "HRTIMER", "RCU"
+ };
+@@ -250,7 +250,7 @@ restart:
+                       kstat_incr_softirqs_this_cpu(vec_nr);
+                       trace_softirq_entry(vec_nr);
+-                      h->action(h);
++                      h->action();
+                       trace_softirq_exit(vec_nr);
+                       if (unlikely(prev_count != preempt_count())) {
+                               printk(KERN_ERR "huh, entered softirq %u %s %p"
+@@ -405,7 +405,7 @@ void __raise_softirq_irqoff(unsigned int nr)
+       or_softirq_pending(1UL << nr);
+ }
+-void open_softirq(int nr, void (*action)(struct softirq_action *))
++void __init open_softirq(int nr, void (*action)(void))
+ {
+       softirq_vec[nr].action = action;
+ }
+@@ -461,7 +461,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
+ EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+-static void tasklet_action(struct softirq_action *a)
++static void tasklet_action(void)
+ {
+       struct tasklet_struct *list;
+@@ -496,7 +496,7 @@ static void tasklet_action(struct softirq_action *a)
+       }
+ }
+-static void tasklet_hi_action(struct softirq_action *a)
++static void tasklet_hi_action(void)
+ {
+       struct tasklet_struct *list;
+@@ -730,7 +730,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
++static struct notifier_block remote_softirq_cpu_notifier = {
+       .notifier_call  = remote_softirq_cpu_notify,
+ };
+@@ -847,11 +847,11 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata cpu_nfb = {
++static struct notifier_block cpu_nfb = {
+       .notifier_call = cpu_callback
+ };
+-static struct smp_hotplug_thread softirq_threads = {
++static struct smp_hotplug_thread softirq_threads __read_only = {
+       .store                  = &ksoftirqd,
+       .thread_should_run      = ksoftirqd_should_run,
+       .thread_fn              = run_ksoftirqd,
+diff --git a/kernel/srcu.c b/kernel/srcu.c
+index 01d5ccb..cdcbee6 100644
+--- a/kernel/srcu.c
++++ b/kernel/srcu.c
+@@ -300,9 +300,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
+       idx = ACCESS_ONCE(sp->completed) & 0x1;
+       preempt_disable();
+-      ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
++      ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
+       smp_mb(); /* B */  /* Avoid leaking the critical section. */
+-      ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
++      ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
+       preempt_enable();
+       return idx;
+ }
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 2bbd9a7..0875671 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -163,6 +163,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
+               error = -EACCES;
+               goto out;
+       }
++
++      if (gr_handle_chroot_setpriority(p, niceval)) {
++              error = -EACCES;
++              goto out;
++      }
++
+       no_nice = security_task_setnice(p, niceval);
+       if (no_nice) {
+               error = no_nice;
+@@ -626,6 +632,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
+                       goto error;
+       }
++      if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
++              goto error;
++
+       if (rgid != (gid_t) -1 ||
+           (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
+               new->sgid = new->egid;
+@@ -661,6 +670,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
+       old = current_cred();
+       retval = -EPERM;
++
++      if (gr_check_group_change(kgid, kgid, kgid))
++              goto error;
++
+       if (nsown_capable(CAP_SETGID))
+               new->gid = new->egid = new->sgid = new->fsgid = kgid;
+       else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
+@@ -678,7 +691,7 @@ error:
+ /*
+  * change the user struct in a credentials set to match the new UID
+  */
+-static int set_user(struct cred *new)
++int set_user(struct cred *new)
+ {
+       struct user_struct *new_user;
+@@ -758,6 +771,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
+                       goto error;
+       }
++      if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
++              goto error;
++
+       if (!uid_eq(new->uid, old->uid)) {
+               retval = set_user(new);
+               if (retval < 0)
+@@ -808,6 +824,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
+       old = current_cred();
+       retval = -EPERM;
++
++      if (gr_check_crash_uid(kuid))
++              goto error;
++      if (gr_check_user_change(kuid, kuid, kuid))
++              goto error;
++
+       if (nsown_capable(CAP_SETUID)) {
+               new->suid = new->uid = kuid;
+               if (!uid_eq(kuid, old->uid)) {
+@@ -877,6 +899,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
+                       goto error;
+       }
++      if (gr_check_user_change(kruid, keuid, INVALID_UID))
++              goto error;
++
+       if (ruid != (uid_t) -1) {
+               new->uid = kruid;
+               if (!uid_eq(kruid, old->uid)) {
+@@ -959,6 +984,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
+                       goto error;
+       }
++      if (gr_check_group_change(krgid, kegid, INVALID_GID))
++              goto error;
++
+       if (rgid != (gid_t) -1)
+               new->gid = krgid;
+       if (egid != (gid_t) -1)
+@@ -1020,12 +1048,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
+           uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
+           nsown_capable(CAP_SETUID)) {
+               if (!uid_eq(kuid, old->fsuid)) {
++                      if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
++                              goto error;
++
+                       new->fsuid = kuid;
+                       if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
+                               goto change_okay;
+               }
+       }
++error:
+       abort_creds(new);
+       return old_fsuid;
+@@ -1058,12 +1090,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
+       if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
+           gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
+           nsown_capable(CAP_SETGID)) {
++              if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
++                      goto error;
++
+               if (!gid_eq(kgid, old->fsgid)) {
+                       new->fsgid = kgid;
+                       goto change_okay;
+               }
+       }
++error:
+       abort_creds(new);
+       return old_fsgid;
+@@ -1432,19 +1468,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
+               return -EFAULT;
+       down_read(&uts_sem);
+-      error = __copy_to_user(&name->sysname, &utsname()->sysname,
++      error = __copy_to_user(name->sysname, &utsname()->sysname,
+                              __OLD_UTS_LEN);
+       error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
+-      error |= __copy_to_user(&name->nodename, &utsname()->nodename,
++      error |= __copy_to_user(name->nodename, &utsname()->nodename,
+                               __OLD_UTS_LEN);
+       error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
+-      error |= __copy_to_user(&name->release, &utsname()->release,
++      error |= __copy_to_user(name->release, &utsname()->release,
+                               __OLD_UTS_LEN);
+       error |= __put_user(0, name->release + __OLD_UTS_LEN);
+-      error |= __copy_to_user(&name->version, &utsname()->version,
++      error |= __copy_to_user(name->version, &utsname()->version,
+                               __OLD_UTS_LEN);
+       error |= __put_user(0, name->version + __OLD_UTS_LEN);
+-      error |= __copy_to_user(&name->machine, &utsname()->machine,
++      error |= __copy_to_user(name->machine, &utsname()->machine,
+                               __OLD_UTS_LEN);
+       error |= __put_user(0, name->machine + __OLD_UTS_LEN);
+       up_read(&uts_sem);
+@@ -1646,6 +1682,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
+                        */
+                       new_rlim->rlim_cur = 1;
+               }
++              /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
++                 is changed to a lower value.  Since tasks can be created by the same
++                 user in between this limit change and an execve by this task, force
++                 a recheck only for this task by setting PF_NPROC_EXCEEDED
++              */
++              if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
++                      tsk->flags |= PF_NPROC_EXCEEDED;
+       }
+       if (!retval) {
+               if (old_rlim)
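
The do_prlimit() hunk above sets PF_NPROC_EXCEEDED whenever RLIMIT_NPROC is tightened for a non-root user, so the next execve() re-checks the limit. A short look at the limit itself from userspace (it only adjusts the calling process's soft limit and does not reproduce the fork/setuid race described in the comment):

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit rl;

        if (getrlimit(RLIMIT_NPROC, &rl) != 0) {
                perror("getrlimit");
                return 1;
        }
        printf("RLIMIT_NPROC cur=%llu max=%llu\n",
               (unsigned long long)rl.rlim_cur,
               (unsigned long long)rl.rlim_max);

        rl.rlim_cur = 1;                        /* tighten the soft limit */
        if (setrlimit(RLIMIT_NPROC, &rl) != 0)
                perror("setrlimit");
        return 0;
}
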
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 9edcf45..713c960 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -93,7 +93,6 @@
+ #if defined(CONFIG_SYSCTL)
+-
+ /* External variables not in a header file. */
+ extern int sysctl_overcommit_memory;
+ extern int sysctl_overcommit_ratio;
+@@ -119,18 +118,18 @@ extern int blk_iopoll_enabled;
+ /* Constants used for minimum and  maximum */
+ #ifdef CONFIG_LOCKUP_DETECTOR
+-static int sixty = 60;
+-static int neg_one = -1;
++static int sixty __read_only = 60;
+ #endif
+-static int zero;
+-static int __maybe_unused one = 1;
+-static int __maybe_unused two = 2;
+-static int __maybe_unused three = 3;
+-static unsigned long one_ul = 1;
+-static int one_hundred = 100;
++static int neg_one __read_only = -1;
++static int zero __read_only = 0;
++static int __maybe_unused one __read_only = 1;
++static int __maybe_unused two __read_only = 2;
++static int __maybe_unused three __read_only = 3;
++static unsigned long one_ul __read_only = 1;
++static int one_hundred __read_only = 100;
+ #ifdef CONFIG_PRINTK
+-static int ten_thousand = 10000;
++static int ten_thousand __read_only = 10000;
+ #endif
+ /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
+@@ -177,10 +176,8 @@ static int proc_taint(struct ctl_table *table, int write,
+                              void __user *buffer, size_t *lenp, loff_t *ppos);
+ #endif
+-#ifdef CONFIG_PRINTK
+ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
+                               void __user *buffer, size_t *lenp, loff_t *ppos);
+-#endif
+ static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
+               void __user *buffer, size_t *lenp, loff_t *ppos);
+@@ -211,6 +208,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
+ #endif
++extern struct ctl_table grsecurity_table[];
++
+ static struct ctl_table kern_table[];
+ static struct ctl_table vm_table[];
+ static struct ctl_table fs_table[];
+@@ -225,6 +224,20 @@ extern struct ctl_table epoll_table[];
+ int sysctl_legacy_va_layout;
+ #endif
++#ifdef CONFIG_PAX_SOFTMODE
++static ctl_table pax_table[] = {
++      {
++              .procname       = "softmode",
++              .data           = &pax_softmode,
++              .maxlen         = sizeof(unsigned int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++
++      { }
++};
++#endif
++
+ /* The default sysctl tables: */
+ static struct ctl_table sysctl_base_table[] = {
+@@ -273,6 +286,22 @@ static int max_extfrag_threshold = 1000;
+ #endif
+ static struct ctl_table kern_table[] = {
++#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
++      {
++              .procname       = "grsecurity",
++              .mode           = 0500,
++              .child          = grsecurity_table,
++      },
++#endif
++
++#ifdef CONFIG_PAX_SOFTMODE
++      {
++              .procname       = "pax",
++              .mode           = 0500,
++              .child          = pax_table,
++      },
++#endif
++
+       {
+               .procname       = "sched_child_runs_first",
+               .data           = &sysctl_sched_child_runs_first,
+@@ -607,7 +636,7 @@ static struct ctl_table kern_table[] = {
+               .data           = &modprobe_path,
+               .maxlen         = KMOD_PATH_LEN,
+               .mode           = 0644,
+-              .proc_handler   = proc_dostring,
++              .proc_handler   = proc_dostring_modpriv,
+       },
+       {
+               .procname       = "modules_disabled",
+@@ -774,16 +803,20 @@ static struct ctl_table kern_table[] = {
+               .extra1         = &zero,
+               .extra2         = &one,
+       },
++#endif
+       {
+               .procname       = "kptr_restrict",
+               .data           = &kptr_restrict,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax_sysadmin,
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++              .extra1         = &two,
++#else
+               .extra1         = &zero,
++#endif
+               .extra2         = &two,
+       },
+-#endif
+       {
+               .procname       = "ngroups_max",
+               .data           = &ngroups_max,
+@@ -1025,10 +1058,17 @@ static struct ctl_table kern_table[] = {
+        */
+       {
+               .procname       = "perf_event_paranoid",
+-              .data           = &sysctl_perf_event_paranoid,
+-              .maxlen         = sizeof(sysctl_perf_event_paranoid),
++              .data           = &sysctl_perf_event_legitimately_concerned,
++              .maxlen         = sizeof(sysctl_perf_event_legitimately_concerned),
+               .mode           = 0644,
+-              .proc_handler   = proc_dointvec,
++              /* go ahead, be a hero */
++              .proc_handler   = proc_dointvec_minmax_sysadmin,
++              .extra1         = &neg_one,
++#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
++              .extra2         = &three,
++#else
++              .extra2         = &two,
++#endif
+       },
+       {
+               .procname       = "perf_event_mlock_kb",
+@@ -1282,6 +1322,13 @@ static struct ctl_table vm_table[] = {
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+       },
++      {
++              .procname       = "heap_stack_gap",
++              .data           = &sysctl_heap_stack_gap,
++              .maxlen         = sizeof(sysctl_heap_stack_gap),
++              .mode           = 0644,
++              .proc_handler   = proc_doulongvec_minmax,
++      },
+ #else
+       {
+               .procname       = "nr_trim_pages",
+@@ -1746,6 +1793,16 @@ int proc_dostring(struct ctl_table *table, int write,
+                              buffer, lenp, ppos);
+ }
++int proc_dostring_modpriv(struct ctl_table *table, int write,
++                void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++      if (write && !capable(CAP_SYS_MODULE))
++              return -EPERM;
++
++      return _proc_do_string(table->data, table->maxlen, write,
++                             buffer, lenp, ppos);
++}
++
+ static size_t proc_skip_spaces(char **buf)
+ {
+       size_t ret;
+@@ -1851,6 +1908,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
+       len = strlen(tmp);
+       if (len > *size)
+               len = *size;
++      if (len > sizeof(tmp))
++              len = sizeof(tmp);
+       if (copy_to_user(*buf, tmp, len))
+               return -EFAULT;
+       *size -= len;
+@@ -2015,7 +2074,7 @@ int proc_dointvec(struct ctl_table *table, int write,
+ static int proc_taint(struct ctl_table *table, int write,
+                              void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table t;
++      ctl_table_no_const t;
+       unsigned long tmptaint = get_taint();
+       int err;
+@@ -2043,7 +2102,6 @@ static int proc_taint(struct ctl_table *table, int write,
+       return err;
+ }
+-#ifdef CONFIG_PRINTK
+ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
+                               void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+@@ -2052,7 +2110,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
+       return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ }
+-#endif
+ struct do_proc_dointvec_minmax_conv_param {
+       int *min;
+@@ -2199,8 +2256,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
+                       *i = val;
+               } else {
+                       val = convdiv * (*i) / convmul;
+-                      if (!first)
++                      if (!first) {
+                               err = proc_put_char(&buffer, &left, '\t');
++                              if (err)
++                                      break;
++                      }
+                       err = proc_put_long(&buffer, &left, val, false);
+                       if (err)
+                               break;
+@@ -2592,6 +2652,12 @@ int proc_dostring(struct ctl_table *table, int write,
+       return -ENOSYS;
+ }
++int proc_dostring_modpriv(struct ctl_table *table, int write,
++                void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++      return -ENOSYS;
++}
++
+ int proc_dointvec(struct ctl_table *table, int write,
+                 void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+@@ -2648,5 +2714,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
+ EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
+ EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
+ EXPORT_SYMBOL(proc_dostring);
++EXPORT_SYMBOL(proc_dostring_modpriv);
+ EXPORT_SYMBOL(proc_doulongvec_minmax);
+ EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
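
The sysctl.c hunks above add the grsecurity and pax directories, gate modprobe_path writes behind CAP_SYS_MODULE via proc_dostring_modpriv(), raise the kptr_restrict floor to 2 under CONFIG_GRKERNSEC_HIDESYM, and route perf_event_paranoid through proc_dointvec_minmax_sysadmin. A trivial reader for two of the affected knobs (userspace illustration; both paths are standard procfs entries on stock kernels as well):

#include <stdio.h>

static void show(const char *path)
{
        char buf[64];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("%s = %s", path, buf);
        fclose(f);
}

int main(void)
{
        show("/proc/sys/kernel/kptr_restrict");
        show("/proc/sys/kernel/perf_event_paranoid");
        return 0;
}
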
+diff --git a/kernel/taskstats.c b/kernel/taskstats.c
+index 145bb4d..b2aa969 100644
+--- a/kernel/taskstats.c
++++ b/kernel/taskstats.c
+@@ -28,9 +28,12 @@
+ #include <linux/fs.h>
+ #include <linux/file.h>
+ #include <linux/pid_namespace.h>
++#include <linux/grsecurity.h>
+ #include <net/genetlink.h>
+ #include <linux/atomic.h>
++extern int gr_is_taskstats_denied(int pid);
++
+ /*
+  * Maximum length of a cpumask that can be specified in
+  * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
+@@ -570,6 +573,9 @@ err:
+ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
+ {
++      if (gr_is_taskstats_denied(current->pid))
++              return -EACCES;
++
+       if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
+               return cmd_attr_register_cpumask(info);
+       else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
+diff --git a/kernel/time.c b/kernel/time.c
+index d3617db..c98bbe9 100644
+--- a/kernel/time.c
++++ b/kernel/time.c
+@@ -172,6 +172,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
+               return error;
+       if (tz) {
++              /* we log in do_settimeofday called below, so don't log twice
++              */
++              if (!tv)
++                      gr_log_timechange();
++
+               sys_tz = *tz;
+               update_vsyscall_tz();
+               if (firsttime) {
+@@ -502,7 +507,7 @@ EXPORT_SYMBOL(usecs_to_jiffies);
+  * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
+  * value to a scaled second value.
+  */
+-unsigned long
++unsigned long __intentional_overflow(-1)
+ timespec_to_jiffies(const struct timespec *value)
+ {
+       unsigned long sec = value->tv_sec;
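
kernel/time.c above gains an __intentional_overflow(-1) annotation on timespec_to_jiffies(), an annotation that, by its name, marks the conversion's arithmetic as deliberately allowed to wrap for the patch set's overflow-checking GCC plugin. A simplified sketch of why that is plausible (assumed arithmetic; the real function uses scaled fixed-point and MAX_SEC_IN_JIFFIES, not this clamp):

#include <stdio.h>
#include <limits.h>

#define HZ 100UL
#define MAX_JIFFY_OFFSET ((LONG_MAX >> 1) - 1)

static unsigned long secs_to_jiffies(unsigned long sec)
{
        if (sec >= MAX_JIFFY_OFFSET / HZ)       /* would overflow: clamp */
                return MAX_JIFFY_OFFSET;
        return sec * HZ;
}

int main(void)
{
        printf("1 day -> %lu jiffies\n", secs_to_jiffies(86400));
        printf("huge  -> %lu jiffies (clamped)\n",
               secs_to_jiffies(ULONG_MAX / 2));
        return 0;
}
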
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index f11d83b..d016d91 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
+       struct platform_device *pdev;
+       int error = 0;
+       int i;
+-      struct k_clock alarm_clock = {
++      static struct k_clock alarm_clock = {
+               .clock_getres   = alarm_clock_getres,
+               .clock_get      = alarm_clock_get,
+               .timer_create   = alarm_timer_create,
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index baeeb5c..c22704a 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -15,6 +15,7 @@
+ #include <linux/init.h>
+ #include <linux/mm.h>
+ #include <linux/sched.h>
++#include <linux/grsecurity.h>
+ #include <linux/syscore_ops.h>
+ #include <linux/clocksource.h>
+ #include <linux/jiffies.h>
+@@ -495,6 +496,8 @@ int do_settimeofday(const struct timespec *tv)
+       if (!timespec_valid_strict(tv))
+               return -EINVAL;
++      gr_log_timechange();
++
+       raw_spin_lock_irqsave(&timekeeper_lock, flags);
+       write_seqcount_begin(&timekeeper_seq);
+diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
+index 3bdf283..cc68d83 100644
+--- a/kernel/time/timer_list.c
++++ b/kernel/time/timer_list.c
+@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
+ static void print_name_offset(struct seq_file *m, void *sym)
+ {
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++      SEQ_printf(m, "<%p>", NULL);
++#else
+       char symname[KSYM_NAME_LEN];
+       if (lookup_symbol_name((unsigned long)sym, symname) < 0)
+               SEQ_printf(m, "<%pK>", sym);
+       else
+               SEQ_printf(m, "%s", symname);
++#endif
+ }
+ static void
+@@ -119,7 +123,11 @@ next_one:
+ static void
+ print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
+ {
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++      SEQ_printf(m, "  .base:       %p\n", NULL);
++#else
+       SEQ_printf(m, "  .base:       %pK\n", base);
++#endif
+       SEQ_printf(m, "  .index:      %d\n",
+                       base->index);
+       SEQ_printf(m, "  .resolution: %Lu nsecs\n",
+@@ -355,7 +363,11 @@ static int __init init_timer_list_procfs(void)
+ {
+       struct proc_dir_entry *pe;
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++      pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
++#else
+       pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
++#endif
+       if (!pe)
+               return -ENOMEM;
+       return 0;
+diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
+index 0b537f2..40d6c20 100644
+--- a/kernel/time/timer_stats.c
++++ b/kernel/time/timer_stats.c
+@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
+ static unsigned long nr_entries;
+ static struct entry entries[MAX_ENTRIES];
+-static atomic_t overflow_count;
++static atomic_unchecked_t overflow_count;
+ /*
+  * The entries are in a hash-table, for fast lookup:
+@@ -140,7 +140,7 @@ static void reset_entries(void)
+       nr_entries = 0;
+       memset(entries, 0, sizeof(entries));
+       memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
+-      atomic_set(&overflow_count, 0);
++      atomic_set_unchecked(&overflow_count, 0);
+ }
+ static struct entry *alloc_entry(void)
+@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
+       if (likely(entry))
+               entry->count++;
+       else
+-              atomic_inc(&overflow_count);
++              atomic_inc_unchecked(&overflow_count);
+  out_unlock:
+       raw_spin_unlock_irqrestore(lock, flags);
+@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
+ static void print_name_offset(struct seq_file *m, unsigned long addr)
+ {
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++      seq_printf(m, "<%p>", NULL);
++#else
+       char symname[KSYM_NAME_LEN];
+       if (lookup_symbol_name(addr, symname) < 0)
+-              seq_printf(m, "<%p>", (void *)addr);
++              seq_printf(m, "<%pK>", (void *)addr);
+       else
+               seq_printf(m, "%s", symname);
++#endif
+ }
+ static int tstats_show(struct seq_file *m, void *v)
+@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
+       seq_puts(m, "Timer Stats Version: v0.2\n");
+       seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
+-      if (atomic_read(&overflow_count))
++      if (atomic_read_unchecked(&overflow_count))
+               seq_printf(m, "Overflow: %d entries\n",
+-                      atomic_read(&overflow_count));
++                      atomic_read_unchecked(&overflow_count));
+       for (i = 0; i < nr_entries; i++) {
+               entry = entries + i;
+@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
+ {
+       struct proc_dir_entry *pe;
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++      pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
++#else
+       pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
++#endif
+       if (!pe)
+               return -ENOMEM;
+       return 0;
+diff --git a/kernel/timer.c b/kernel/timer.c
+index 15bc1b4..32da49c 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -1366,7 +1366,7 @@ void update_process_times(int user_tick)
+ /*
+  * This function runs timers and the timer-tq in bottom half context.
+  */
+-static void run_timer_softirq(struct softirq_action *h)
++static void run_timer_softirq(void)
+ {
+       struct tvec_base *base = __this_cpu_read(tvec_bases);
+@@ -1429,7 +1429,7 @@ static void process_timeout(unsigned long __data)
+  *
+  * In all cases the return value is guaranteed to be non-negative.
+  */
+-signed long __sched schedule_timeout(signed long timeout)
++signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
+ {
+       struct timer_list timer;
+       unsigned long expire;
+@@ -1635,7 +1635,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata timers_nb = {
++static struct notifier_block timers_nb = {
+       .notifier_call  = timer_cpu_notify,
+ };
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index b8b8560..75b1a09 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
+       struct blk_trace *bt = filp->private_data;
+       char buf[16];
+-      snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
++      snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
+       return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+ }
+@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
+               return 1;
+       bt = buf->chan->private_data;
+-      atomic_inc(&bt->dropped);
++      atomic_inc_unchecked(&bt->dropped);
+       return 0;
+ }
+@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+       bt->dir = dir;
+       bt->dev = dev;
+-      atomic_set(&bt->dropped, 0);
++      atomic_set_unchecked(&bt->dropped, 0);
+       ret = -EIO;
+       bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 6c508ff..ee55a13 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1915,12 +1915,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
+       if (unlikely(ftrace_disabled))
+               return 0;
++      ret = ftrace_arch_code_modify_prepare();
++      FTRACE_WARN_ON(ret);
++      if (ret)
++              return 0;
++
+       ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
++      FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
+       if (ret) {
+               ftrace_bug(ret, ip);
+-              return 0;
+       }
+-      return 1;
++      return ret ? 0 : 1;
+ }
+ /*
+@@ -3931,8 +3936,10 @@ static int ftrace_process_locs(struct module *mod,
+       if (!count)
+               return 0;
++      pax_open_kernel();
+       sort(start, count, sizeof(*start),
+            ftrace_cmp_ips, ftrace_swap_ips);
++      pax_close_kernel();
+       start_pg = ftrace_allocate_pages(count);
+       if (!start_pg)
+@@ -4655,8 +4662,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ static int ftrace_graph_active;
+-static struct notifier_block ftrace_suspend_notifier;
+-
+ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
+ {
+       return 0;
+@@ -4800,6 +4805,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
+       return NOTIFY_DONE;
+ }
++static struct notifier_block ftrace_suspend_notifier = {
++      .notifier_call = ftrace_suspend_notifier_call
++};
++
+ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+                       trace_func_graph_ent_t entryfunc)
+ {
+@@ -4813,7 +4822,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+               goto out;
+       }
+-      ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
+       register_pm_notifier(&ftrace_suspend_notifier);
+       ftrace_graph_active++;
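
The ftrace.c hunk above turns ftrace_suspend_notifier from a notifier whose callback was assigned at registration time into one initialised statically. A minimal illustration of why build-time initialisation pairs well with read-only placement (simplified types, not the kernel's struct notifier_block):

#include <stdio.h>

struct notifier {
        int (*call)(void *data);
};

static int on_suspend(void *data) { (void)data; return 0; }

/* before: patched at runtime            after: const, build-time init */
static struct notifier runtime_nb;
static const struct notifier static_nb = { .call = on_suspend };

int main(void)
{
        runtime_nb.call = on_suspend;          /* requires writable storage */
        printf("%d %d\n", runtime_nb.call(NULL), static_nb.call(NULL));
        return 0;
}
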
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index e444ff8..438b8f4 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -352,9 +352,9 @@ struct buffer_data_page {
+  */
+ struct buffer_page {
+       struct list_head list;          /* list of buffer pages */
+-      local_t          write;         /* index for next write */
++      local_unchecked_t        write;         /* index for next write */
+       unsigned         read;          /* index for next read */
+-      local_t          entries;       /* entries on this page */
++      local_unchecked_t        entries;       /* entries on this page */
+       unsigned long    real_end;      /* real end of data */
+       struct buffer_data_page *page;  /* Actual data page */
+ };
+@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
+       unsigned long                   last_overrun;
+       local_t                         entries_bytes;
+       local_t                         entries;
+-      local_t                         overrun;
+-      local_t                         commit_overrun;
++      local_unchecked_t               overrun;
++      local_unchecked_t               commit_overrun;
+       local_t                         dropped_events;
+       local_t                         committing;
+       local_t                         commits;
+@@ -992,8 +992,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+        *
+        * We add a counter to the write field to denote this.
+        */
+-      old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
+-      old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
++      old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
++      old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
+       /*
+        * Just make sure we have seen our old_write and synchronize
+@@ -1021,8 +1021,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+                * cmpxchg to only update if an interrupt did not already
+                * do it for us. If the cmpxchg fails, we don't care.
+                */
+-              (void)local_cmpxchg(&next_page->write, old_write, val);
+-              (void)local_cmpxchg(&next_page->entries, old_entries, eval);
++              (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
++              (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
+               /*
+                * No need to worry about races with clearing out the commit.
+@@ -1386,12 +1386,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
+ static inline unsigned long rb_page_entries(struct buffer_page *bpage)
+ {
+-      return local_read(&bpage->entries) & RB_WRITE_MASK;
++      return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
+ }
+ static inline unsigned long rb_page_write(struct buffer_page *bpage)
+ {
+-      return local_read(&bpage->write) & RB_WRITE_MASK;
++      return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
+ }
+ static int
+@@ -1486,7 +1486,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
+                        * bytes consumed in ring buffer from here.
+                        * Increment overrun to account for the lost events.
+                        */
+-                      local_add(page_entries, &cpu_buffer->overrun);
++                      local_add_unchecked(page_entries, &cpu_buffer->overrun);
+                       local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+               }
+@@ -2063,7 +2063,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
+                * it is our responsibility to update
+                * the counters.
+                */
+-              local_add(entries, &cpu_buffer->overrun);
++              local_add_unchecked(entries, &cpu_buffer->overrun);
+               local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+               /*
+@@ -2213,7 +2213,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+               if (tail == BUF_PAGE_SIZE)
+                       tail_page->real_end = 0;
+-              local_sub(length, &tail_page->write);
++              local_sub_unchecked(length, &tail_page->write);
+               return;
+       }
+@@ -2248,7 +2248,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+               rb_event_set_padding(event);
+               /* Set the write back to the previous setting */
+-              local_sub(length, &tail_page->write);
++              local_sub_unchecked(length, &tail_page->write);
+               return;
+       }
+@@ -2260,7 +2260,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+       /* Set write to end of buffer */
+       length = (tail + length) - BUF_PAGE_SIZE;
+-      local_sub(length, &tail_page->write);
++      local_sub_unchecked(length, &tail_page->write);
+ }
+ /*
+@@ -2286,7 +2286,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
+        * about it.
+        */
+       if (unlikely(next_page == commit_page)) {
+-              local_inc(&cpu_buffer->commit_overrun);
++              local_inc_unchecked(&cpu_buffer->commit_overrun);
+               goto out_reset;
+       }
+@@ -2342,7 +2342,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
+                                     cpu_buffer->tail_page) &&
+                                    (cpu_buffer->commit_page ==
+                                     cpu_buffer->reader_page))) {
+-                              local_inc(&cpu_buffer->commit_overrun);
++                              local_inc_unchecked(&cpu_buffer->commit_overrun);
+                               goto out_reset;
+                       }
+               }
+@@ -2390,7 +2390,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+               length += RB_LEN_TIME_EXTEND;
+       tail_page = cpu_buffer->tail_page;
+-      write = local_add_return(length, &tail_page->write);
++      write = local_add_return_unchecked(length, &tail_page->write);
+       /* set write to only the index of the write */
+       write &= RB_WRITE_MASK;
+@@ -2407,7 +2407,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+       kmemcheck_annotate_bitfield(event, bitfield);
+       rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
+-      local_inc(&tail_page->entries);
++      local_inc_unchecked(&tail_page->entries);
+       /*
+        * If this is the first commit on the page, then update
+@@ -2440,7 +2440,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+       if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
+               unsigned long write_mask =
+-                      local_read(&bpage->write) & ~RB_WRITE_MASK;
++                      local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
+               unsigned long event_length = rb_event_length(event);
+               /*
+                * This is on the tail page. It is possible that
+@@ -2450,7 +2450,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+                */
+               old_index += write_mask;
+               new_index += write_mask;
+-              index = local_cmpxchg(&bpage->write, old_index, new_index);
++              index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
+               if (index == old_index) {
+                       /* update counters */
+                       local_sub(event_length, &cpu_buffer->entries_bytes);
+@@ -2842,7 +2842,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+       /* Do the likely case first */
+       if (likely(bpage->page == (void *)addr)) {
+-              local_dec(&bpage->entries);
++              local_dec_unchecked(&bpage->entries);
+               return;
+       }
+@@ -2854,7 +2854,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+       start = bpage;
+       do {
+               if (bpage->page == (void *)addr) {
+-                      local_dec(&bpage->entries);
++                      local_dec_unchecked(&bpage->entries);
+                       return;
+               }
+               rb_inc_page(cpu_buffer, &bpage);
+@@ -3138,7 +3138,7 @@ static inline unsigned long
+ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
+ {
+       return local_read(&cpu_buffer->entries) -
+-              (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
++              (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
+ }
+ /**
+@@ -3227,7 +3227,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
+               return 0;
+       cpu_buffer = buffer->buffers[cpu];
+-      ret = local_read(&cpu_buffer->overrun);
++      ret = local_read_unchecked(&cpu_buffer->overrun);
+       return ret;
+ }
+@@ -3250,7 +3250,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
+               return 0;
+       cpu_buffer = buffer->buffers[cpu];
+-      ret = local_read(&cpu_buffer->commit_overrun);
++      ret = local_read_unchecked(&cpu_buffer->commit_overrun);
+       return ret;
+ }
+@@ -3335,7 +3335,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
+       /* if you care about this being correct, lock the buffer */
+       for_each_buffer_cpu(buffer, cpu) {
+               cpu_buffer = buffer->buffers[cpu];
+-              overruns += local_read(&cpu_buffer->overrun);
++              overruns += local_read_unchecked(&cpu_buffer->overrun);
+       }
+       return overruns;
+@@ -3511,8 +3511,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+       /*
+        * Reset the reader page to size zero.
+        */
+-      local_set(&cpu_buffer->reader_page->write, 0);
+-      local_set(&cpu_buffer->reader_page->entries, 0);
++      local_set_unchecked(&cpu_buffer->reader_page->write, 0);
++      local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
+       local_set(&cpu_buffer->reader_page->page->commit, 0);
+       cpu_buffer->reader_page->real_end = 0;
+@@ -3546,7 +3546,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+        * want to compare with the last_overrun.
+        */
+       smp_mb();
+-      overwrite = local_read(&(cpu_buffer->overrun));
++      overwrite = local_read_unchecked(&(cpu_buffer->overrun));
+       /*
+        * Here's the tricky part.
+@@ -4116,8 +4116,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+       cpu_buffer->head_page
+               = list_entry(cpu_buffer->pages, struct buffer_page, list);
+-      local_set(&cpu_buffer->head_page->write, 0);
+-      local_set(&cpu_buffer->head_page->entries, 0);
++      local_set_unchecked(&cpu_buffer->head_page->write, 0);
++      local_set_unchecked(&cpu_buffer->head_page->entries, 0);
+       local_set(&cpu_buffer->head_page->page->commit, 0);
+       cpu_buffer->head_page->read = 0;
+@@ -4127,14 +4127,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+       INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
+       INIT_LIST_HEAD(&cpu_buffer->new_pages);
+-      local_set(&cpu_buffer->reader_page->write, 0);
+-      local_set(&cpu_buffer->reader_page->entries, 0);
++      local_set_unchecked(&cpu_buffer->reader_page->write, 0);
++      local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
+       local_set(&cpu_buffer->reader_page->page->commit, 0);
+       cpu_buffer->reader_page->read = 0;
+       local_set(&cpu_buffer->entries_bytes, 0);
+-      local_set(&cpu_buffer->overrun, 0);
+-      local_set(&cpu_buffer->commit_overrun, 0);
++      local_set_unchecked(&cpu_buffer->overrun, 0);
++      local_set_unchecked(&cpu_buffer->commit_overrun, 0);
+       local_set(&cpu_buffer->dropped_events, 0);
+       local_set(&cpu_buffer->entries, 0);
+       local_set(&cpu_buffer->committing, 0);
+@@ -4538,8 +4538,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
+               rb_init_page(bpage);
+               bpage = reader->page;
+               reader->page = *data_page;
+-              local_set(&reader->write, 0);
+-              local_set(&reader->entries, 0);
++              local_set_unchecked(&reader->write, 0);
++              local_set_unchecked(&reader->entries, 0);
+               reader->read = 0;
+               *data_page = bpage;
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 06a5bce..53ad6e7 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3347,7 +3347,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+       return 0;
+ }
+-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
++int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
+ {
+       /* do nothing if flag is already set */
+       if (!!(trace_flags & mask) == !!enabled)
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 51b4448..7be601f 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1035,7 +1035,7 @@ extern const char *__stop___trace_bprintk_fmt[];
+ void trace_printk_init_buffers(void);
+ void trace_printk_start_comm(void);
+ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
+-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
++int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
+ /*
+  * Normal trace_printk() and friends allocates special buffers
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 6953263..2004e16 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -1748,10 +1748,6 @@ static LIST_HEAD(ftrace_module_file_list);
+ struct ftrace_module_file_ops {
+       struct list_head                list;
+       struct module                   *mod;
+-      struct file_operations          id;
+-      struct file_operations          enable;
+-      struct file_operations          format;
+-      struct file_operations          filter;
+ };
+ static struct ftrace_module_file_ops *
+@@ -1792,17 +1788,12 @@ trace_create_file_ops(struct module *mod)
+       file_ops->mod = mod;
+-      file_ops->id = ftrace_event_id_fops;
+-      file_ops->id.owner = mod;
+-
+-      file_ops->enable = ftrace_enable_fops;
+-      file_ops->enable.owner = mod;
+-
+-      file_ops->filter = ftrace_event_filter_fops;
+-      file_ops->filter.owner = mod;
+-
+-      file_ops->format = ftrace_event_format_fops;
+-      file_ops->format.owner = mod;
++      pax_open_kernel();
++      mod->trace_id.owner = mod;
++      mod->trace_enable.owner = mod;
++      mod->trace_filter.owner = mod;
++      mod->trace_format.owner = mod;
++      pax_close_kernel();
+       list_add(&file_ops->list, &ftrace_module_file_list);
+@@ -1895,8 +1886,8 @@ __trace_add_new_mod_event(struct ftrace_event_call *call,
+                         struct ftrace_module_file_ops *file_ops)
+ {
+       return __trace_add_new_event(call, tr,
+-                                   &file_ops->id, &file_ops->enable,
+-                                   &file_ops->filter, &file_ops->format);
++                                   &file_ops->mod->trace_id, &file_ops->mod->trace_enable,
++                                   &file_ops->mod->trace_filter, &file_ops->mod->trace_format);
+ }
+ #else
+diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
+index a5e8f48..a9690d2 100644
+--- a/kernel/trace/trace_mmiotrace.c
++++ b/kernel/trace/trace_mmiotrace.c
+@@ -24,7 +24,7 @@ struct header_iter {
+ static struct trace_array *mmio_trace_array;
+ static bool overrun_detected;
+ static unsigned long prev_overruns;
+-static atomic_t dropped_count;
++static atomic_unchecked_t dropped_count;
+ static void mmio_reset_data(struct trace_array *tr)
+ {
+@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
+ static unsigned long count_overruns(struct trace_iterator *iter)
+ {
+-      unsigned long cnt = atomic_xchg(&dropped_count, 0);
++      unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
+       unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
+       if (over > prev_overruns)
+@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
+       event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
+                                         sizeof(*entry), 0, pc);
+       if (!event) {
+-              atomic_inc(&dropped_count);
++              atomic_inc_unchecked(&dropped_count);
+               return;
+       }
+       entry   = ring_buffer_event_data(event);
+@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
+       event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
+                                         sizeof(*entry), 0, pc);
+       if (!event) {
+-              atomic_inc(&dropped_count);
++              atomic_inc_unchecked(&dropped_count);
+               return;
+       }
+       entry   = ring_buffer_event_data(event);
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index bb922d9..2a54a257 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -294,7 +294,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
+       p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
+       if (!IS_ERR(p)) {
+-              p = mangle_path(s->buffer + s->len, p, "\n");
++              p = mangle_path(s->buffer + s->len, p, "\n\\");
+               if (p) {
+                       s->len = p - s->buffer;
+                       return 1;
+@@ -893,14 +893,16 @@ int register_ftrace_event(struct trace_event *event)
+                       goto out;
+       }
++      pax_open_kernel();
+       if (event->funcs->trace == NULL)
+-              event->funcs->trace = trace_nop_print;
++              *(void **)&event->funcs->trace = trace_nop_print;
+       if (event->funcs->raw == NULL)
+-              event->funcs->raw = trace_nop_print;
++              *(void **)&event->funcs->raw = trace_nop_print;
+       if (event->funcs->hex == NULL)
+-              event->funcs->hex = trace_nop_print;
++              *(void **)&event->funcs->hex = trace_nop_print;
+       if (event->funcs->binary == NULL)
+-              event->funcs->binary = trace_nop_print;
++              *(void **)&event->funcs->binary = trace_nop_print;
++      pax_close_kernel();
+       key = event->type & (EVENT_HASHSIZE - 1);
+diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
+index b20428c..4845a10 100644
+--- a/kernel/trace/trace_stack.c
++++ b/kernel/trace/trace_stack.c
+@@ -68,7 +68,7 @@ check_stack(unsigned long ip, unsigned long *stack)
+               return;
+       /* we do not handle interrupt stacks yet */
+-      if (!object_is_on_stack(stack))
++      if (!object_starts_on_stack(stack))
+               return;
+       local_irq_save(flags);
+diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
+index 9064b91..1f5d2f8 100644
+--- a/kernel/user_namespace.c
++++ b/kernel/user_namespace.c
+@@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
+           !kgid_has_mapping(parent_ns, group))
+               return -EPERM;
++#ifdef CONFIG_GRKERNSEC
++      /*
++       * This doesn't really inspire confidence:
++       * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
++       * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
++       * Increases kernel attack surface in areas developers
++       * previously cared little about ("low importance due
++       * to requiring "root" capability")
++       * To be removed when this code receives *proper* review
++       */
++      if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
++                      !capable(CAP_SETGID))
++              return -EPERM;
++#endif
++
+       ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
+       if (!ns)
+               return -ENOMEM;
+@@ -862,7 +877,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
+       if (atomic_read(&current->mm->mm_users) > 1)
+               return -EINVAL;
+-      if (current->fs->users != 1)
++      if (atomic_read(&current->fs->users) != 1)
+               return -EINVAL;
+       if (!ns_capable(user_ns, CAP_SYS_ADMIN))
+diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
+index 4f69f9a..7c6f8f8 100644
+--- a/kernel/utsname_sysctl.c
++++ b/kernel/utsname_sysctl.c
+@@ -47,7 +47,7 @@ static void put_uts(ctl_table *table, int write, void *which)
+ static int proc_do_uts_string(ctl_table *table, int write,
+                 void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table uts_table;
++      ctl_table_no_const uts_table;
+       int r;
+       memcpy(&uts_table, table, sizeof(uts_table));
+       uts_table.data = get_uts(table, write);
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index 05039e3..17490c7 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -531,7 +531,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
+ }
+ #endif /* CONFIG_SYSCTL */
+-static struct smp_hotplug_thread watchdog_threads = {
++static struct smp_hotplug_thread watchdog_threads __read_only = {
+       .store                  = &softlockup_watchdog,
+       .thread_should_run      = watchdog_should_run,
+       .thread_fn              = watchdog,
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 6f01921..139869b 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -4596,7 +4596,7 @@ static void rebind_workers(struct worker_pool *pool)
+               WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
+               worker_flags |= WORKER_REBOUND;
+               worker_flags &= ~WORKER_UNBOUND;
+-              ACCESS_ONCE(worker->flags) = worker_flags;
++              ACCESS_ONCE_RW(worker->flags) = worker_flags;
+       }
+       spin_unlock_irq(&pool->lock);
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 74fdc5c..3310593 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -549,7 +549,7 @@ config DEBUG_MUTEXES
+ config DEBUG_LOCK_ALLOC
+       bool "Lock debugging: detect incorrect freeing of live locks"
+-      depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
++      depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
+       select DEBUG_SPINLOCK
+       select DEBUG_MUTEXES
+       select LOCKDEP
+@@ -563,7 +563,7 @@ config DEBUG_LOCK_ALLOC
+ config PROVE_LOCKING
+       bool "Lock debugging: prove locking correctness"
+-      depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
++      depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
+       select LOCKDEP
+       select DEBUG_SPINLOCK
+       select DEBUG_MUTEXES
+@@ -614,7 +614,7 @@ config LOCKDEP
+ config LOCK_STAT
+       bool "Lock usage statistics"
+-      depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
++      depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
+       select LOCKDEP
+       select DEBUG_SPINLOCK
+       select DEBUG_MUTEXES
+@@ -1282,6 +1282,7 @@ config LATENCYTOP
+       depends on DEBUG_KERNEL
+       depends on STACKTRACE_SUPPORT
+       depends on PROC_FS
++      depends on !GRKERNSEC_HIDESYM
+       select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
+       select KALLSYMS
+       select KALLSYMS_ALL
+@@ -1298,7 +1299,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+ config DEBUG_STRICT_USER_COPY_CHECKS
+       bool "Strict user copy size checks"
+       depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+-      depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
++      depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
+       help
+         Enabling this option turns a certain set of sanity checks for user
+         copy operations into compile time failures.
+@@ -1328,7 +1329,7 @@ config INTERVAL_TREE_TEST
+ config PROVIDE_OHCI1394_DMA_INIT
+       bool "Remote debugging over FireWire early on boot"
+-      depends on PCI && X86
++      depends on PCI && X86 && !GRKERNSEC
+       help
+         If you want to debug problems which hang or crash the kernel early
+         on boot and the crashing machine has a FireWire port, you can use
+@@ -1357,7 +1358,7 @@ config PROVIDE_OHCI1394_DMA_INIT
+ config FIREWIRE_OHCI_REMOTE_DMA
+       bool "Remote debugging over FireWire with firewire-ohci"
+-      depends on FIREWIRE_OHCI
++      depends on FIREWIRE_OHCI && !GRKERNSEC
+       help
+         This option lets you use the FireWire bus for remote debugging
+         with help of the firewire-ohci driver. It enables unfiltered
+diff --git a/lib/Makefile b/lib/Makefile
+index c55a037..fb46e3b 100644
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -50,7 +50,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
+ obj-$(CONFIG_BTREE) += btree.o
+ obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
+-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
++obj-y += list_debug.o
+ obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
+ ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
+diff --git a/lib/bitmap.c b/lib/bitmap.c
+index 06f7e4f..f3cf2b0 100644
+--- a/lib/bitmap.c
++++ b/lib/bitmap.c
+@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
+ {
+       int c, old_c, totaldigits, ndigits, nchunks, nbits;
+       u32 chunk;
+-      const char __user __force *ubuf = (const char __user __force *)buf;
++      const char __user *ubuf = (const char __force_user *)buf;
+       bitmap_zero(maskp, nmaskbits);
+@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
+ {
+       if (!access_ok(VERIFY_READ, ubuf, ulen))
+               return -EFAULT;
+-      return __bitmap_parse((const char __force *)ubuf,
++      return __bitmap_parse((const char __force_kernel *)ubuf,
+                               ulen, 1, maskp, nmaskbits);
+ }
+@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
+ {
+       unsigned a, b;
+       int c, old_c, totaldigits;
+-      const char __user __force *ubuf = (const char __user __force *)buf;
++      const char __user *ubuf = (const char __force_user *)buf;
+       int exp_digit, in_range;
+       totaldigits = c = 0;
+@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
+ {
+       if (!access_ok(VERIFY_READ, ubuf, ulen))
+               return -EFAULT;
+-      return __bitmap_parselist((const char __force *)ubuf,
++      return __bitmap_parselist((const char __force_kernel *)ubuf,
+                                       ulen, 1, maskp, nmaskbits);
+ }
+ EXPORT_SYMBOL(bitmap_parselist_user);
+diff --git a/lib/bug.c b/lib/bug.c
+index 1686034..a9c00c8 100644
+--- a/lib/bug.c
++++ b/lib/bug.c
+@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
+               return BUG_TRAP_TYPE_NONE;
+       bug = find_bug(bugaddr);
++      if (!bug)
++              return BUG_TRAP_TYPE_NONE;
+       file = NULL;
+       line = 0;
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index 37061ed..da83f48 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -286,7 +286,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
+       if (limit > 4)
+               return;
+-      is_on_stack = object_is_on_stack(addr);
++      is_on_stack = object_starts_on_stack(addr);
+       if (is_on_stack == onstack)
+               return;
+diff --git a/lib/devres.c b/lib/devres.c
+index 8235331..5881053 100644
+--- a/lib/devres.c
++++ b/lib/devres.c
+@@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
+ void devm_iounmap(struct device *dev, void __iomem *addr)
+ {
+       WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
+-                             (void *)addr));
++                             (void __force *)addr));
+       iounmap(addr);
+ }
+ EXPORT_SYMBOL(devm_iounmap);
+@@ -224,7 +224,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
+ {
+       ioport_unmap(addr);
+       WARN_ON(devres_destroy(dev, devm_ioport_map_release,
+-                             devm_ioport_map_match, (void *)addr));
++                             devm_ioport_map_match, (void __force *)addr));
+ }
+ EXPORT_SYMBOL(devm_ioport_unmap);
+ #endif /* CONFIG_HAS_IOPORT */
+diff --git a/lib/div64.c b/lib/div64.c
+index a163b6c..9618fa5 100644
+--- a/lib/div64.c
++++ b/lib/div64.c
+@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
+ EXPORT_SYMBOL(__div64_32);
+ #ifndef div_s64_rem
+-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
++s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
+ {
+       u64 quotient;
+@@ -90,7 +90,7 @@ EXPORT_SYMBOL(div_s64_rem);
+  * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
+  */
+ #ifndef div64_u64
+-u64 div64_u64(u64 dividend, u64 divisor)
++u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
+ {
+       u32 high = divisor >> 32;
+       u64 quot;
+diff --git a/lib/dma-debug.c b/lib/dma-debug.c
+index d87a17a..ac0d79a 100644
+--- a/lib/dma-debug.c
++++ b/lib/dma-debug.c
+@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
+ void dma_debug_add_bus(struct bus_type *bus)
+ {
+-      struct notifier_block *nb;
++      notifier_block_no_const *nb;
+       if (global_disable)
+               return;
+@@ -945,7 +945,7 @@ static void check_unmap(struct dma_debug_entry *ref)
+ static void check_for_stack(struct device *dev, void *addr)
+ {
+-      if (object_is_on_stack(addr))
++      if (object_starts_on_stack(addr))
+               err_printk(dev, NULL, "DMA-API: device driver maps memory from"
+                               "stack [addr=%p]\n", addr);
+ }
+diff --git a/lib/inflate.c b/lib/inflate.c
+index 013a761..c28f3fc 100644
+--- a/lib/inflate.c
++++ b/lib/inflate.c
+@@ -269,7 +269,7 @@ static void free(void *where)
+               malloc_ptr = free_mem_ptr;
+ }
+ #else
+-#define malloc(a) kmalloc(a, GFP_KERNEL)
++#define malloc(a) kmalloc((a), GFP_KERNEL)
+ #define free(a) kfree(a)
+ #endif
+diff --git a/lib/ioremap.c b/lib/ioremap.c
+index 0c9216c..863bd89 100644
+--- a/lib/ioremap.c
++++ b/lib/ioremap.c
+@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
+       unsigned long next;
+       phys_addr -= addr;
+-      pmd = pmd_alloc(&init_mm, pud, addr);
++      pmd = pmd_alloc_kernel(&init_mm, pud, addr);
+       if (!pmd)
+               return -ENOMEM;
+       do {
+@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
+       unsigned long next;
+       phys_addr -= addr;
+-      pud = pud_alloc(&init_mm, pgd, addr);
++      pud = pud_alloc_kernel(&init_mm, pgd, addr);
+       if (!pud)
+               return -ENOMEM;
+       do {
+diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
+index bd2bea9..6b3c95e 100644
+--- a/lib/is_single_threaded.c
++++ b/lib/is_single_threaded.c
+@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
+       struct task_struct *p, *t;
+       bool ret;
++      if (!mm)
++              return true;
++
+       if (atomic_read(&task->signal->live) != 1)
+               return false;
+diff --git a/lib/kobject.c b/lib/kobject.c
+index b7e29a6..2f3ca75 100644
+--- a/lib/kobject.c
++++ b/lib/kobject.c
+@@ -805,7 +805,7 @@ static struct kset *kset_create(const char *name,
+       kset = kzalloc(sizeof(*kset), GFP_KERNEL);
+       if (!kset)
+               return NULL;
+-      retval = kobject_set_name(&kset->kobj, name);
++      retval = kobject_set_name(&kset->kobj, "%s", name);
+       if (retval) {
+               kfree(kset);
+               return NULL;
+@@ -859,9 +859,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
+ static DEFINE_SPINLOCK(kobj_ns_type_lock);
+-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
++static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
+-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
++int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
+ {
+       enum kobj_ns_type type = ops->type;
+       int error;
+diff --git a/lib/list_debug.c b/lib/list_debug.c
+index c24c2f7..06e070b 100644
+--- a/lib/list_debug.c
++++ b/lib/list_debug.c
+@@ -11,7 +11,9 @@
+ #include <linux/bug.h>
+ #include <linux/kernel.h>
+ #include <linux/rculist.h>
++#include <linux/mm.h>
++#ifdef CONFIG_DEBUG_LIST
+ /*
+  * Insert a new entry between two known consecutive entries.
+  *
+@@ -19,21 +21,32 @@
+  * the prev/next entries already!
+  */
+-void __list_add(struct list_head *new,
+-                            struct list_head *prev,
+-                            struct list_head *next)
++static bool __list_add_debug(struct list_head *new,
++                           struct list_head *prev,
++                           struct list_head *next)
+ {
+-      WARN(next->prev != prev,
++      if (WARN(next->prev != prev,
+               "list_add corruption. next->prev should be "
+               "prev (%p), but was %p. (next=%p).\n",
+-              prev, next->prev, next);
+-      WARN(prev->next != next,
++              prev, next->prev, next) ||
++          WARN(prev->next != next,
+               "list_add corruption. prev->next should be "
+               "next (%p), but was %p. (prev=%p).\n",
+-              next, prev->next, prev);
+-      WARN(new == prev || new == next,
+-           "list_add double add: new=%p, prev=%p, next=%p.\n",
+-           new, prev, next);
++              next, prev->next, prev) ||
++          WARN(new == prev || new == next,
++              "list_add double add: new=%p, prev=%p, next=%p.\n",
++              new, prev, next))
++              return false;
++      return true;
++}
++
++void __list_add(struct list_head *new,
++              struct list_head *prev,
++              struct list_head *next)
++{
++      if (!__list_add_debug(new, prev, next))
++              return;
++
+       next->prev = new;
+       new->next = next;
+       new->prev = prev;
+@@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
+ }
+ EXPORT_SYMBOL(__list_add);
+-void __list_del_entry(struct list_head *entry)
++static bool __list_del_entry_debug(struct list_head *entry)
+ {
+       struct list_head *prev, *next;
+@@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
+           WARN(next->prev != entry,
+               "list_del corruption. next->prev should be %p, "
+               "but was %p\n", entry, next->prev))
++              return false;
++      return true;
++}
++
++void __list_del_entry(struct list_head *entry)
++{
++      if (!__list_del_entry_debug(entry))
+               return;
+-      __list_del(prev, next);
++      __list_del(entry->prev, entry->next);
+ }
+ EXPORT_SYMBOL(__list_del_entry);
+@@ -86,15 +106,85 @@ EXPORT_SYMBOL(list_del);
+ void __list_add_rcu(struct list_head *new,
+                   struct list_head *prev, struct list_head *next)
+ {
+-      WARN(next->prev != prev,
+-              "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
+-              prev, next->prev, next);
+-      WARN(prev->next != next,
+-              "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
+-              next, prev->next, prev);
++      if (!__list_add_debug(new, prev, next))
++              return;
++
+       new->next = next;
+       new->prev = prev;
+       rcu_assign_pointer(list_next_rcu(prev), new);
+       next->prev = new;
+ }
+ EXPORT_SYMBOL(__list_add_rcu);
++#endif
++
++void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
++{
++#ifdef CONFIG_DEBUG_LIST
++      if (!__list_add_debug(new, prev, next))
++              return;
++#endif
++
++      pax_open_kernel();
++      next->prev = new;
++      new->next = next;
++      new->prev = prev;
++      prev->next = new;
++      pax_close_kernel();
++}
++EXPORT_SYMBOL(__pax_list_add);
++
++void pax_list_del(struct list_head *entry)
++{
++#ifdef CONFIG_DEBUG_LIST
++      if (!__list_del_entry_debug(entry))
++              return;
++#endif
++
++      pax_open_kernel();
++      __list_del(entry->prev, entry->next);
++      entry->next = LIST_POISON1;
++      entry->prev = LIST_POISON2;
++      pax_close_kernel();
++}
++EXPORT_SYMBOL(pax_list_del);
++
++void pax_list_del_init(struct list_head *entry)
++{
++      pax_open_kernel();
++      __list_del(entry->prev, entry->next);
++      INIT_LIST_HEAD(entry);
++      pax_close_kernel();
++}
++EXPORT_SYMBOL(pax_list_del_init);
++
++void __pax_list_add_rcu(struct list_head *new,
++                      struct list_head *prev, struct list_head *next)
++{
++#ifdef CONFIG_DEBUG_LIST
++      if (!__list_add_debug(new, prev, next))
++              return;
++#endif
++
++      pax_open_kernel();
++      new->next = next;
++      new->prev = prev;
++      rcu_assign_pointer(list_next_rcu(prev), new);
++      next->prev = new;
++      pax_close_kernel();
++}
++EXPORT_SYMBOL(__pax_list_add_rcu);
++
++void pax_list_del_rcu(struct list_head *entry)
++{
++#ifdef CONFIG_DEBUG_LIST
++      if (!__list_del_entry_debug(entry))
++              return;
++#endif
++
++      pax_open_kernel();
++      __list_del(entry->prev, entry->next);
++      entry->next = LIST_POISON1;
++      entry->prev = LIST_POISON2;
++      pax_close_kernel();
++}
++EXPORT_SYMBOL(pax_list_del_rcu);
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index e796429..6e38f9f 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -92,7 +92,7 @@ struct radix_tree_preload {
+       int nr;
+       struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
+ };
+-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
++static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
+ static inline void *ptr_to_indirect(void *ptr)
+ {
+diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
+index bb2b201..46abaf9 100644
+--- a/lib/strncpy_from_user.c
++++ b/lib/strncpy_from_user.c
+@@ -21,7 +21,7 @@
+  */
+ static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
+ {
+-      const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
++      static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+       long res = 0;
+       /*
+diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
+index a28df52..3d55877 100644
+--- a/lib/strnlen_user.c
++++ b/lib/strnlen_user.c
+@@ -26,7 +26,7 @@
+  */
+ static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
+ {
+-      const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
++      static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+       long align, res = 0;
+       unsigned long c;
+diff --git a/lib/swiotlb.c b/lib/swiotlb.c
+index d23762e..e21eab2 100644
+--- a/lib/swiotlb.c
++++ b/lib/swiotlb.c
+@@ -664,7 +664,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
+ void
+ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+-                    dma_addr_t dev_addr)
++                    dma_addr_t dev_addr, struct dma_attrs *attrs)
+ {
+       phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
+diff --git a/lib/usercopy.c b/lib/usercopy.c
+index 4f5b1dd..7cab418 100644
+--- a/lib/usercopy.c
++++ b/lib/usercopy.c
+@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
+       WARN(1, "Buffer overflow detected!\n");
+ }
+ EXPORT_SYMBOL(copy_from_user_overflow);
++
++void copy_to_user_overflow(void)
++{
++      WARN(1, "Buffer overflow detected!\n");
++}
++EXPORT_SYMBOL(copy_to_user_overflow);
+diff --git a/lib/vsprintf.c b/lib/vsprintf.c
+index e149c64..24aa71a 100644
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -16,6 +16,9 @@
+  * - scnprintf and vscnprintf
+  */
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++#define __INCLUDED_BY_HIDESYM 1
++#endif
+ #include <stdarg.h>
+ #include <linux/module.h>     /* for KSYM_SYMBOL_LEN */
+ #include <linux/types.h>
+@@ -981,7 +984,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
+       return number(buf, end, *(const netdev_features_t *)addr, spec);
+ }
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++int kptr_restrict __read_mostly = 2;
++#else
+ int kptr_restrict __read_mostly;
++#endif
+ /*
+  * Show a '%p' thing.  A kernel extension is that the '%p' is followed
+@@ -994,6 +1001,7 @@ int kptr_restrict __read_mostly;
+  * - 'f' For simple symbolic function names without offset
+  * - 'S' For symbolic direct pointers with offset
+  * - 's' For symbolic direct pointers without offset
++ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
+  * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
+  * - 'B' For backtraced symbolic direct pointers with offset
+  * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
+@@ -1052,12 +1060,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+       if (!ptr && *fmt != 'K') {
+               /*
+-               * Print (null) with the same width as a pointer so it makes
++               * Print (nil) with the same width as a pointer so it makes
+                * tabular output look nice.
+                */
+               if (spec.field_width == -1)
+                       spec.field_width = default_width;
+-              return string(buf, end, "(null)", spec);
++              return string(buf, end, "(nil)", spec);
+       }
+       switch (*fmt) {
+@@ -1067,6 +1075,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+               /* Fallthrough */
+       case 'S':
+       case 's':
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++              break;
++#else
++              return symbol_string(buf, end, ptr, spec, fmt);
++#endif
++      case 'A':
+       case 'B':
+               return symbol_string(buf, end, ptr, spec, fmt);
+       case 'R':
+@@ -1107,6 +1121,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+                       va_end(va);
+                       return buf;
+               }
++      case 'P':
++              break;
+       case 'K':
+               /*
+                * %pK cannot be used in IRQ context because its test
+@@ -1136,6 +1152,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+               return number(buf, end,
+                             (unsigned long long) *((phys_addr_t *)ptr), spec);
+       }
++
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++      /* 'P' = approved pointers to copy to userland,
++         as in the /proc/kallsyms case, as we make it display nothing
++         for non-root users, and the real contents for root users
++         Also ignore 'K' pointers, since we force their NULLing for non-root users
++         above
++      */
++      if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
++              printk(KERN_ALERT "grsec: kernel infoleak detected!  Please report this log to spender@grsecurity.net.\n");
++              dump_stack();
++              ptr = NULL;
++      }
++#endif
++
+       spec.flags |= SMALL;
+       if (spec.field_width == -1) {
+               spec.field_width = default_width;
+@@ -1857,11 +1888,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
+       typeof(type) value;                                             \
+       if (sizeof(type) == 8) {                                        \
+               args = PTR_ALIGN(args, sizeof(u32));                    \
+-              *(u32 *)&value = *(u32 *)args;                          \
+-              *((u32 *)&value + 1) = *(u32 *)(args + 4);              \
++              *(u32 *)&value = *(const u32 *)args;                    \
++              *((u32 *)&value + 1) = *(const u32 *)(args + 4);        \
+       } else {                                                        \
+               args = PTR_ALIGN(args, sizeof(type));                   \
+-              value = *(typeof(type) *)args;                          \
++              value = *(const typeof(type) *)args;                    \
+       }                                                               \
+       args += sizeof(type);                                           \
+       value;                                                          \
+@@ -1924,7 +1955,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
+               case FORMAT_TYPE_STR: {
+                       const char *str_arg = args;
+                       args += strlen(str_arg) + 1;
+-                      str = string(str, end, (char *)str_arg, spec);
++                      str = string(str, end, str_arg, spec);
+                       break;
+               }
+diff --git a/localversion-grsec b/localversion-grsec
+new file mode 100644
+index 0000000..7cd6065
+--- /dev/null
++++ b/localversion-grsec
+@@ -0,0 +1 @@
++-grsec
+diff --git a/mm/Kconfig b/mm/Kconfig
+index e742d06..c56fdd8 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -317,10 +317,10 @@ config KSM
+         root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
+ config DEFAULT_MMAP_MIN_ADDR
+-        int "Low address space to protect from user allocation"
++      int "Low address space to protect from user allocation"
+       depends on MMU
+-        default 4096
+-        help
++      default 65536
++      help
+         This is the portion of low virtual memory which should be protected
+         from userspace allocation.  Keeping a user from writing to low pages
+         can help reduce the impact of kernel NULL pointer bugs.
+@@ -351,7 +351,7 @@ config MEMORY_FAILURE
+ config HWPOISON_INJECT
+       tristate "HWPoison pages injector"
+-      depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
++      depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
+       select PROC_PAGE_MONITOR
+ config NOMMU_INITIAL_TRIM_EXCESS
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index 5025174..9d67dcd 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -12,7 +12,7 @@
+ #include <linux/device.h>
+ #include <trace/events/writeback.h>
+-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
++static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
+ struct backing_dev_info default_backing_dev_info = {
+       .name           = "default",
+@@ -515,7 +515,6 @@ EXPORT_SYMBOL(bdi_destroy);
+ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
+                          unsigned int cap)
+ {
+-      char tmp[32];
+       int err;
+       bdi->name = name;
+@@ -524,8 +523,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
+       if (err)
+               return err;
+-      sprintf(tmp, "%.28s%s", name, "-%d");
+-      err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
++      err = bdi_register(bdi, NULL, "%.28s-%ld", name, atomic_long_inc_return_unchecked(&bdi_seq));
+       if (err) {
+               bdi_destroy(bdi);
+               return err;
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 7905fe7..e60faa8 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1766,7 +1766,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
+       struct address_space *mapping = file->f_mapping;
+       if (!mapping->a_ops->readpage)
+-              return -ENOEXEC;
++              return -ENODEV;
+       file_accessed(file);
+       vma->vm_ops = &generic_file_vm_ops;
+       return 0;
+@@ -2106,6 +2106,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
+                         *pos = i_size_read(inode);
+               if (limit != RLIM_INFINITY) {
++                      gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
+                       if (*pos >= limit) {
+                               send_sig(SIGXFSZ, current, 0);
+                               return -EFBIG;
+diff --git a/mm/fremap.c b/mm/fremap.c
+index 87da359..3f41cb1 100644
+--- a/mm/fremap.c
++++ b/mm/fremap.c
+@@ -158,6 +158,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
+  retry:
+       vma = find_vma(mm, start);
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
++              goto out;
++#endif
++
+       /*
+        * Make sure the vma is shared, that it supports prefaulting,
+        * and that the remapped range is valid and fully within
+diff --git a/mm/highmem.c b/mm/highmem.c
+index b32b70c..e512eb0 100644
+--- a/mm/highmem.c
++++ b/mm/highmem.c
+@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
+                * So no dangers, even with speculative execution.
+                */
+               page = pte_page(pkmap_page_table[i]);
++              pax_open_kernel();
+               pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
+-
++              pax_close_kernel();
+               set_page_address(page, NULL);
+               need_flush = 1;
+       }
+@@ -198,9 +199,11 @@ start:
+               }
+       }
+       vaddr = PKMAP_ADDR(last_pkmap_nr);
++
++      pax_open_kernel();
+       set_pte_at(&init_mm, vaddr,
+                  &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
+-
++      pax_close_kernel();
+       pkmap_count[last_pkmap_nr] = 1;
+       set_page_address(page, (void *)vaddr);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 7c5eb85..5c01c2f 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2022,15 +2022,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
+       struct hstate *h = &default_hstate;
+       unsigned long tmp;
+       int ret;
++      ctl_table_no_const hugetlb_table;
+       tmp = h->max_huge_pages;
+       if (write && h->order >= MAX_ORDER)
+               return -EINVAL;
+-      table->data = &tmp;
+-      table->maxlen = sizeof(unsigned long);
+-      ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
++      hugetlb_table = *table;
++      hugetlb_table.data = &tmp;
++      hugetlb_table.maxlen = sizeof(unsigned long);
++      ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
+       if (ret)
+               goto out;
+@@ -2087,15 +2089,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
+       struct hstate *h = &default_hstate;
+       unsigned long tmp;
+       int ret;
++      ctl_table_no_const hugetlb_table;
+       tmp = h->nr_overcommit_huge_pages;
+       if (write && h->order >= MAX_ORDER)
+               return -EINVAL;
+-      table->data = &tmp;
+-      table->maxlen = sizeof(unsigned long);
+-      ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
++      hugetlb_table = *table;
++      hugetlb_table.data = &tmp;
++      hugetlb_table.maxlen = sizeof(unsigned long);
++      ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
+       if (ret)
+               goto out;
+@@ -2545,6 +2549,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+       return 1;
+ }
++#ifdef CONFIG_PAX_SEGMEXEC
++static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
++{
++      struct mm_struct *mm = vma->vm_mm;
++      struct vm_area_struct *vma_m;
++      unsigned long address_m;
++      pte_t *ptep_m;
++
++      vma_m = pax_find_mirror_vma(vma);
++      if (!vma_m)
++              return;
++
++      BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++      address_m = address + SEGMEXEC_TASK_SIZE;
++      ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
++      get_page(page_m);
++      hugepage_add_anon_rmap(page_m, vma_m, address_m);
++      set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
++}
++#endif
++
+ /*
+  * Hugetlb_cow() should be called with page lock of the original hugepage held.
+  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
+@@ -2663,6 +2688,11 @@ retry_avoidcopy:
+                               make_huge_pte(vma, new_page, 1));
+               page_remove_rmap(old_page);
+               hugepage_add_new_anon_rmap(new_page, vma, address);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++              pax_mirror_huge_pte(vma, address, new_page);
++#endif
++
+               /* Make the old page be freed below */
+               new_page = old_page;
+       }
+@@ -2821,6 +2851,10 @@ retry:
+                               && (vma->vm_flags & VM_SHARED)));
+       set_huge_pte_at(mm, address, ptep, new_pte);
++#ifdef CONFIG_PAX_SEGMEXEC
++      pax_mirror_huge_pte(vma, address, page);
++#endif
++
+       if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
+               /* Optimization, do the COW without a second fault */
+               ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
+@@ -2850,6 +2884,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+       static DEFINE_MUTEX(hugetlb_instantiation_mutex);
+       struct hstate *h = hstate_vma(vma);
++#ifdef CONFIG_PAX_SEGMEXEC
++      struct vm_area_struct *vma_m;
++#endif
++
+       address &= huge_page_mask(h);
+       ptep = huge_pte_offset(mm, address);
+@@ -2863,6 +2901,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+                               VM_FAULT_SET_HINDEX(hstate_index(h));
+       }
++#ifdef CONFIG_PAX_SEGMEXEC
++      vma_m = pax_find_mirror_vma(vma);
++      if (vma_m) {
++              unsigned long address_m;
++
++              if (vma->vm_start > vma_m->vm_start) {
++                      address_m = address;
++                      address -= SEGMEXEC_TASK_SIZE;
++                      vma = vma_m;
++                      h = hstate_vma(vma);
++              } else
++                      address_m = address + SEGMEXEC_TASK_SIZE;
++
++              if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
++                      return VM_FAULT_OOM;
++              address_m &= HPAGE_MASK;
++              unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
++      }
++#endif
++
+       ptep = huge_pte_alloc(mm, address, huge_page_size(h));
+       if (!ptep)
+               return VM_FAULT_OOM;
+diff --git a/mm/internal.h b/mm/internal.h
+index 8562de0..92b2073 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -100,6 +100,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
+  * in mm/page_alloc.c
+  */
+ extern void __free_pages_bootmem(struct page *page, unsigned int order);
++extern void free_compound_page(struct page *page);
+ extern void prep_compound_page(struct page *page, unsigned long order);
+ #ifdef CONFIG_MEMORY_FAILURE
+ extern bool is_free_buddy_page(struct page *page);
+@@ -355,7 +356,7 @@ extern u32 hwpoison_filter_enable;
+ extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
+         unsigned long, unsigned long,
+-        unsigned long, unsigned long);
++        unsigned long, unsigned long) __intentional_overflow(-1);
+ extern void set_pageblock_order(void);
+ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index c8d7f31..2dbeffd 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
+       for (i = 0; i < object->trace_len; i++) {
+               void *ptr = (void *)object->trace[i];
+-              seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
++              seq_printf(seq, "    [<%pP>] %pA\n", ptr, ptr);
+       }
+ }
+@@ -1851,7 +1851,7 @@ static int __init kmemleak_late_init(void)
+               return -ENOMEM;
+       }
+-      dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
++      dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
+                                    &kmemleak_fops);
+       if (!dentry)
+               pr_warning("Failed to create the debugfs kmemleak file\n");
+diff --git a/mm/maccess.c b/mm/maccess.c
+index d53adf9..03a24bf 100644
+--- a/mm/maccess.c
++++ b/mm/maccess.c
+@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
+       set_fs(KERNEL_DS);
+       pagefault_disable();
+       ret = __copy_from_user_inatomic(dst,
+-                      (__force const void __user *)src, size);
++                      (const void __force_user *)src, size);
+       pagefault_enable();
+       set_fs(old_fs);
+@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
+       set_fs(KERNEL_DS);
+       pagefault_disable();
+-      ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
++      ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
+       pagefault_enable();
+       set_fs(old_fs);
+diff --git a/mm/madvise.c b/mm/madvise.c
+index 7055883..aafb1ed 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
+       pgoff_t pgoff;
+       unsigned long new_flags = vma->vm_flags;
++#ifdef CONFIG_PAX_SEGMEXEC
++      struct vm_area_struct *vma_m;
++#endif
++
+       switch (behavior) {
+       case MADV_NORMAL:
+               new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
+@@ -126,6 +130,13 @@ success:
+       /*
+        * vm_flags is protected by the mmap_sem held in write mode.
+        */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      vma_m = pax_find_mirror_vma(vma);
++      if (vma_m)
++              vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
++#endif
++
+       vma->vm_flags = new_flags;
+ out:
+@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
+                            struct vm_area_struct ** prev,
+                            unsigned long start, unsigned long end)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      struct vm_area_struct *vma_m;
++#endif
++
+       *prev = vma;
+       if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
+               return -EINVAL;
+@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
+               zap_page_range(vma, start, end - start, &details);
+       } else
+               zap_page_range(vma, start, end - start, NULL);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      vma_m = pax_find_mirror_vma(vma);
++      if (vma_m) {
++              if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
++                      struct zap_details details = {
++                              .nonlinear_vma = vma_m,
++                              .last_index = ULONG_MAX,
++                      };
++                      zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
++              } else
++                      zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
++      }
++#endif
++
+       return 0;
+ }
+@@ -485,6 +516,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
+       if (end < start)
+               return error;
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
++              if (end > SEGMEXEC_TASK_SIZE)
++                      return error;
++      } else
++#endif
++
++      if (end > TASK_SIZE)
++              return error;
++
+       error = 0;
+       if (end == start)
+               return error;
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index ceb0c7f..b2b8e94 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
+ int sysctl_memory_failure_recovery __read_mostly = 1;
+-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
++atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
+ #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
+@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
+               pfn, t->comm, t->pid);
+       si.si_signo = SIGBUS;
+       si.si_errno = 0;
+-      si.si_addr = (void *)addr;
++      si.si_addr = (void __user *)addr;
+ #ifdef __ARCH_SI_TRAPNO
+       si.si_trapno = trapno;
+ #endif
+@@ -760,7 +760,7 @@ static struct page_state {
+       unsigned long res;
+       char *msg;
+       int (*action)(struct page *p, unsigned long pfn);
+-} error_states[] = {
++} __do_const error_states[] = {
+       { reserved,     reserved,       "reserved kernel",      me_kernel },
+       /*
+        * free pages are specially detected outside this table:
+@@ -1051,7 +1051,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
+               nr_pages = 1 << compound_order(hpage);
+       else /* normal page or thp */
+               nr_pages = 1;
+-      atomic_long_add(nr_pages, &num_poisoned_pages);
++      atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
+       /*
+        * We need/can do nothing about count=0 pages.
+@@ -1081,7 +1081,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
+                       if (!PageHWPoison(hpage)
+                           || (hwpoison_filter(p) && TestClearPageHWPoison(p))
+                           || (p != hpage && TestSetPageHWPoison(hpage))) {
+-                              atomic_long_sub(nr_pages, &num_poisoned_pages);
++                              atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
+                               return 0;
+                       }
+                       set_page_hwpoison_huge_page(hpage);
+@@ -1148,7 +1148,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
+       }
+       if (hwpoison_filter(p)) {
+               if (TestClearPageHWPoison(p))
+-                      atomic_long_sub(nr_pages, &num_poisoned_pages);
++                      atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
+               unlock_page(hpage);
+               put_page(hpage);
+               return 0;
+@@ -1350,7 +1350,7 @@ int unpoison_memory(unsigned long pfn)
+                       return 0;
+               }
+               if (TestClearPageHWPoison(p))
+-                      atomic_long_sub(nr_pages, &num_poisoned_pages);
++                      atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
+               pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
+               return 0;
+       }
+@@ -1364,7 +1364,7 @@ int unpoison_memory(unsigned long pfn)
+        */
+       if (TestClearPageHWPoison(page)) {
+               pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
+-              atomic_long_sub(nr_pages, &num_poisoned_pages);
++              atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
+               freeit = 1;
+               if (PageHuge(page))
+                       clear_page_hwpoison_huge_page(page);
+@@ -1491,7 +1491,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
+       } else {
+               set_page_hwpoison_huge_page(hpage);
+               dequeue_hwpoisoned_huge_page(hpage);
+-              atomic_long_add(1 << compound_trans_order(hpage),
++              atomic_long_add_unchecked(1 << compound_trans_order(hpage),
+                               &num_poisoned_pages);
+       }
+       /* keep elevated page count for bad page */
+@@ -1552,11 +1552,11 @@ int soft_offline_page(struct page *page, int flags)
+               if (PageHuge(page)) {
+                       set_page_hwpoison_huge_page(hpage);
+                       dequeue_hwpoisoned_huge_page(hpage);
+-                      atomic_long_add(1 << compound_trans_order(hpage),
++                      atomic_long_add_unchecked(1 << compound_trans_order(hpage),
+                                       &num_poisoned_pages);
+               } else {
+                       SetPageHWPoison(page);
+-                      atomic_long_inc(&num_poisoned_pages);
++                      atomic_long_inc_unchecked(&num_poisoned_pages);
+               }
+       }
+       /* keep elevated page count for bad page */
+@@ -1596,7 +1596,7 @@ static int __soft_offline_page(struct page *page, int flags)
+               put_page(page);
+               pr_info("soft_offline: %#lx: invalidated\n", pfn);
+               SetPageHWPoison(page);
+-              atomic_long_inc(&num_poisoned_pages);
++              atomic_long_inc_unchecked(&num_poisoned_pages);
+               return 0;
+       }
+@@ -1626,7 +1626,7 @@ static int __soft_offline_page(struct page *page, int flags)
+                               ret = -EIO;
+               } else {
+                       SetPageHWPoison(page);
+-                      atomic_long_inc(&num_poisoned_pages);
++                      atomic_long_inc_unchecked(&num_poisoned_pages);
+               }
+       } else {
+               pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
+diff --git a/mm/memory.c b/mm/memory.c
+index 5a35443..7c0340f 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -428,6 +428,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+               free_pte_range(tlb, pmd, addr);
+       } while (pmd++, addr = next, addr != end);
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
+       start &= PUD_MASK;
+       if (start < floor)
+               return;
+@@ -442,6 +443,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+       pmd = pmd_offset(pud, start);
+       pud_clear(pud);
+       pmd_free_tlb(tlb, pmd, start);
++#endif
++
+ }
+ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+@@ -461,6 +464,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+               free_pmd_range(tlb, pud, addr, next, floor, ceiling);
+       } while (pud++, addr = next, addr != end);
++#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
+       start &= PGDIR_MASK;
+       if (start < floor)
+               return;
+@@ -475,6 +479,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+       pud = pud_offset(pgd, start);
+       pgd_clear(pgd);
+       pud_free_tlb(tlb, pud, start);
++#endif
++
+ }
+ /*
+@@ -1644,12 +1650,6 @@ no_page_table:
+       return page;
+ }
+-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+-{
+-      return stack_guard_page_start(vma, addr) ||
+-             stack_guard_page_end(vma, addr+PAGE_SIZE);
+-}
+-
+ /**
+  * __get_user_pages() - pin user pages in memory
+  * @tsk:      task_struct of target task
+@@ -1736,10 +1736,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+       i = 0;
+-      do {
++      while (nr_pages) {
+               struct vm_area_struct *vma;
+-              vma = find_extend_vma(mm, start);
++              vma = find_vma(mm, start);
+               if (!vma && in_gate_area(mm, start)) {
+                       unsigned long pg = start & PAGE_MASK;
+                       pgd_t *pgd;
+@@ -1788,7 +1788,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+                       goto next_page;
+               }
+-              if (!vma ||
++              if (!vma || start < vma->vm_start ||
+                   (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+                   !(vm_flags & vma->vm_flags))
+                       return i ? : -EFAULT;
+@@ -1817,11 +1817,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+                               int ret;
+                               unsigned int fault_flags = 0;
+-                              /* For mlock, just skip the stack guard page. */
+-                              if (foll_flags & FOLL_MLOCK) {
+-                                      if (stack_guard_page(vma, start))
+-                                              goto next_page;
+-                              }
+                               if (foll_flags & FOLL_WRITE)
+                                       fault_flags |= FAULT_FLAG_WRITE;
+                               if (nonblocking)
+@@ -1901,7 +1896,7 @@ next_page:
+                       start += page_increm * PAGE_SIZE;
+                       nr_pages -= page_increm;
+               } while (nr_pages && start < vma->vm_end);
+-      } while (nr_pages);
++      }
+       return i;
+ }
+ EXPORT_SYMBOL(__get_user_pages);
+@@ -2108,6 +2103,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+       page_add_file_rmap(page);
+       set_pte_at(mm, addr, pte, mk_pte(page, prot));
++#ifdef CONFIG_PAX_SEGMEXEC
++      pax_mirror_file_pte(vma, addr, page, ptl);
++#endif
++
+       retval = 0;
+       pte_unmap_unlock(pte, ptl);
+       return retval;
+@@ -2152,9 +2151,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+       if (!page_count(page))
+               return -EINVAL;
+       if (!(vma->vm_flags & VM_MIXEDMAP)) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++              struct vm_area_struct *vma_m;
++#endif
++
+               BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
+               BUG_ON(vma->vm_flags & VM_PFNMAP);
+               vma->vm_flags |= VM_MIXEDMAP;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++              vma_m = pax_find_mirror_vma(vma);
++              if (vma_m)
++                      vma_m->vm_flags |= VM_MIXEDMAP;
++#endif
++
+       }
+       return insert_page(vma, addr, page, vma->vm_page_prot);
+ }
+@@ -2237,6 +2248,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+                       unsigned long pfn)
+ {
+       BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
++      BUG_ON(vma->vm_mirror);
+       if (addr < vma->vm_start || addr >= vma->vm_end)
+               return -EFAULT;
+@@ -2484,7 +2496,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+       BUG_ON(pud_huge(*pud));
+-      pmd = pmd_alloc(mm, pud, addr);
++      pmd = (mm == &init_mm) ?
++              pmd_alloc_kernel(mm, pud, addr) :
++              pmd_alloc(mm, pud, addr);
+       if (!pmd)
+               return -ENOMEM;
+       do {
+@@ -2504,7 +2518,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+       unsigned long next;
+       int err;
+-      pud = pud_alloc(mm, pgd, addr);
++      pud = (mm == &init_mm) ?
++              pud_alloc_kernel(mm, pgd, addr) :
++              pud_alloc(mm, pgd, addr);
+       if (!pud)
+               return -ENOMEM;
+       do {
+@@ -2592,6 +2608,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
+               copy_user_highpage(dst, src, va, vma);
+ }
++#ifdef CONFIG_PAX_SEGMEXEC
++static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
++{
++      struct mm_struct *mm = vma->vm_mm;
++      spinlock_t *ptl;
++      pte_t *pte, entry;
++
++      pte = pte_offset_map_lock(mm, pmd, address, &ptl);
++      entry = *pte;
++      if (!pte_present(entry)) {
++              if (!pte_none(entry)) {
++                      BUG_ON(pte_file(entry));
++                      free_swap_and_cache(pte_to_swp_entry(entry));
++                      pte_clear_not_present_full(mm, address, pte, 0);
++              }
++      } else {
++              struct page *page;
++
++              flush_cache_page(vma, address, pte_pfn(entry));
++              entry = ptep_clear_flush(vma, address, pte);
++              BUG_ON(pte_dirty(entry));
++              page = vm_normal_page(vma, address, entry);
++              if (page) {
++                      update_hiwater_rss(mm);
++                      if (PageAnon(page))
++                              dec_mm_counter_fast(mm, MM_ANONPAGES);
++                      else
++                              dec_mm_counter_fast(mm, MM_FILEPAGES);
++                      page_remove_rmap(page);
++                      page_cache_release(page);
++              }
++      }
++      pte_unmap_unlock(pte, ptl);
++}
++
++/* PaX: if vma is mirrored, synchronize the mirror's PTE
++ *
++ * the ptl of the lower mapped page is held on entry and is not released on exit
++ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
++ */
++static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
++{
++      struct mm_struct *mm = vma->vm_mm;
++      unsigned long address_m;
++      spinlock_t *ptl_m;
++      struct vm_area_struct *vma_m;
++      pmd_t *pmd_m;
++      pte_t *pte_m, entry_m;
++
++      BUG_ON(!page_m || !PageAnon(page_m));
++
++      vma_m = pax_find_mirror_vma(vma);
++      if (!vma_m)
++              return;
++
++      BUG_ON(!PageLocked(page_m));
++      BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++      address_m = address + SEGMEXEC_TASK_SIZE;
++      pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
++      pte_m = pte_offset_map(pmd_m, address_m);
++      ptl_m = pte_lockptr(mm, pmd_m);
++      if (ptl != ptl_m) {
++              spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
++              if (!pte_none(*pte_m))
++                      goto out;
++      }
++
++      entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
++      page_cache_get(page_m);
++      page_add_anon_rmap(page_m, vma_m, address_m);
++      inc_mm_counter_fast(mm, MM_ANONPAGES);
++      set_pte_at(mm, address_m, pte_m, entry_m);
++      update_mmu_cache(vma_m, address_m, pte_m);
++out:
++      if (ptl != ptl_m)
++              spin_unlock(ptl_m);
++      pte_unmap(pte_m);
++      unlock_page(page_m);
++}
++
++void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
++{
++      struct mm_struct *mm = vma->vm_mm;
++      unsigned long address_m;
++      spinlock_t *ptl_m;
++      struct vm_area_struct *vma_m;
++      pmd_t *pmd_m;
++      pte_t *pte_m, entry_m;
++
++      BUG_ON(!page_m || PageAnon(page_m));
++
++      vma_m = pax_find_mirror_vma(vma);
++      if (!vma_m)
++              return;
++
++      BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++      address_m = address + SEGMEXEC_TASK_SIZE;
++      pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
++      pte_m = pte_offset_map(pmd_m, address_m);
++      ptl_m = pte_lockptr(mm, pmd_m);
++      if (ptl != ptl_m) {
++              spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
++              if (!pte_none(*pte_m))
++                      goto out;
++      }
++
++      entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
++      page_cache_get(page_m);
++      page_add_file_rmap(page_m);
++      inc_mm_counter_fast(mm, MM_FILEPAGES);
++      set_pte_at(mm, address_m, pte_m, entry_m);
++      update_mmu_cache(vma_m, address_m, pte_m);
++out:
++      if (ptl != ptl_m)
++              spin_unlock(ptl_m);
++      pte_unmap(pte_m);
++}
++
++static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
++{
++      struct mm_struct *mm = vma->vm_mm;
++      unsigned long address_m;
++      spinlock_t *ptl_m;
++      struct vm_area_struct *vma_m;
++      pmd_t *pmd_m;
++      pte_t *pte_m, entry_m;
++
++      vma_m = pax_find_mirror_vma(vma);
++      if (!vma_m)
++              return;
++
++      BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++      address_m = address + SEGMEXEC_TASK_SIZE;
++      pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
++      pte_m = pte_offset_map(pmd_m, address_m);
++      ptl_m = pte_lockptr(mm, pmd_m);
++      if (ptl != ptl_m) {
++              spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
++              if (!pte_none(*pte_m))
++                      goto out;
++      }
++
++      entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
++      set_pte_at(mm, address_m, pte_m, entry_m);
++out:
++      if (ptl != ptl_m)
++              spin_unlock(ptl_m);
++      pte_unmap(pte_m);
++}
++
++static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
++{
++      struct page *page_m;
++      pte_t entry;
++
++      if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
++              goto out;
++
++      entry = *pte;
++      page_m  = vm_normal_page(vma, address, entry);
++      if (!page_m)
++              pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
++      else if (PageAnon(page_m)) {
++              if (pax_find_mirror_vma(vma)) {
++                      pte_unmap_unlock(pte, ptl);
++                      lock_page(page_m);
++                      pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
++                      if (pte_same(entry, *pte))
++                              pax_mirror_anon_pte(vma, address, page_m, ptl);
++                      else
++                              unlock_page(page_m);
++              }
++      } else
++              pax_mirror_file_pte(vma, address, page_m, ptl);
++
++out:
++      pte_unmap_unlock(pte, ptl);
++}
++#endif
++
+ /*
+  * This routine handles present pages, when users try to write
+  * to a shared page. It is done by copying the page to a new address
+@@ -2808,6 +3004,12 @@ gotten:
+        */
+       page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+       if (likely(pte_same(*page_table, orig_pte))) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++              if (pax_find_mirror_vma(vma))
++                      BUG_ON(!trylock_page(new_page));
++#endif
++
+               if (old_page) {
+                       if (!PageAnon(old_page)) {
+                               dec_mm_counter_fast(mm, MM_FILEPAGES);
+@@ -2859,6 +3061,10 @@ gotten:
+                       page_remove_rmap(old_page);
+               }
++#ifdef CONFIG_PAX_SEGMEXEC
++              pax_mirror_anon_pte(vma, address, new_page, ptl);
++#endif
++
+               /* Free the old page.. */
+               new_page = old_page;
+               ret |= VM_FAULT_WRITE;
+@@ -3134,6 +3340,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+       swap_free(entry);
+       if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
+               try_to_free_swap(page);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
++#endif
++
+       unlock_page(page);
+       if (page != swapcache) {
+               /*
+@@ -3157,6 +3368,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+       /* No need to invalidate - it was non-present before */
+       update_mmu_cache(vma, address, page_table);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      pax_mirror_anon_pte(vma, address, page, ptl);
++#endif
++
+ unlock:
+       pte_unmap_unlock(page_table, ptl);
+ out:
+@@ -3176,40 +3392,6 @@ out_release:
+ }
+ /*
+- * This is like a special single-page "expand_{down|up}wards()",
+- * except we must first make sure that 'address{-|+}PAGE_SIZE'
+- * doesn't hit another vma.
+- */
+-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+-{
+-      address &= PAGE_MASK;
+-      if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+-              struct vm_area_struct *prev = vma->vm_prev;
+-
+-              /*
+-               * Is there a mapping abutting this one below?
+-               *
+-               * That's only ok if it's the same stack mapping
+-               * that has gotten split..
+-               */
+-              if (prev && prev->vm_end == address)
+-                      return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+-
+-              expand_downwards(vma, address - PAGE_SIZE);
+-      }
+-      if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+-              struct vm_area_struct *next = vma->vm_next;
+-
+-              /* As VM_GROWSDOWN but s/below/above/ */
+-              if (next && next->vm_start == address + PAGE_SIZE)
+-                      return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+-
+-              expand_upwards(vma, address + PAGE_SIZE);
+-      }
+-      return 0;
+-}
+-
+-/*
+  * We enter with non-exclusive mmap_sem (to exclude vma changes,
+  * but allow concurrent faults), and pte mapped but not yet locked.
+  * We return with mmap_sem still held, but pte unmapped and unlocked.
+@@ -3218,27 +3400,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+               unsigned long address, pte_t *page_table, pmd_t *pmd,
+               unsigned int flags)
+ {
+-      struct page *page;
++      struct page *page = NULL;
+       spinlock_t *ptl;
+       pte_t entry;
+-      pte_unmap(page_table);
+-
+-      /* Check if we need to add a guard page to the stack */
+-      if (check_stack_guard_page(vma, address) < 0)
+-              return VM_FAULT_SIGBUS;
+-
+-      /* Use the zero-page for reads */
+       if (!(flags & FAULT_FLAG_WRITE)) {
+               entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+                                               vma->vm_page_prot));
+-              page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
++              ptl = pte_lockptr(mm, pmd);
++              spin_lock(ptl);
+               if (!pte_none(*page_table))
+                       goto unlock;
+               goto setpte;
+       }
+       /* Allocate our own private page. */
++      pte_unmap(page_table);
++
+       if (unlikely(anon_vma_prepare(vma)))
+               goto oom;
+       page = alloc_zeroed_user_highpage_movable(vma, address);
+@@ -3262,6 +3440,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+       if (!pte_none(*page_table))
+               goto release;
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (pax_find_mirror_vma(vma))
++              BUG_ON(!trylock_page(page));
++#endif
++
+       inc_mm_counter_fast(mm, MM_ANONPAGES);
+       page_add_new_anon_rmap(page, vma, address);
+ setpte:
+@@ -3269,6 +3452,12 @@ setpte:
+       /* No need to invalidate - it was non-present before */
+       update_mmu_cache(vma, address, page_table);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (page)
++              pax_mirror_anon_pte(vma, address, page, ptl);
++#endif
++
+ unlock:
+       pte_unmap_unlock(page_table, ptl);
+       return 0;
+@@ -3412,6 +3601,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+        */
+       /* Only go through if we didn't race with anybody else... */
+       if (likely(pte_same(*page_table, orig_pte))) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++              if (anon && pax_find_mirror_vma(vma))
++                      BUG_ON(!trylock_page(page));
++#endif
++
+               flush_icache_page(vma, page);
+               entry = mk_pte(page, vma->vm_page_prot);
+               if (flags & FAULT_FLAG_WRITE)
+@@ -3431,6 +3626,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+               /* no need to invalidate: a not-present page won't be cached */
+               update_mmu_cache(vma, address, page_table);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++              if (anon)
++                      pax_mirror_anon_pte(vma, address, page, ptl);
++              else
++                      pax_mirror_file_pte(vma, address, page, ptl);
++#endif
++
+       } else {
+               if (cow_page)
+                       mem_cgroup_uncharge_page(cow_page);
+@@ -3752,6 +3955,12 @@ int handle_pte_fault(struct mm_struct *mm,
+               if (flags & FAULT_FLAG_WRITE)
+                       flush_tlb_fix_spurious_fault(vma, address);
+       }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      pax_mirror_pte(vma, address, pte, pmd, ptl);
++      return 0;
++#endif
++
+ unlock:
+       pte_unmap_unlock(pte, ptl);
+       return 0;
+@@ -3768,6 +3977,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+       pmd_t *pmd;
+       pte_t *pte;
++#ifdef CONFIG_PAX_SEGMEXEC
++      struct vm_area_struct *vma_m;
++#endif
++
+       __set_current_state(TASK_RUNNING);
+       count_vm_event(PGFAULT);
+@@ -3779,6 +3992,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+       if (unlikely(is_vm_hugetlb_page(vma)))
+               return hugetlb_fault(mm, vma, address, flags);
++#ifdef CONFIG_PAX_SEGMEXEC
++      vma_m = pax_find_mirror_vma(vma);
++      if (vma_m) {
++              unsigned long address_m;
++              pgd_t *pgd_m;
++              pud_t *pud_m;
++              pmd_t *pmd_m;
++
++              if (vma->vm_start > vma_m->vm_start) {
++                      address_m = address;
++                      address -= SEGMEXEC_TASK_SIZE;
++                      vma = vma_m;
++              } else
++                      address_m = address + SEGMEXEC_TASK_SIZE;
++
++              pgd_m = pgd_offset(mm, address_m);
++              pud_m = pud_alloc(mm, pgd_m, address_m);
++              if (!pud_m)
++                      return VM_FAULT_OOM;
++              pmd_m = pmd_alloc(mm, pud_m, address_m);
++              if (!pmd_m)
++                      return VM_FAULT_OOM;
++              if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
++                      return VM_FAULT_OOM;
++              pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
++      }
++#endif
++
+ retry:
+       pgd = pgd_offset(mm, address);
+       pud = pud_alloc(mm, pgd, address);
+@@ -3877,6 +4118,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+       spin_unlock(&mm->page_table_lock);
+       return 0;
+ }
++
++int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
++{
++      pud_t *new = pud_alloc_one(mm, address);
++      if (!new)
++              return -ENOMEM;
++
++      smp_wmb(); /* See comment in __pte_alloc */
++
++      spin_lock(&mm->page_table_lock);
++      if (pgd_present(*pgd))          /* Another has populated it */
++              pud_free(mm, new);
++      else
++              pgd_populate_kernel(mm, pgd, new);
++      spin_unlock(&mm->page_table_lock);
++      return 0;
++}
+ #endif /* __PAGETABLE_PUD_FOLDED */
+ #ifndef __PAGETABLE_PMD_FOLDED
+@@ -3907,6 +4165,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+       spin_unlock(&mm->page_table_lock);
+       return 0;
+ }
++
++int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
++{
++      pmd_t *new = pmd_alloc_one(mm, address);
++      if (!new)
++              return -ENOMEM;
++
++      smp_wmb(); /* See comment in __pte_alloc */
++
++      spin_lock(&mm->page_table_lock);
++#ifndef __ARCH_HAS_4LEVEL_HACK
++      if (pud_present(*pud))          /* Another has populated it */
++              pmd_free(mm, new);
++      else
++              pud_populate_kernel(mm, pud, new);
++#else
++      if (pgd_present(*pud))          /* Another has populated it */
++              pmd_free(mm, new);
++      else
++              pgd_populate_kernel(mm, pud, new);
++#endif /* __ARCH_HAS_4LEVEL_HACK */
++      spin_unlock(&mm->page_table_lock);
++      return 0;
++}
+ #endif /* __PAGETABLE_PMD_FOLDED */
+ #if !defined(__HAVE_ARCH_GATE_AREA)
+@@ -3920,7 +4202,7 @@ static int __init gate_vma_init(void)
+       gate_vma.vm_start = FIXADDR_USER_START;
+       gate_vma.vm_end = FIXADDR_USER_END;
+       gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+-      gate_vma.vm_page_prot = __P101;
++      gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+       return 0;
+ }
+@@ -4054,8 +4336,8 @@ out:
+       return ret;
+ }
+-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+-                      void *buf, int len, int write)
++ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
++                      void *buf, size_t len, int write)
+ {
+       resource_size_t phys_addr;
+       unsigned long prot = 0;
+@@ -4080,8 +4362,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+  * Access another process' address space as given in mm.  If non-NULL, use the
+  * given task for page fault accounting.
+  */
+-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+-              unsigned long addr, void *buf, int len, int write)
++static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
++              unsigned long addr, void *buf, size_t len, int write)
+ {
+       struct vm_area_struct *vma;
+       void *old_buf = buf;
+@@ -4089,7 +4371,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+       down_read(&mm->mmap_sem);
+       /* ignore errors, just check how much was successfully transferred */
+       while (len) {
+-              int bytes, ret, offset;
++              ssize_t bytes, ret, offset;
+               void *maddr;
+               struct page *page = NULL;
+@@ -4148,8 +4430,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+  *
+  * The caller must hold a reference on @mm.
+  */
+-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+-              void *buf, int len, int write)
++ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
++              void *buf, size_t len, int write)
+ {
+       return __access_remote_vm(NULL, mm, addr, buf, len, write);
+ }
+@@ -4159,11 +4441,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+  * Source/target buffer must be kernel space,
+  * Do not walk the page table directly, use get_user_pages
+  */
+-int access_process_vm(struct task_struct *tsk, unsigned long addr,
+-              void *buf, int len, int write)
++ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
++              void *buf, size_t len, int write)
+ {
+       struct mm_struct *mm;
+-      int ret;
++      ssize_t ret;
+       mm = get_task_mm(tsk);
+       if (!mm)
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 4baf12e..5497066 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -708,6 +708,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
+       unsigned long vmstart;
+       unsigned long vmend;
++#ifdef CONFIG_PAX_SEGMEXEC
++      struct vm_area_struct *vma_m;
++#endif
++
+       vma = find_vma(mm, start);
+       if (!vma || vma->vm_start > start)
+               return -EFAULT;
+@@ -751,6 +755,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
+               err = vma_replace_policy(vma, new_pol);
+               if (err)
+                       goto out;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++              vma_m = pax_find_mirror_vma(vma);
++              if (vma_m) {
++                      err = vma_replace_policy(vma_m, new_pol);
++                      if (err)
++                              goto out;
++              }
++#endif
++
+       }
+  out:
+@@ -1206,6 +1220,17 @@ static long do_mbind(unsigned long start, unsigned long len,
+       if (end < start)
+               return -EINVAL;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (mm->pax_flags & MF_PAX_SEGMEXEC) {
++              if (end > SEGMEXEC_TASK_SIZE)
++                      return -EINVAL;
++      } else
++#endif
++
++      if (end > TASK_SIZE)
++              return -EINVAL;
++
+       if (end == start)
+               return 0;
+@@ -1434,8 +1459,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
+        */
+       tcred = __task_cred(task);
+       if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
+-          !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
+-          !capable(CAP_SYS_NICE)) {
++          !uid_eq(cred->uid,  tcred->suid) && !capable(CAP_SYS_NICE)) {
+               rcu_read_unlock();
+               err = -EPERM;
+               goto out_put;
+@@ -1466,6 +1490,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
+               goto out;
+       }
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      if (mm != current->mm &&
++          (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
++              mmput(mm);
++              err = -EPERM;
++              goto out;
++      }
++#endif
++
+       err = do_migrate_pages(mm, old, new,
+               capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 6f0c244..6d1ae32 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1399,8 +1399,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+        */
+       tcred = __task_cred(task);
+       if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
+-          !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
+-          !capable(CAP_SYS_NICE)) {
++          !uid_eq(cred->uid,  tcred->suid) && !capable(CAP_SYS_NICE)) {
+               rcu_read_unlock();
+               err = -EPERM;
+               goto out;
+diff --git a/mm/mlock.c b/mm/mlock.c
+index 79b7cf7..9944291 100644
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -13,6 +13,7 @@
+ #include <linux/pagemap.h>
+ #include <linux/mempolicy.h>
+ #include <linux/syscalls.h>
++#include <linux/security.h>
+ #include <linux/sched.h>
+ #include <linux/export.h>
+ #include <linux/rmap.h>
+@@ -334,7 +335,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
+ {
+       unsigned long nstart, end, tmp;
+       struct vm_area_struct * vma, * prev;
+-      int error;
++      int error = 0;
+       VM_BUG_ON(start & ~PAGE_MASK);
+       VM_BUG_ON(len != PAGE_ALIGN(len));
+@@ -343,6 +344,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
+               return -EINVAL;
+       if (end == start)
+               return 0;
++      if (end > TASK_SIZE)
++              return -EINVAL;
++
+       vma = find_vma(current->mm, start);
+       if (!vma || vma->vm_start > start)
+               return -ENOMEM;
+@@ -354,6 +358,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
+       for (nstart = start ; ; ) {
+               vm_flags_t newflags;
++#ifdef CONFIG_PAX_SEGMEXEC
++              if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
++                      break;
++#endif
++
+               /* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
+               newflags = vma->vm_flags & ~VM_LOCKED;
+@@ -466,6 +475,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
+       lock_limit >>= PAGE_SHIFT;
+       /* check against resource limits */
++      gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
+       if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
+               error = do_mlock(start, len, 1);
+       up_write(&current->mm->mmap_sem);
+@@ -500,6 +510,11 @@ static int do_mlockall(int flags)
+       for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
+               vm_flags_t newflags;
++#ifdef CONFIG_PAX_SEGMEXEC
++              if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
++                      break;
++#endif
++
+               newflags = vma->vm_flags & ~VM_LOCKED;
+               if (flags & MCL_CURRENT)
+                       newflags |= VM_LOCKED;
+@@ -532,6 +547,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
+       lock_limit >>= PAGE_SHIFT;
+       ret = -ENOMEM;
++      gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
+       if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
+           capable(CAP_IPC_LOCK))
+               ret = do_mlockall(flags);
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 8d25fdc..bfb7626 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -36,6 +36,7 @@
+ #include <linux/sched/sysctl.h>
+ #include <linux/notifier.h>
+ #include <linux/memory.h>
++#include <linux/random.h>
+ #include <asm/uaccess.h>
+ #include <asm/cacheflush.h>
+@@ -52,6 +53,16 @@
+ #define arch_rebalance_pgtables(addr, len)            (addr)
+ #endif
++static inline void verify_mm_writelocked(struct mm_struct *mm)
++{
++#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
++      if (unlikely(down_read_trylock(&mm->mmap_sem))) {
++              up_read(&mm->mmap_sem);
++              BUG();
++      }
++#endif
++}
++
+ static void unmap_region(struct mm_struct *mm,
+               struct vm_area_struct *vma, struct vm_area_struct *prev,
+               unsigned long start, unsigned long end);
+@@ -71,16 +82,25 @@ static void unmap_region(struct mm_struct *mm,
+  *            x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
+  *
+  */
+-pgprot_t protection_map[16] = {
++pgprot_t protection_map[16] __read_only = {
+       __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
+       __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
+ };
+-pgprot_t vm_get_page_prot(unsigned long vm_flags)
++pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
+ {
+-      return __pgprot(pgprot_val(protection_map[vm_flags &
++      pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
+                               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
+                       pgprot_val(arch_vm_get_page_prot(vm_flags)));
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++      if (!(__supported_pte_mask & _PAGE_NX) &&
++          (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
++          (vm_flags & (VM_READ | VM_WRITE)))
++              prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
++#endif
++
++      return prot;
+ }
+ EXPORT_SYMBOL(vm_get_page_prot);
+@@ -89,6 +109,7 @@ int sysctl_overcommit_ratio __read_mostly = 50;     /* default is 50% */
+ int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
+ unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
+ unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
++unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
+ /*
+  * Make sure vm_committed_as in one cacheline and not cacheline shared with
+  * other variables. It can be updated by several CPUs frequently.
+@@ -247,6 +268,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
+       struct vm_area_struct *next = vma->vm_next;
+       might_sleep();
++      BUG_ON(vma->vm_mirror);
+       if (vma->vm_ops && vma->vm_ops->close)
+               vma->vm_ops->close(vma);
+       if (vma->vm_file)
+@@ -291,6 +313,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
+        * not page aligned -Ram Gupta
+        */
+       rlim = rlimit(RLIMIT_DATA);
++      gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
+       if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
+                       (mm->end_data - mm->start_data) > rlim)
+               goto out;
+@@ -933,6 +956,12 @@ static int
+ can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
+       struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
++              return 0;
++#endif
++
+       if (is_mergeable_vma(vma, file, vm_flags) &&
+           is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
+               if (vma->vm_pgoff == vm_pgoff)
+@@ -952,6 +981,12 @@ static int
+ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
+       struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
++              return 0;
++#endif
++
+       if (is_mergeable_vma(vma, file, vm_flags) &&
+           is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
+               pgoff_t vm_pglen;
+@@ -994,13 +1029,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
+ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+                       struct vm_area_struct *prev, unsigned long addr,
+                       unsigned long end, unsigned long vm_flags,
+-                      struct anon_vma *anon_vma, struct file *file,
++                      struct anon_vma *anon_vma, struct file *file,
+                       pgoff_t pgoff, struct mempolicy *policy)
+ {
+       pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
+       struct vm_area_struct *area, *next;
+       int err;
++#ifdef CONFIG_PAX_SEGMEXEC
++      unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
++      struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
++
++      BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
++#endif
++
+       /*
+        * We later require that vma->vm_flags == vm_flags,
+        * so this tests vma->vm_flags & VM_SPECIAL, too.
+@@ -1016,6 +1058,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+       if (next && next->vm_end == end)                /* cases 6, 7, 8 */
+               next = next->vm_next;
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (prev)
++              prev_m = pax_find_mirror_vma(prev);
++      if (area)
++              area_m = pax_find_mirror_vma(area);
++      if (next)
++              next_m = pax_find_mirror_vma(next);
++#endif
++
+       /*
+        * Can it merge with the predecessor?
+        */
+@@ -1035,9 +1086,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+                                                       /* cases 1, 6 */
+                       err = vma_adjust(prev, prev->vm_start,
+                               next->vm_end, prev->vm_pgoff, NULL);
+-              } else                                  /* cases 2, 5, 7 */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++                      if (!err && prev_m)
++                              err = vma_adjust(prev_m, prev_m->vm_start,
++                                      next_m->vm_end, prev_m->vm_pgoff, NULL);
++#endif
++
++              } else {                                /* cases 2, 5, 7 */
+                       err = vma_adjust(prev, prev->vm_start,
+                               end, prev->vm_pgoff, NULL);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++                      if (!err && prev_m)
++                              err = vma_adjust(prev_m, prev_m->vm_start,
++                                              end_m, prev_m->vm_pgoff, NULL);
++#endif
++
++              }
+               if (err)
+                       return NULL;
+               khugepaged_enter_vma_merge(prev);
+@@ -1051,12 +1117,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+                       mpol_equal(policy, vma_policy(next)) &&
+                       can_vma_merge_before(next, vm_flags,
+                                       anon_vma, file, pgoff+pglen)) {
+-              if (prev && addr < prev->vm_end)        /* case 4 */
++              if (prev && addr < prev->vm_end) {      /* case 4 */
+                       err = vma_adjust(prev, prev->vm_start,
+                               addr, prev->vm_pgoff, NULL);
+-              else                                    /* cases 3, 8 */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++                      if (!err && prev_m)
++                              err = vma_adjust(prev_m, prev_m->vm_start,
++                                              addr_m, prev_m->vm_pgoff, NULL);
++#endif
++
++              } else {                                /* cases 3, 8 */
+                       err = vma_adjust(area, addr, next->vm_end,
+                               next->vm_pgoff - pglen, NULL);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++                      if (!err && area_m)
++                              err = vma_adjust(area_m, addr_m, next_m->vm_end,
++                                              next_m->vm_pgoff - pglen, NULL);
++#endif
++
++              }
+               if (err)
+                       return NULL;
+               khugepaged_enter_vma_merge(area);
+@@ -1165,8 +1246,10 @@ none:
+ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
+                                               struct file *file, long pages)
+ {
+-      const unsigned long stack_flags
+-              = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
++#endif
+       mm->total_vm += pages;
+@@ -1174,7 +1257,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
+               mm->shared_vm += pages;
+               if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
+                       mm->exec_vm += pages;
+-      } else if (flags & stack_flags)
++      } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
+               mm->stack_vm += pages;
+ }
+ #endif /* CONFIG_PROC_FS */
+@@ -1213,7 +1296,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+        * (the exception is when the underlying filesystem is noexec
+        *  mounted, in which case we dont add PROT_EXEC.)
+        */
+-      if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
++      if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
+               if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
+                       prot |= PROT_EXEC;
+@@ -1239,7 +1322,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+       /* Obtain the address to map to. we verify (or select) it and ensure
+        * that it represents a valid section of the address space.
+        */
+-      addr = get_unmapped_area(file, addr, len, pgoff, flags);
++      addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
+       if (addr & ~PAGE_MASK)
+               return addr;
+@@ -1250,6 +1333,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+       vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
+                       mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
++#ifdef CONFIG_PAX_MPROTECT
++      if (mm->pax_flags & MF_PAX_MPROTECT) {
++
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++              if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
++                  mm->binfmt->handle_mmap)
++                      mm->binfmt->handle_mmap(file);
++#endif
++
++#ifndef CONFIG_PAX_MPROTECT_COMPAT
++              if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
++                      gr_log_rwxmmap(file);
++
++#ifdef CONFIG_PAX_EMUPLT
++                      vm_flags &= ~VM_EXEC;
++#else
++                      return -EPERM;
++#endif
++
++              }
++
++              if (!(vm_flags & VM_EXEC))
++                      vm_flags &= ~VM_MAYEXEC;
++#else
++              if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
++                      vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
++#endif
++              else
++                      vm_flags &= ~VM_MAYWRITE;
++      }
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++      if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
++              vm_flags &= ~VM_PAGEEXEC;
++#endif
++
+       if (flags & MAP_LOCKED)
+               if (!can_do_mlock())
+                       return -EPERM;
+@@ -1261,6 +1381,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+               locked += mm->locked_vm;
+               lock_limit = rlimit(RLIMIT_MEMLOCK);
+               lock_limit >>= PAGE_SHIFT;
++              gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
+               if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+                       return -EAGAIN;
+       }
+@@ -1341,6 +1462,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+                       vm_flags |= VM_NORESERVE;
+       }
++      if (!gr_acl_handle_mmap(file, prot))
++              return -EACCES;
++      
+       addr = mmap_region(file, addr, len, vm_flags, pgoff);
+       if (!IS_ERR_VALUE(addr) &&
+           ((vm_flags & VM_LOCKED) ||
+@@ -1432,7 +1556,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
+       vm_flags_t vm_flags = vma->vm_flags;
+       /* If it was private or non-writable, the write bit is already clear */
+-      if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
++      if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
+               return 0;
+       /* The backer wishes to know when pages are first written to? */
+@@ -1480,7 +1604,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+       unsigned long charged = 0;
+       struct inode *inode =  file ? file_inode(file) : NULL;
++#ifdef CONFIG_PAX_SEGMEXEC
++      struct vm_area_struct *vma_m = NULL;
++#endif
++
++      /*
++       * mm->mmap_sem is required to protect against another thread
++       * changing the mappings in case we sleep.
++       */
++      verify_mm_writelocked(mm);
++
+       /* Check against address space limit. */
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
++#endif
++
+       if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
+               unsigned long nr_pages;
+@@ -1499,11 +1638,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+       /* Clear old maps */
+       error = -ENOMEM;
+-munmap_back:
+       if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
+               if (do_munmap(mm, addr, len))
+                       return -ENOMEM;
+-              goto munmap_back;
++              BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
+       }
+       /*
+@@ -1534,6 +1672,16 @@ munmap_back:
+               goto unacct_error;
+       }
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
++              vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++              if (!vma_m) {
++                      error = -ENOMEM;
++                      goto free_vma;
++              }
++      }
++#endif
++
+       vma->vm_mm = mm;
+       vma->vm_start = addr;
+       vma->vm_end = addr + len;
+@@ -1558,6 +1706,13 @@ munmap_back:
+               if (error)
+                       goto unmap_and_free_vma;
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++              if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
++                      vma->vm_flags |= VM_PAGEEXEC;
++                      vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++              }
++#endif
++
+               /* Can addr have changed??
+                *
+                * Answer: Yes, several device drivers can do it in their
+@@ -1596,6 +1751,11 @@ munmap_back:
+       vma_link(mm, vma, prev, rb_link, rb_parent);
+       file = vma->vm_file;
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (vma_m)
++              BUG_ON(pax_mirror_vma(vma_m, vma));
++#endif
++
+       /* Once vma denies write, undo our temporary denial count */
+       if (correct_wcount)
+               atomic_inc(&inode->i_writecount);
+@@ -1603,6 +1763,7 @@ out:
+       perf_event_mmap(vma);
+       vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
++      track_exec_limit(mm, addr, addr + len, vm_flags);
+       if (vm_flags & VM_LOCKED) {
+               if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
+                                       vma == get_gate_vma(current->mm)))
+@@ -1626,6 +1787,12 @@ unmap_and_free_vma:
+       unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
+       charged = 0;
+ free_vma:
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (vma_m)
++              kmem_cache_free(vm_area_cachep, vma_m);
++#endif
++
+       kmem_cache_free(vm_area_cachep, vma);
+ unacct_error:
+       if (charged)
+@@ -1633,7 +1800,63 @@ unacct_error:
+       return error;
+ }
+-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
++#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
++unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
++{
++      if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
++              return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
++
++      return 0;
++}
++#endif
++
++bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
++{
++      if (!vma) {
++#ifdef CONFIG_STACK_GROWSUP
++              if (addr > sysctl_heap_stack_gap)
++                      vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
++              else
++                      vma = find_vma(current->mm, 0);
++              if (vma && (vma->vm_flags & VM_GROWSUP))
++                      return false;
++#endif
++              return true;
++      }
++
++      if (addr + len > vma->vm_start)
++              return false;
++
++      if (vma->vm_flags & VM_GROWSDOWN)
++              return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
++#ifdef CONFIG_STACK_GROWSUP
++      else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
++              return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
++#endif
++      else if (offset)
++              return offset <= vma->vm_start - addr - len;
++
++      return true;
++}
++
++unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
++{
++      if (vma->vm_start < len)
++              return -ENOMEM;
++
++      if (!(vma->vm_flags & VM_GROWSDOWN)) {
++              if (offset <= vma->vm_start - len)
++                      return vma->vm_start - len - offset;
++              else
++                      return -ENOMEM;
++      }
++
++      if (sysctl_heap_stack_gap <= vma->vm_start - len)
++              return vma->vm_start - len - sysctl_heap_stack_gap;
++      return -ENOMEM;
++}
++
++unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
+ {
+       /*
+        * We implement the search by looking for an rbtree node that
+@@ -1681,11 +1904,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+                       }
+               }
+-              gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
++              gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
+ check_current:
+               /* Check if current node has a suitable gap */
+               if (gap_start > high_limit)
+                       return -ENOMEM;
++
++              if (gap_end - gap_start > info->threadstack_offset)
++                      gap_start += info->threadstack_offset;
++              else
++                      gap_start = gap_end;
++
++              if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
++                      if (gap_end - gap_start > sysctl_heap_stack_gap)
++                              gap_start += sysctl_heap_stack_gap;
++                      else
++                              gap_start = gap_end;
++              }
++              if (vma->vm_flags & VM_GROWSDOWN) {
++                      if (gap_end - gap_start > sysctl_heap_stack_gap)
++                              gap_end -= sysctl_heap_stack_gap;
++                      else
++                              gap_end = gap_start;
++              }
+               if (gap_end >= low_limit && gap_end - gap_start >= length)
+                       goto found;
+@@ -1735,7 +1976,7 @@ found:
+       return gap_start;
+ }
+-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
++unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
+ {
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+@@ -1789,6 +2030,24 @@ check_current:
+               gap_end = vma->vm_start;
+               if (gap_end < low_limit)
+                       return -ENOMEM;
++
++              if (gap_end - gap_start > info->threadstack_offset)
++                      gap_end -= info->threadstack_offset;
++              else
++                      gap_end = gap_start;
++
++              if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
++                      if (gap_end - gap_start > sysctl_heap_stack_gap)
++                              gap_start += sysctl_heap_stack_gap;
++                      else
++                              gap_start = gap_end;
++              }
++              if (vma->vm_flags & VM_GROWSDOWN) {
++                      if (gap_end - gap_start > sysctl_heap_stack_gap)
++                              gap_end -= sysctl_heap_stack_gap;
++                      else
++                              gap_end = gap_start;
++              }
+               if (gap_start <= high_limit && gap_end - gap_start >= length)
+                       goto found;
+@@ -1852,6 +2111,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       if (len > TASK_SIZE)
+               return -ENOMEM;
+@@ -1859,29 +2119,45 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       if (flags & MAP_FIXED)
+               return addr;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+               addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (TASK_SIZE - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+       info.flags = 0;
+       info.length = len;
+       info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (mm->pax_flags & MF_PAX_RANDMMAP)
++              info.low_limit += mm->delta_mmap;
++#endif
++
+       info.high_limit = TASK_SIZE;
+       info.align_mask = 0;
++      info.threadstack_offset = offset;
+       return vm_unmapped_area(&info);
+ }
+ #endif        
+ void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
++              return;
++#endif
++
+       /*
+        * Is this a new hole at the lowest possible address?
+        */
+-      if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
++      if (addr >= mm->mmap_base && addr < mm->free_area_cache)
+               mm->free_area_cache = addr;
+ }
+@@ -1899,6 +2175,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       struct mm_struct *mm = current->mm;
+       unsigned long addr = addr0;
+       struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       /* requested length too big for entire address space */
+       if (len > TASK_SIZE)
+@@ -1907,12 +2184,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       if (flags & MAP_FIXED)
+               return addr;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       /* requesting a specific address */
+       if (addr) {
+               addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (TASK_SIZE - len >= addr &&
+-                              (!vma || addr + len <= vma->vm_start))
++              if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+@@ -1921,6 +2201,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       info.low_limit = PAGE_SIZE;
+       info.high_limit = mm->mmap_base;
+       info.align_mask = 0;
++      info.threadstack_offset = offset;
+       addr = vm_unmapped_area(&info);
+       /*
+@@ -1933,6 +2214,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+               VM_BUG_ON(addr != -ENOMEM);
+               info.flags = 0;
+               info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      info.low_limit += mm->delta_mmap;
++#endif
++
+               info.high_limit = TASK_SIZE;
+               addr = vm_unmapped_area(&info);
+       }
+@@ -1943,6 +2230,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
++              return;
++#endif
++
+       /*
+        * Is this a new hole at the highest possible address?
+        */
+@@ -1950,8 +2243,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
+               mm->free_area_cache = addr;
+       /* dont allow allocations above current base */
+-      if (mm->free_area_cache > mm->mmap_base)
++      if (mm->free_area_cache > mm->mmap_base) {
+               mm->free_area_cache = mm->mmap_base;
++              mm->cached_hole_size = ~0UL;
++      }
+ }
+ unsigned long
+@@ -2047,6 +2342,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
+       return vma;
+ }
++#ifdef CONFIG_PAX_SEGMEXEC
++struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
++{
++      struct vm_area_struct *vma_m;
++
++      BUG_ON(!vma || vma->vm_start >= vma->vm_end);
++      if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
++              BUG_ON(vma->vm_mirror);
++              return NULL;
++      }
++      BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
++      vma_m = vma->vm_mirror;
++      BUG_ON(!vma_m || vma_m->vm_mirror != vma);
++      BUG_ON(vma->vm_file != vma_m->vm_file);
++      BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
++      BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
++      BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
++      BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
++      return vma_m;
++}
++#endif
++
+ /*
+  * Verify that the stack growth is acceptable and
+  * update accounting. This is shared with both the
+@@ -2063,6 +2380,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+               return -ENOMEM;
+       /* Stack limit test */
++      gr_learn_resource(current, RLIMIT_STACK, size, 1);
+       if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+               return -ENOMEM;
+@@ -2073,6 +2391,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+               locked = mm->locked_vm + grow;
+               limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
+               limit >>= PAGE_SHIFT;
++              gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
+               if (locked > limit && !capable(CAP_IPC_LOCK))
+                       return -ENOMEM;
+       }
+@@ -2102,37 +2421,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
+  * vma is the last one with address > vma->vm_end.  Have to extend vma.
+  */
++#ifndef CONFIG_IA64
++static
++#endif
+ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ {
+       int error;
++      bool locknext;
+       if (!(vma->vm_flags & VM_GROWSUP))
+               return -EFAULT;
++      /* Also guard against wrapping around to address 0. */
++      if (address < PAGE_ALIGN(address+1))
++              address = PAGE_ALIGN(address+1);
++      else
++              return -ENOMEM;
++
+       /*
+        * We must make sure the anon_vma is allocated
+        * so that the anon_vma locking is not a noop.
+        */
+       if (unlikely(anon_vma_prepare(vma)))
+               return -ENOMEM;
++      locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
++      if (locknext && anon_vma_prepare(vma->vm_next))
++              return -ENOMEM;
+       vma_lock_anon_vma(vma);
++      if (locknext)
++              vma_lock_anon_vma(vma->vm_next);
+       /*
+        * vma->vm_start/vm_end cannot change under us because the caller
+        * is required to hold the mmap_sem in read mode.  We need the
+-       * anon_vma lock to serialize against concurrent expand_stacks.
+-       * Also guard against wrapping around to address 0.
++       * anon_vma locks to serialize against concurrent expand_stacks
++       * and expand_upwards.
+        */
+-      if (address < PAGE_ALIGN(address+4))
+-              address = PAGE_ALIGN(address+4);
+-      else {
+-              vma_unlock_anon_vma(vma);
+-              return -ENOMEM;
+-      }
+       error = 0;
+       /* Somebody else might have raced and expanded it already */
+-      if (address > vma->vm_end) {
++      if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
++              error = -ENOMEM;
++      else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
+               unsigned long size, grow;
+               size = address - vma->vm_start;
+@@ -2167,6 +2497,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+                       }
+               }
+       }
++      if (locknext)
++              vma_unlock_anon_vma(vma->vm_next);
+       vma_unlock_anon_vma(vma);
+       khugepaged_enter_vma_merge(vma);
+       validate_mm(vma->vm_mm);
+@@ -2181,6 +2513,8 @@ int expand_downwards(struct vm_area_struct *vma,
+                                  unsigned long address)
+ {
+       int error;
++      bool lockprev = false;
++      struct vm_area_struct *prev;
+       /*
+        * We must make sure the anon_vma is allocated
+@@ -2194,6 +2528,15 @@ int expand_downwards(struct vm_area_struct *vma,
+       if (error)
+               return error;
++      prev = vma->vm_prev;
++#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
++      lockprev = prev && (prev->vm_flags & VM_GROWSUP);
++#endif
++      if (lockprev && anon_vma_prepare(prev))
++              return -ENOMEM;
++      if (lockprev)
++              vma_lock_anon_vma(prev);
++
+       vma_lock_anon_vma(vma);
+       /*
+@@ -2203,9 +2546,17 @@ int expand_downwards(struct vm_area_struct *vma,
+        */
+       /* Somebody else might have raced and expanded it already */
+-      if (address < vma->vm_start) {
++      if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
++              error = -ENOMEM;
++      else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
+               unsigned long size, grow;
++#ifdef CONFIG_PAX_SEGMEXEC
++              struct vm_area_struct *vma_m;
++
++              vma_m = pax_find_mirror_vma(vma);
++#endif
++
+               size = vma->vm_end - address;
+               grow = (vma->vm_start - address) >> PAGE_SHIFT;
+@@ -2230,13 +2581,27 @@ int expand_downwards(struct vm_area_struct *vma,
+                               vma->vm_pgoff -= grow;
+                               anon_vma_interval_tree_post_update_vma(vma);
+                               vma_gap_update(vma);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++                              if (vma_m) {
++                                      anon_vma_interval_tree_pre_update_vma(vma_m);
++                                      vma_m->vm_start -= grow << PAGE_SHIFT;
++                                      vma_m->vm_pgoff -= grow;
++                                      anon_vma_interval_tree_post_update_vma(vma_m);
++                                      vma_gap_update(vma_m);
++                              }
++#endif
++
+                               spin_unlock(&vma->vm_mm->page_table_lock);
++                              track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
+                               perf_event_mmap(vma);
+                       }
+               }
+       }
+       vma_unlock_anon_vma(vma);
++      if (lockprev)
++              vma_unlock_anon_vma(prev);
+       khugepaged_enter_vma_merge(vma);
+       validate_mm(vma->vm_mm);
+       return error;
+@@ -2334,6 +2699,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+       do {
+               long nrpages = vma_pages(vma);
++#ifdef CONFIG_PAX_SEGMEXEC
++              if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
++                      vma = remove_vma(vma);
++                      continue;
++              }
++#endif
++
+               if (vma->vm_flags & VM_ACCOUNT)
+                       nr_accounted += nrpages;
+               vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
+@@ -2379,6 +2751,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+       insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+       vma->vm_prev = NULL;
+       do {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++              if (vma->vm_mirror) {
++                      BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
++                      vma->vm_mirror->vm_mirror = NULL;
++                      vma->vm_mirror->vm_flags &= ~VM_EXEC;
++                      vma->vm_mirror = NULL;
++              }
++#endif
++
+               vma_rb_erase(vma, &mm->mm_rb);
+               mm->map_count--;
+               tail_vma = vma;
+@@ -2410,14 +2792,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+       struct vm_area_struct *new;
+       int err = -ENOMEM;
++#ifdef CONFIG_PAX_SEGMEXEC
++      struct vm_area_struct *vma_m, *new_m = NULL;
++      unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
++#endif
++
+       if (is_vm_hugetlb_page(vma) && (addr &
+                                       ~(huge_page_mask(hstate_vma(vma)))))
+               return -EINVAL;
++#ifdef CONFIG_PAX_SEGMEXEC
++      vma_m = pax_find_mirror_vma(vma);
++#endif
++
+       new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+       if (!new)
+               goto out_err;
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (vma_m) {
++              new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
++              if (!new_m) {
++                      kmem_cache_free(vm_area_cachep, new);
++                      goto out_err;
++              }
++      }
++#endif
++
+       /* most fields are the same, copy all, and then fixup */
+       *new = *vma;
+@@ -2430,6 +2831,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+               new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
+       }
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (vma_m) {
++              *new_m = *vma_m;
++              INIT_LIST_HEAD(&new_m->anon_vma_chain);
++              new_m->vm_mirror = new;
++              new->vm_mirror = new_m;
++
++              if (new_below)
++                      new_m->vm_end = addr_m;
++              else {
++                      new_m->vm_start = addr_m;
++                      new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
++              }
++      }
++#endif
++
+       pol = mpol_dup(vma_policy(vma));
+       if (IS_ERR(pol)) {
+               err = PTR_ERR(pol);
+@@ -2452,6 +2869,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+       else
+               err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (!err && vma_m) {
++              if (anon_vma_clone(new_m, vma_m))
++                      goto out_free_mpol;
++
++              mpol_get(pol);
++              vma_set_policy(new_m, pol);
++
++              if (new_m->vm_file)
++                      get_file(new_m->vm_file);
++
++              if (new_m->vm_ops && new_m->vm_ops->open)
++                      new_m->vm_ops->open(new_m);
++
++              if (new_below)
++                      err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
++                              ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
++              else
++                      err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
++
++              if (err) {
++                      if (new_m->vm_ops && new_m->vm_ops->close)
++                              new_m->vm_ops->close(new_m);
++                      if (new_m->vm_file)
++                              fput(new_m->vm_file);
++                      mpol_put(pol);
++              }
++      }
++#endif
++
+       /* Success. */
+       if (!err)
+               return 0;
+@@ -2461,10 +2908,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+               new->vm_ops->close(new);
+       if (new->vm_file)
+               fput(new->vm_file);
+-      unlink_anon_vmas(new);
+  out_free_mpol:
+       mpol_put(pol);
+  out_free_vma:
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (new_m) {
++              unlink_anon_vmas(new_m);
++              kmem_cache_free(vm_area_cachep, new_m);
++      }
++#endif
++
++      unlink_anon_vmas(new);
+       kmem_cache_free(vm_area_cachep, new);
+  out_err:
+       return err;
+@@ -2477,6 +2932,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+             unsigned long addr, int new_below)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (mm->pax_flags & MF_PAX_SEGMEXEC) {
++              BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
++              if (mm->map_count >= sysctl_max_map_count-1)
++                      return -ENOMEM;
++      } else
++#endif
++
+       if (mm->map_count >= sysctl_max_map_count)
+               return -ENOMEM;
+@@ -2488,11 +2952,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+  * work.  This now handles partial unmappings.
+  * Jeremy Fitzhardinge <jeremy@goop.org>
+  */
++#ifdef CONFIG_PAX_SEGMEXEC
+ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+ {
++      int ret = __do_munmap(mm, start, len);
++      if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
++              return ret;
++
++      return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
++}
++
++int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
++#else
++int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
++#endif
++{
+       unsigned long end;
+       struct vm_area_struct *vma, *prev, *last;
++      /*
++       * mm->mmap_sem is required to protect against another thread
++       * changing the mappings in case we sleep.
++       */
++      verify_mm_writelocked(mm);
++
+       if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
+               return -EINVAL;
+@@ -2567,6 +3050,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+       /* Fix up all other VM information */
+       remove_vma_list(mm, vma);
++      track_exec_limit(mm, start, end, 0UL);
++
+       return 0;
+ }
+@@ -2575,6 +3060,13 @@ int vm_munmap(unsigned long start, size_t len)
+       int ret;
+       struct mm_struct *mm = current->mm;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
++          (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
++              return -EINVAL;
++#endif
++
+       down_write(&mm->mmap_sem);
+       ret = do_munmap(mm, start, len);
+       up_write(&mm->mmap_sem);
+@@ -2588,16 +3080,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+       return vm_munmap(addr, len);
+ }
+-static inline void verify_mm_writelocked(struct mm_struct *mm)
+-{
+-#ifdef CONFIG_DEBUG_VM
+-      if (unlikely(down_read_trylock(&mm->mmap_sem))) {
+-              WARN_ON(1);
+-              up_read(&mm->mmap_sem);
+-      }
+-#endif
+-}
+-
+ /*
+  *  this is really a simplified "do_mmap".  it only handles
+  *  anonymous maps.  eventually we may be able to do some
+@@ -2611,6 +3093,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+       struct rb_node ** rb_link, * rb_parent;
+       pgoff_t pgoff = addr >> PAGE_SHIFT;
+       int error;
++      unsigned long charged;
+       len = PAGE_ALIGN(len);
+       if (!len)
+@@ -2618,16 +3101,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+       flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++      if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++              flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++              if (mm->pax_flags & MF_PAX_MPROTECT)
++                      flags &= ~VM_MAYEXEC;
++#endif
++
++      }
++#endif
++
+       error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
+       if (error & ~PAGE_MASK)
+               return error;
++      charged = len >> PAGE_SHIFT;
++
+       /*
+        * mlock MCL_FUTURE?
+        */
+       if (mm->def_flags & VM_LOCKED) {
+               unsigned long locked, lock_limit;
+-              locked = len >> PAGE_SHIFT;
++              locked = charged;
+               locked += mm->locked_vm;
+               lock_limit = rlimit(RLIMIT_MEMLOCK);
+               lock_limit >>= PAGE_SHIFT;
+@@ -2644,21 +3141,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+       /*
+        * Clear old maps.  this also does some error checking for us
+        */
+- munmap_back:
+       if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
+               if (do_munmap(mm, addr, len))
+                       return -ENOMEM;
+-              goto munmap_back;
++              BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
+       }
+       /* Check against address space limits *after* clearing old maps... */
+-      if (!may_expand_vm(mm, len >> PAGE_SHIFT))
++      if (!may_expand_vm(mm, charged))
+               return -ENOMEM;
+       if (mm->map_count > sysctl_max_map_count)
+               return -ENOMEM;
+-      if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
++      if (security_vm_enough_memory_mm(mm, charged))
+               return -ENOMEM;
+       /* Can we just expand an old private anonymous mapping? */
+@@ -2672,7 +3168,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+        */
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       if (!vma) {
+-              vm_unacct_memory(len >> PAGE_SHIFT);
++              vm_unacct_memory(charged);
+               return -ENOMEM;
+       }
+@@ -2686,9 +3182,10 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+       vma_link(mm, vma, prev, rb_link, rb_parent);
+ out:
+       perf_event_mmap(vma);
+-      mm->total_vm += len >> PAGE_SHIFT;
++      mm->total_vm += charged;
+       if (flags & VM_LOCKED)
+-              mm->locked_vm += (len >> PAGE_SHIFT);
++              mm->locked_vm += charged;
++      track_exec_limit(mm, addr, addr + len, flags);
+       return addr;
+ }
+@@ -2750,6 +3247,7 @@ void exit_mmap(struct mm_struct *mm)
+       while (vma) {
+               if (vma->vm_flags & VM_ACCOUNT)
+                       nr_accounted += vma_pages(vma);
++              vma->vm_mirror = NULL;
+               vma = remove_vma(vma);
+       }
+       vm_unacct_memory(nr_accounted);
+@@ -2766,6 +3264,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+       struct vm_area_struct *prev;
+       struct rb_node **rb_link, *rb_parent;
++#ifdef CONFIG_PAX_SEGMEXEC
++      struct vm_area_struct *vma_m = NULL;
++#endif
++
++      if (security_mmap_addr(vma->vm_start))
++              return -EPERM;
++
+       /*
+        * The vm_pgoff of a purely anonymous vma should be irrelevant
+        * until its first write fault, when page's anon_vma and index
+@@ -2789,7 +3294,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+            security_vm_enough_memory_mm(mm, vma_pages(vma)))
+               return -ENOMEM;
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
++              vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++              if (!vma_m)
++                      return -ENOMEM;
++      }
++#endif
++
+       vma_link(mm, vma, prev, rb_link, rb_parent);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (vma_m)
++              BUG_ON(pax_mirror_vma(vma_m, vma));
++#endif
++
+       return 0;
+ }
+@@ -2809,6 +3328,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+       struct mempolicy *pol;
+       bool faulted_in_anon_vma = true;
++      BUG_ON(vma->vm_mirror);
++
+       /*
+        * If anonymous vma has not yet been faulted, update new pgoff
+        * to match new location, to increase its chance of merging.
+@@ -2875,6 +3396,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+       return NULL;
+ }
++#ifdef CONFIG_PAX_SEGMEXEC
++long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
++{
++      struct vm_area_struct *prev_m;
++      struct rb_node **rb_link_m, *rb_parent_m;
++      struct mempolicy *pol_m;
++
++      BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
++      BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
++      BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
++      *vma_m = *vma;
++      INIT_LIST_HEAD(&vma_m->anon_vma_chain);
++      if (anon_vma_clone(vma_m, vma))
++              return -ENOMEM;
++      pol_m = vma_policy(vma_m);
++      mpol_get(pol_m);
++      vma_set_policy(vma_m, pol_m);
++      vma_m->vm_start += SEGMEXEC_TASK_SIZE;
++      vma_m->vm_end += SEGMEXEC_TASK_SIZE;
++      vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
++      vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
++      if (vma_m->vm_file)
++              get_file(vma_m->vm_file);
++      if (vma_m->vm_ops && vma_m->vm_ops->open)
++              vma_m->vm_ops->open(vma_m);
++      BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
++      vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
++      vma_m->vm_mirror = vma;
++      vma->vm_mirror = vma_m;
++      return 0;
++}
++#endif
++
+ /*
+  * Return true if the calling process may expand its vm space by the passed
+  * number of pages
+@@ -2886,6 +3440,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+       lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
++      gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
+       if (cur + npages > lim)
+               return 0;
+       return 1;
+@@ -2956,6 +3511,22 @@ int install_special_mapping(struct mm_struct *mm,
+       vma->vm_start = addr;
+       vma->vm_end = addr + len;
++#ifdef CONFIG_PAX_MPROTECT
++      if (mm->pax_flags & MF_PAX_MPROTECT) {
++#ifndef CONFIG_PAX_MPROTECT_COMPAT
++              if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
++                      return -EPERM;
++              if (!(vm_flags & VM_EXEC))
++                      vm_flags &= ~VM_MAYEXEC;
++#else
++              if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
++                      vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
++#endif
++              else
++                      vm_flags &= ~VM_MAYWRITE;
++      }
++#endif
++
+       vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
+       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+diff --git a/mm/mprotect.c b/mm/mprotect.c
+index 94722a4..e661e29 100644
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -23,10 +23,18 @@
+ #include <linux/mmu_notifier.h>
+ #include <linux/migrate.h>
+ #include <linux/perf_event.h>
++#include <linux/sched/sysctl.h>
++
++#ifdef CONFIG_PAX_MPROTECT
++#include <linux/elf.h>
++#include <linux/binfmts.h>
++#endif
++
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
++#include <asm/mmu_context.h>
+ #ifndef pgprot_modify
+ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+@@ -233,6 +241,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
+       return pages;
+ }
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++/* called while holding the mmap semaphore for writing except stack expansion */
++void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
++{
++      unsigned long oldlimit, newlimit = 0UL;
++
++      if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
++              return;
++
++      spin_lock(&mm->page_table_lock);
++      oldlimit = mm->context.user_cs_limit;
++      if ((prot & VM_EXEC) && oldlimit < end)
++              /* USER_CS limit moved up */
++              newlimit = end;
++      else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
++              /* USER_CS limit moved down */
++              newlimit = start;
++
++      if (newlimit) {
++              mm->context.user_cs_limit = newlimit;
++
++#ifdef CONFIG_SMP
++              wmb();
++              cpus_clear(mm->context.cpu_user_cs_mask);
++              cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
++#endif
++
++              set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
++      }
++      spin_unlock(&mm->page_table_lock);
++      if (newlimit == end) {
++              struct vm_area_struct *vma = find_vma(mm, oldlimit);
++
++              for (; vma && vma->vm_start < end; vma = vma->vm_next)
++                      if (is_vm_hugetlb_page(vma))
++                              hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
++                      else
++                              change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
++      }
++}
++#endif
++
+ int
+ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+       unsigned long start, unsigned long end, unsigned long newflags)
+@@ -245,11 +295,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+       int error;
+       int dirty_accountable = 0;
++#ifdef CONFIG_PAX_SEGMEXEC
++      struct vm_area_struct *vma_m = NULL;
++      unsigned long start_m, end_m;
++
++      start_m = start + SEGMEXEC_TASK_SIZE;
++      end_m = end + SEGMEXEC_TASK_SIZE;
++#endif
++
+       if (newflags == oldflags) {
+               *pprev = vma;
+               return 0;
+       }
++      if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
++              struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
++
++              if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
++                      return -ENOMEM;
++
++              if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
++                      return -ENOMEM;
++      }
++
+       /*
+        * If we make a private mapping writable we increase our commit;
+        * but (without finer accounting) cannot reduce our commit if we
+@@ -266,6 +334,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+               }
+       }
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
++              if (start != vma->vm_start) {
++                      error = split_vma(mm, vma, start, 1);
++                      if (error)
++                              goto fail;
++                      BUG_ON(!*pprev || (*pprev)->vm_next == vma);
++                      *pprev = (*pprev)->vm_next;
++              }
++
++              if (end != vma->vm_end) {
++                      error = split_vma(mm, vma, end, 0);
++                      if (error)
++                              goto fail;
++              }
++
++              if (pax_find_mirror_vma(vma)) {
++                      error = __do_munmap(mm, start_m, end_m - start_m);
++                      if (error)
++                              goto fail;
++              } else {
++                      vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++                      if (!vma_m) {
++                              error = -ENOMEM;
++                              goto fail;
++                      }
++                      vma->vm_flags = newflags;
++                      error = pax_mirror_vma(vma_m, vma);
++                      if (error) {
++                              vma->vm_flags = oldflags;
++                              goto fail;
++                      }
++              }
++      }
++#endif
++
+       /*
+        * First try to merge with previous and/or next vma.
+        */
+@@ -296,9 +400,21 @@ success:
+        * vm_flags and vm_page_prot are protected by the mmap_sem
+        * held in write mode.
+        */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
++              pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
++#endif
++
+       vma->vm_flags = newflags;
++
++#ifdef CONFIG_PAX_MPROTECT
++      if (mm->binfmt && mm->binfmt->handle_mprotect)
++              mm->binfmt->handle_mprotect(vma, newflags);
++#endif
++
+       vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
+-                                        vm_get_page_prot(newflags));
++                                        vm_get_page_prot(vma->vm_flags));
+       if (vma_wants_writenotify(vma)) {
+               vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
+@@ -337,6 +453,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+       end = start + len;
+       if (end <= start)
+               return -ENOMEM;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
++              if (end > SEGMEXEC_TASK_SIZE)
++                      return -EINVAL;
++      } else
++#endif
++
++      if (end > TASK_SIZE)
++              return -EINVAL;
++
+       if (!arch_validate_prot(prot))
+               return -EINVAL;
+@@ -344,7 +471,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+       /*
+        * Does the application expect PROT_READ to imply PROT_EXEC:
+        */
+-      if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
++      if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
+               prot |= PROT_EXEC;
+       vm_flags = calc_vm_prot_bits(prot);
+@@ -376,6 +503,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+       if (start > vma->vm_start)
+               prev = vma;
++#ifdef CONFIG_PAX_MPROTECT
++      if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
++              current->mm->binfmt->handle_mprotect(vma, vm_flags);
++#endif
++
+       for (nstart = start ; ; ) {
+               unsigned long newflags;
+@@ -386,6 +518,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+               /* newflags >> 4 shift VM_MAY% in place of VM_% */
+               if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
++                      if (prot & (PROT_WRITE | PROT_EXEC))
++                              gr_log_rwxmprotect(vma);
++
++                      error = -EACCES;
++                      goto out;
++              }
++
++              if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
+                       error = -EACCES;
+                       goto out;
+               }
+@@ -400,6 +540,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+               error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
+               if (error)
+                       goto out;
++
++              track_exec_limit(current->mm, nstart, tmp, vm_flags);
++
+               nstart = tmp;
+               if (nstart < prev->vm_end)
+diff --git a/mm/mremap.c b/mm/mremap.c
+index 463a257..c0c7a92 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -126,6 +126,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
+                       continue;
+               pte = ptep_get_and_clear(mm, old_addr, old_pte);
+               pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++              if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
++                      pte = pte_exprotect(pte);
++#endif
++
+               set_pte_at(mm, new_addr, new_pte, pte);
+       }
+@@ -318,6 +324,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
+       if (is_vm_hugetlb_page(vma))
+               goto Einval;
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (pax_find_mirror_vma(vma))
++              goto Einval;
++#endif
++
+       /* We can't remap across vm area boundaries */
+       if (old_len > vma->vm_end - addr)
+               goto Efault;
+@@ -373,20 +384,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
+       unsigned long ret = -EINVAL;
+       unsigned long charged = 0;
+       unsigned long map_flags;
++      unsigned long pax_task_size = TASK_SIZE;
+       if (new_addr & ~PAGE_MASK)
+               goto out;
+-      if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (mm->pax_flags & MF_PAX_SEGMEXEC)
++              pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++      pax_task_size -= PAGE_SIZE;
++
++      if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
+               goto out;
+       /* Check if the location we're moving into overlaps the
+        * old location at all, and fail if it does.
+        */
+-      if ((new_addr <= addr) && (new_addr+new_len) > addr)
+-              goto out;
+-
+-      if ((addr <= new_addr) && (addr+old_len) > new_addr)
++      if (addr + old_len > new_addr && new_addr + new_len > addr)
+               goto out;
+       ret = do_munmap(mm, new_addr, new_len);
+@@ -455,6 +471,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+       unsigned long ret = -EINVAL;
+       unsigned long charged = 0;
+       bool locked = false;
++      unsigned long pax_task_size = TASK_SIZE;
+       down_write(&current->mm->mmap_sem);
+@@ -475,6 +492,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+       if (!new_len)
+               goto out;
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (mm->pax_flags & MF_PAX_SEGMEXEC)
++              pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++      pax_task_size -= PAGE_SIZE;
++
++      if (new_len > pax_task_size || addr > pax_task_size-new_len ||
++          old_len > pax_task_size || addr > pax_task_size-old_len)
++              goto out;
++
+       if (flags & MREMAP_FIXED) {
+               if (flags & MREMAP_MAYMOVE)
+                       ret = mremap_to(addr, old_len, new_addr, new_len,
+@@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+                               new_addr = addr;
+                       }
+                       ret = addr;
++                      track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
+                       goto out;
+               }
+       }
+@@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+                       goto out;
+               }
++              map_flags = vma->vm_flags;
+               ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
++              if (!(ret & ~PAGE_MASK)) {
++                      track_exec_limit(current->mm, addr, addr + old_len, 0UL);
++                      track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
++              }
+       }
+ out:
+       if (ret & ~PAGE_MASK)
+diff --git a/mm/nommu.c b/mm/nommu.c
+index 298884d..5f74980 100644
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -65,7 +65,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
+ int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
+ unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
+ unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
+-int heap_stack_gap = 0;
+ atomic_long_t mmap_pages_allocated;
+@@ -842,15 +841,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+ EXPORT_SYMBOL(find_vma);
+ /*
+- * find a VMA
+- * - we don't extend stack VMAs under NOMMU conditions
+- */
+-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
+-{
+-      return find_vma(mm, addr);
+-}
+-
+-/*
+  * expand a stack to a given address
+  * - not supported under NOMMU conditions
+  */
+@@ -1561,6 +1551,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+       /* most fields are the same, copy all, and then fixup */
+       *new = *vma;
++      INIT_LIST_HEAD(&new->anon_vma_chain);
+       *region = *vma->vm_region;
+       new->vm_region = region;
+@@ -1995,8 +1986,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
+ }
+ EXPORT_SYMBOL(generic_file_remap_pages);
+-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+-              unsigned long addr, void *buf, int len, int write)
++static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
++              unsigned long addr, void *buf, size_t len, int write)
+ {
+       struct vm_area_struct *vma;
+@@ -2037,8 +2028,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+  *
+  * The caller must hold a reference on @mm.
+  */
+-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+-              void *buf, int len, int write)
++ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
++              void *buf, size_t len, int write)
+ {
+       return __access_remote_vm(NULL, mm, addr, buf, len, write);
+ }
+@@ -2047,7 +2038,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+  * Access another process' address space.
+  * - source/target buffer must be kernel space
+  */
+-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
++ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
+ {
+       struct mm_struct *mm;
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 4514ad7..92eaa1c 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -659,7 +659,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
+  *   card's bdi_dirty may rush to many times higher than bdi_setpoint.
+  * - the bdi dirty thresh drops quickly due to change of JBOD workload
+  */
+-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
++static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
+                                       unsigned long thresh,
+                                       unsigned long bg_thresh,
+                                       unsigned long dirty,
+@@ -1634,7 +1634,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
+       }
+ }
+-static struct notifier_block __cpuinitdata ratelimit_nb = {
++static struct notifier_block ratelimit_nb = {
+       .notifier_call  = ratelimit_handler,
+       .next           = NULL,
+ };
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 2ee0fd3..6e2edfb 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -60,6 +60,7 @@
+ #include <linux/page-debug-flags.h>
+ #include <linux/hugetlb.h>
+ #include <linux/sched/rt.h>
++#include <linux/random.h>
+ #include <asm/tlbflush.h>
+ #include <asm/div64.h>
+@@ -345,7 +346,7 @@ out:
+  * This usage means that zero-order pages may not be compound.
+  */
+-static void free_compound_page(struct page *page)
++void free_compound_page(struct page *page)
+ {
+       __free_pages_ok(page, compound_order(page));
+ }
+@@ -702,6 +703,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
+       int i;
+       int bad = 0;
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++      unsigned long index = 1UL << order;
++#endif
++
+       trace_mm_page_free(page, order);
+       kmemcheck_free_shadow(page, order);
+@@ -717,6 +722,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
+               debug_check_no_obj_freed(page_address(page),
+                                          PAGE_SIZE << order);
+       }
++
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++      for (; index; --index)
++              sanitize_highpage(page + index - 1);
++#endif
++
+       arch_free_page(page, order);
+       kernel_map_pages(page, 1 << order, 0);
+@@ -739,6 +750,19 @@ static void __free_pages_ok(struct page *page, unsigned int order)
+       local_irq_restore(flags);
+ }
++#ifdef CONFIG_PAX_LATENT_ENTROPY
++bool __meminitdata extra_latent_entropy;
++
++static int __init setup_pax_extra_latent_entropy(char *str)
++{
++      extra_latent_entropy = true;
++      return 0;
++}
++early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
++
++volatile u64 latent_entropy;
++#endif
++
+ /*
+  * Read access to zone->managed_pages is safe because it's unsigned long,
+  * but we still need to serialize writers. Currently all callers of
+@@ -761,6 +785,19 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
+               set_page_count(p, 0);
+       }
++#ifdef CONFIG_PAX_LATENT_ENTROPY
++      if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
++              u64 hash = 0;
++              size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
++              const u64 *data = lowmem_page_address(page);
++
++              for (index = 0; index < end; index++)
++                      hash ^= hash + data[index];
++              latent_entropy ^= hash;
++              add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
++      }
++#endif
++
+       page_zone(page)->managed_pages += 1 << order;
+       set_page_refcounted(page);
+       __free_pages(page, order);
+@@ -870,8 +907,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
+       arch_alloc_page(page, order);
+       kernel_map_pages(page, 1 << order, 1);
++#ifndef CONFIG_PAX_MEMORY_SANITIZE
+       if (gfp_flags & __GFP_ZERO)
+               prep_zero_page(page, order, gfp_flags);
++#endif
+       if (order && (gfp_flags & __GFP_COMP))
+               prep_compound_page(page, order);
+diff --git a/mm/page_io.c b/mm/page_io.c
+index a8a3ef4..7260a60 100644
+--- a/mm/page_io.c
++++ b/mm/page_io.c
+@@ -214,7 +214,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
+               struct file *swap_file = sis->swap_file;
+               struct address_space *mapping = swap_file->f_mapping;
+               struct iovec iov = {
+-                      .iov_base = kmap(page),
++                      .iov_base = (void __force_user *)kmap(page),
+                       .iov_len  = PAGE_SIZE,
+               };
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 8c8e08f..73a5cda 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
+ static unsigned int pcpu_high_unit_cpu __read_mostly;
+ /* the address of the first chunk which starts with the kernel static area */
+-void *pcpu_base_addr __read_mostly;
++void *pcpu_base_addr __read_only;
+ EXPORT_SYMBOL_GPL(pcpu_base_addr);
+ static const int *pcpu_unit_map __read_mostly;                /* cpu -> unit */
+diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
+index fd26d04..0cea1b0 100644
+--- a/mm/process_vm_access.c
++++ b/mm/process_vm_access.c
+@@ -13,6 +13,7 @@
+ #include <linux/uio.h>
+ #include <linux/sched.h>
+ #include <linux/highmem.h>
++#include <linux/security.h>
+ #include <linux/ptrace.h>
+ #include <linux/slab.h>
+ #include <linux/syscalls.h>
+@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
+       size_t iov_l_curr_offset = 0;
+       ssize_t iov_len;
++      return -ENOSYS; // PaX: until properly audited
++
+       /*
+        * Work out how many pages of struct pages we're going to need
+        * when eventually calling get_user_pages
+        */
+       for (i = 0; i < riovcnt; i++) {
+               iov_len = rvec[i].iov_len;
+-              if (iov_len > 0) {
+-                      nr_pages_iov = ((unsigned long)rvec[i].iov_base
+-                                      + iov_len)
+-                              / PAGE_SIZE - (unsigned long)rvec[i].iov_base
+-                              / PAGE_SIZE + 1;
+-                      nr_pages = max(nr_pages, nr_pages_iov);
+-              }
++              if (iov_len <= 0)
++                      continue;
++              nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
++                              (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
++              nr_pages = max(nr_pages, nr_pages_iov);
+       }
+       if (nr_pages == 0)
+@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
+               goto free_proc_pages;
+       }
++      if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
++              rc = -EPERM;
++              goto put_task_struct;
++      }
++
+       mm = mm_access(task, PTRACE_MODE_ATTACH);
+       if (!mm || IS_ERR(mm)) {
+               rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 6280da8..b5c090e 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
+       struct anon_vma *anon_vma = vma->anon_vma;
+       struct anon_vma_chain *avc;
++#ifdef CONFIG_PAX_SEGMEXEC
++      struct anon_vma_chain *avc_m = NULL;
++#endif
++
+       might_sleep();
+       if (unlikely(!anon_vma)) {
+               struct mm_struct *mm = vma->vm_mm;
+@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
+               if (!avc)
+                       goto out_enomem;
++#ifdef CONFIG_PAX_SEGMEXEC
++              avc_m = anon_vma_chain_alloc(GFP_KERNEL);
++              if (!avc_m)
++                      goto out_enomem_free_avc;
++#endif
++
+               anon_vma = find_mergeable_anon_vma(vma);
+               allocated = NULL;
+               if (!anon_vma) {
+@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
+               /* page_table_lock to protect against threads */
+               spin_lock(&mm->page_table_lock);
+               if (likely(!vma->anon_vma)) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++                      struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
++
++                      if (vma_m) {
++                              BUG_ON(vma_m->anon_vma);
++                              vma_m->anon_vma = anon_vma;
++                              anon_vma_chain_link(vma_m, avc_m, anon_vma);
++                              avc_m = NULL;
++                      }
++#endif
++
+                       vma->anon_vma = anon_vma;
+                       anon_vma_chain_link(vma, avc, anon_vma);
+                       allocated = NULL;
+@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
+               if (unlikely(allocated))
+                       put_anon_vma(allocated);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++              if (unlikely(avc_m))
++                      anon_vma_chain_free(avc_m);
++#endif
++
+               if (unlikely(avc))
+                       anon_vma_chain_free(avc);
+       }
+       return 0;
+  out_enomem_free_avc:
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (avc_m)
++              anon_vma_chain_free(avc_m);
++#endif
++
+       anon_vma_chain_free(avc);
+  out_enomem:
+       return -ENOMEM;
+@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
+  * Attach the anon_vmas from src to dst.
+  * Returns 0 on success, -ENOMEM on failure.
+  */
+-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
++int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
+ {
+       struct anon_vma_chain *avc, *pavc;
+       struct anon_vma *root = NULL;
+@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
+  * the corresponding VMA in the parent process is attached to.
+  * Returns 0 on success, non-zero on failure.
+  */
+-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
++int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
+ {
+       struct anon_vma_chain *avc;
+       struct anon_vma *anon_vma;
+@@ -373,8 +407,10 @@ static void anon_vma_ctor(void *data)
+ void __init anon_vma_init(void)
+ {
+       anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
+-                      0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
+-      anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
++                      0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
++                      anon_vma_ctor);
++      anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
++                      SLAB_PANIC|SLAB_NO_SANITIZE);
+ }
+ /*
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 5e6a842..b41916e 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -33,7 +33,7 @@
+ #include <linux/swap.h>
+ #include <linux/aio.h>
+-static struct vfsmount *shm_mnt;
++struct vfsmount *shm_mnt;
+ #ifdef CONFIG_SHMEM
+ /*
+@@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
+ #define BOGO_DIRENT_SIZE 20
+ /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
+-#define SHORT_SYMLINK_LEN 128
++#define SHORT_SYMLINK_LEN 64
+ /*
+  * shmem_fallocate and shmem_writepage communicate via inode->i_private
+@@ -2203,6 +2203,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+ static int shmem_xattr_validate(const char *name)
+ {
+       struct { const char *prefix; size_t len; } arr[] = {
++
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++              { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
++#endif
++
+               { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
+               { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
+       };
+@@ -2258,6 +2263,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
+       if (err)
+               return err;
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++      if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
++              if (strcmp(name, XATTR_NAME_PAX_FLAGS))
++                      return -EOPNOTSUPP;
++              if (size > 8)
++                      return -EINVAL;
++      }
++#endif
++
+       return simple_xattr_set(&info->xattrs, name, value, size, flags);
+ }
+@@ -2570,8 +2584,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
+       int err = -ENOMEM;
+       /* Round up to L1_CACHE_BYTES to resist false sharing */
+-      sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
+-                              L1_CACHE_BYTES), GFP_KERNEL);
++      sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
+       if (!sbinfo)
+               return -ENOMEM;
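The shmem hunks above restrict which extended attributes tmpfs will store under the "user." namespace when CONFIG_PAX_XATTR_PAX_FLAGS is set. For reference, the added gate reduces to the following check; this is a minimal userspace sketch, where the attribute name "user.pax.flags" and the helper name are assumptions of the sketch standing in for the kernel's XATTR_* constants:

#include <errno.h>
#include <stddef.h>
#include <string.h>

/* Minimal userspace model of the shmem_setxattr() gate in the hunk above:
 * only the PaX flags attribute is accepted in the "user." namespace, and
 * its value may be at most 8 bytes long. */
static int pax_user_xattr_check(const char *name, size_t size)
{
        if (strncmp(name, "user.", strlen("user.")) != 0)
                return 0;                 /* not a user.* attribute: gate does not apply */
        if (strcmp(name, "user.pax.flags") != 0)
                return -EOPNOTSUPP;       /* only the PaX flags xattr is allowed */
        if (size > 8)
                return -EINVAL;           /* the value is a short flag string, max 8 bytes */
        return 0;
}

Attributes outside the "user." namespace still go through the existing security/trusted prefix validation shown in shmem_xattr_validate() above.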
+diff --git a/mm/slab.c b/mm/slab.c
+index bd88411..2d46fd6 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -366,10 +366,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
+               if ((x)->max_freeable < i)                              \
+                       (x)->max_freeable = i;                          \
+       } while (0)
+-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
+-#define STATS_INC_ALLOCMISS(x)        atomic_inc(&(x)->allocmiss)
+-#define STATS_INC_FREEHIT(x)  atomic_inc(&(x)->freehit)
+-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
++#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
++#define STATS_INC_ALLOCMISS(x)        atomic_inc_unchecked(&(x)->allocmiss)
++#define STATS_INC_FREEHIT(x)  atomic_inc_unchecked(&(x)->freehit)
++#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
++#define STATS_INC_SANITIZED(x)        atomic_inc_unchecked(&(x)->sanitized)
++#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
+ #else
+ #define       STATS_INC_ACTIVE(x)     do { } while (0)
+ #define       STATS_DEC_ACTIVE(x)     do { } while (0)
+@@ -386,6 +388,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
+ #define STATS_INC_ALLOCMISS(x)        do { } while (0)
+ #define STATS_INC_FREEHIT(x)  do { } while (0)
+ #define STATS_INC_FREEMISS(x) do { } while (0)
++#define STATS_INC_SANITIZED(x)        do { } while (0)
++#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
+ #endif
+ #if DEBUG
+@@ -477,7 +481,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
+  *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
+  */
+ static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+-                                      const struct slab *slab, void *obj)
++                                      const struct slab *slab, const void *obj)
+ {
+       u32 offset = (obj - slab->s_mem);
+       return reciprocal_divide(offset, cache->reciprocal_buffer_size);
+@@ -1384,7 +1388,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
+       return notifier_from_errno(err);
+ }
+-static struct notifier_block __cpuinitdata cpucache_notifier = {
++static struct notifier_block cpucache_notifier = {
+       &cpuup_callback, NULL, 0
+ };
+@@ -1565,12 +1569,12 @@ void __init kmem_cache_init(void)
+        */
+       kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
+-                                      kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
++                                      kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
+       if (INDEX_AC != INDEX_NODE)
+               kmalloc_caches[INDEX_NODE] =
+                       create_kmalloc_cache("kmalloc-node",
+-                              kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
++                              kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
+       slab_early_init = 0;
+@@ -3583,6 +3587,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
+       struct array_cache *ac = cpu_cache_get(cachep);
+       check_irq_off();
++
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++      if (pax_sanitize_slab) {
++              if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
++                      memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
++
++                      if (cachep->ctor)
++                              cachep->ctor(objp);
++
++                      STATS_INC_SANITIZED(cachep);
++              } else
++                      STATS_INC_NOT_SANITIZED(cachep);
++      }
++#endif
++
+       kmemleak_free_recursive(objp, cachep->flags);
+       objp = cache_free_debugcheck(cachep, objp, caller);
+@@ -3800,6 +3819,7 @@ void kfree(const void *objp)
+       if (unlikely(ZERO_OR_NULL_PTR(objp)))
+               return;
++      VM_BUG_ON(!virt_addr_valid(objp));
+       local_irq_save(flags);
+       kfree_debugcheck(objp);
+       c = virt_to_cache(objp);
+@@ -4241,14 +4261,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
+       }
+       /* cpu stats */
+       {
+-              unsigned long allochit = atomic_read(&cachep->allochit);
+-              unsigned long allocmiss = atomic_read(&cachep->allocmiss);
+-              unsigned long freehit = atomic_read(&cachep->freehit);
+-              unsigned long freemiss = atomic_read(&cachep->freemiss);
++              unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
++              unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
++              unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
++              unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
+               seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
+                          allochit, allocmiss, freehit, freemiss);
+       }
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++      {
++              unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
++              unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
++
++              seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
++      }
++#endif
+ #endif
+ }
+@@ -4476,13 +4504,71 @@ static const struct file_operations proc_slabstats_operations = {
+ static int __init slab_proc_init(void)
+ {
+ #ifdef CONFIG_DEBUG_SLAB_LEAK
+-      proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
++      proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
+ #endif
+       return 0;
+ }
+ module_init(slab_proc_init);
+ #endif
++bool is_usercopy_object(const void *ptr)
++{
++      struct page *page;
++      struct kmem_cache *cachep;
++
++      if (ZERO_OR_NULL_PTR(ptr))
++              return false;
++
++      if (!slab_is_available())
++              return false;
++
++      if (!virt_addr_valid(ptr))
++              return false;
++
++      page = virt_to_head_page(ptr);
++
++      if (!PageSlab(page))
++              return false;
++
++      cachep = page->slab_cache;
++      return cachep->flags & SLAB_USERCOPY;
++}
++
++#ifdef CONFIG_PAX_USERCOPY
++const char *check_heap_object(const void *ptr, unsigned long n)
++{
++      struct page *page;
++      struct kmem_cache *cachep;
++      struct slab *slabp;
++      unsigned int objnr;
++      unsigned long offset;
++
++      if (ZERO_OR_NULL_PTR(ptr))
++              return "<null>";
++
++      if (!virt_addr_valid(ptr))
++              return NULL;
++
++      page = virt_to_head_page(ptr);
++
++      if (!PageSlab(page))
++              return NULL;
++
++      cachep = page->slab_cache;
++      if (!(cachep->flags & SLAB_USERCOPY))
++              return cachep->name;
++
++      slabp = page->slab_page;
++      objnr = obj_to_index(cachep, slabp, ptr);
++      BUG_ON(objnr >= cachep->num);
++      offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
++      if (offset <= cachep->object_size && n <= cachep->object_size - offset)
++              return NULL;
++
++      return cachep->name;
++}
++#endif
++
+ /**
+  * ksize - get the actual amount of memory allocated for a given object
+  * @objp: Pointer to the object
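The PAX_USERCOPY helper added to mm/slab.c above resolves a heap pointer to its slab object and then applies a simple span test. A minimal userspace sketch of that final test, with a stand-in struct in place of the kernel's kmem_cache:

#include <stdbool.h>
#include <stddef.h>

/* Illustrative model of the bounds test used by the SLAB
 * check_heap_object() above: once the offset of ptr inside its object is
 * known, a copy of n bytes is permitted only if it stays within
 * object_size. The subtraction is ordered so it cannot wrap when offset
 * is already out of range. */
struct cache_layout {
        size_t object_size;     /* usable payload bytes per object */
};

static bool usercopy_span_ok(const struct cache_layout *c,
                             size_t offset, size_t n)
{
        return offset <= c->object_size && n <= c->object_size - offset;
}

A non-NULL return from check_heap_object() (the cache name) is what the caller treats as a violation; NULL means the requested copy lies within a single object of a SLAB_USERCOPY cache.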
+diff --git a/mm/slab.h b/mm/slab.h
+index f96b49e..db1d204 100644
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -32,6 +32,15 @@ extern struct list_head slab_caches;
+ /* The slab cache that manages slab cache information */
+ extern struct kmem_cache *kmem_cache;
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++#ifdef CONFIG_X86_64
++#define PAX_MEMORY_SANITIZE_VALUE     '\xfe'
++#else
++#define PAX_MEMORY_SANITIZE_VALUE     '\xff'
++#endif
++extern bool pax_sanitize_slab;
++#endif
++
+ unsigned long calculate_alignment(unsigned long flags,
+               unsigned long align, unsigned long size);
+@@ -67,7 +76,8 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
+ /* Legal flag mask for kmem_cache_create(), for various configurations */
+ #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
+-                       SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
++                       SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
++                       SLAB_USERCOPY | SLAB_NO_SANITIZE)
+ #if defined(CONFIG_DEBUG_SLAB)
+ #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
+@@ -229,6 +239,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
+               return s;
+       page = virt_to_head_page(x);
++
++      BUG_ON(!PageSlab(page));
++
+       cachep = page->slab_cache;
+       if (slab_equal_or_root(cachep, s))
+               return cachep;
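The mm/slab.h hunk above defines the byte used to scrub freed objects: 0xfe on x86_64 and 0xff elsewhere. The corresponding free-path behaviour that this patch wires into all three allocators amounts to the following plain-C sketch, with the SLAB_POISON/SLAB_NO_SANITIZE opt-out reduced to a single flag argument:

#include <stddef.h>
#include <string.h>

/* Sketch of sanitize-on-free as added to __cache_free(), slab_free() and
 * slob_free() in this patch: unless the cache opts out, the freed object
 * is overwritten with PAX_MEMORY_SANITIZE_VALUE. */
#ifdef __x86_64__
#define PAX_MEMORY_SANITIZE_VALUE 0xfe
#else
#define PAX_MEMORY_SANITIZE_VALUE 0xff
#endif

static void sanitize_on_free(void *obj, size_t object_size, int skip)
{
        if (!skip)
                memset(obj, PAX_MEMORY_SANITIZE_VALUE, object_size);
}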
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 2d41450..4efe6ee 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -22,11 +22,22 @@
+ #include "slab.h"
+-enum slab_state slab_state;
++enum slab_state slab_state __read_only;
+ LIST_HEAD(slab_caches);
+ DEFINE_MUTEX(slab_mutex);
+ struct kmem_cache *kmem_cache;
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++bool pax_sanitize_slab __read_only = true;
++static int __init pax_sanitize_slab_setup(char *str)
++{
++      pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
++      printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
++      return 1;
++}
++__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
++#endif
++
+ #ifdef CONFIG_DEBUG_VM
+ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
+                                  size_t size)
+@@ -209,7 +220,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
+               err = __kmem_cache_create(s, flags);
+               if (!err) {
+-                      s->refcount = 1;
++                      atomic_set(&s->refcount, 1);
+                       list_add(&s->list, &slab_caches);
+                       memcg_cache_list_add(memcg, s);
+               } else {
+@@ -255,8 +266,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
+       get_online_cpus();
+       mutex_lock(&slab_mutex);
+-      s->refcount--;
+-      if (!s->refcount) {
++      if (atomic_dec_and_test(&s->refcount)) {
+               list_del(&s->list);
+               if (!__kmem_cache_shutdown(s)) {
+@@ -302,7 +312,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
+               panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
+                                       name, size, err);
+-      s->refcount = -1;       /* Exempt from merging for now */
++      atomic_set(&s->refcount, -1);   /* Exempt from merging for now */
+ }
+ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
+@@ -315,7 +325,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
+       create_boot_cache(s, name, size, flags);
+       list_add(&s->list, &slab_caches);
+-      s->refcount = 1;
++      atomic_set(&s->refcount, 1);
+       return s;
+ }
+@@ -327,6 +337,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+ EXPORT_SYMBOL(kmalloc_dma_caches);
+ #endif
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
++EXPORT_SYMBOL(kmalloc_usercopy_caches);
++#endif
++
+ /*
+  * Conversion table for small slabs sizes / 8 to the index in the
+  * kmalloc array. This is necessary for slabs < 192 since we have non power
+@@ -391,6 +406,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
+               return kmalloc_dma_caches[index];
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++      if (unlikely((flags & GFP_USERCOPY)))
++              return kmalloc_usercopy_caches[index];
++
++#endif
++
+       return kmalloc_caches[index];
+ }
+@@ -447,7 +469,7 @@ void __init create_kmalloc_caches(unsigned long flags)
+       for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
+               if (!kmalloc_caches[i]) {
+                       kmalloc_caches[i] = create_kmalloc_cache(NULL,
+-                                                      1 << i, flags);
++                                                      1 << i, SLAB_USERCOPY | flags);
+               }
+               /*
+@@ -456,10 +478,10 @@ void __init create_kmalloc_caches(unsigned long flags)
+                * earlier power of two caches
+                */
+               if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
+-                      kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
++                      kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
+               if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
+-                      kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
++                      kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
+       }
+       /* Kmalloc array is now usable */
+@@ -492,6 +514,23 @@ void __init create_kmalloc_caches(unsigned long flags)
+               }
+       }
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++      for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
++              struct kmem_cache *s = kmalloc_caches[i];
++
++              if (s) {
++                      int size = kmalloc_size(i);
++                      char *n = kasprintf(GFP_NOWAIT,
++                               "usercopy-kmalloc-%d", size);
++
++                      BUG_ON(!n);
++                      kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
++                              size, SLAB_USERCOPY | flags);
++              }
++      }
++#endif
++
+ }
+ #endif /* !CONFIG_SLOB */
+@@ -516,6 +555,9 @@ void print_slabinfo_header(struct seq_file *m)
+       seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
+                "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
+       seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++      seq_puts(m, " : pax <sanitized> <not_sanitized>");
++#endif
+ #endif
+       seq_putc(m, '\n');
+ }
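The slab_common.c hunks above make sanitization a boot-time toggle: pax_sanitize_slab defaults to true and pax_sanitize_slab=0 on the kernel command line disables it. A small userspace model of that handler, assuming the same simple_strtol-style parsing as the original:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace model of the pax_sanitize_slab= setup handler added above. */
static bool pax_sanitize_slab = true;

static void parse_pax_sanitize_slab(const char *str)
{
        pax_sanitize_slab = strtol(str, NULL, 0) != 0;
        printf("%sabled PaX slab sanitization\n",
               pax_sanitize_slab ? "En" : "Dis");
}

int main(void)
{
        parse_pax_sanitize_slab("0");   /* as if booted with pax_sanitize_slab=0 */
        return 0;
}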
+diff --git a/mm/slob.c b/mm/slob.c
+index eeed4a0..bb0e9ab 100644
+--- a/mm/slob.c
++++ b/mm/slob.c
+@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
+ /*
+  * Return the size of a slob block.
+  */
+-static slobidx_t slob_units(slob_t *s)
++static slobidx_t slob_units(const slob_t *s)
+ {
+       if (s->units > 0)
+               return s->units;
+@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
+ /*
+  * Return the next free slob block pointer after this one.
+  */
+-static slob_t *slob_next(slob_t *s)
++static slob_t *slob_next(const slob_t *s)
+ {
+       slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
+       slobidx_t next;
+@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
+ /*
+  * Returns true if s is the last free block in its page.
+  */
+-static int slob_last(slob_t *s)
++static int slob_last(const slob_t *s)
+ {
+       return !((unsigned long)slob_next(s) & ~PAGE_MASK);
+ }
+-static void *slob_new_pages(gfp_t gfp, int order, int node)
++static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
+ {
+-      void *page;
++      struct page *page;
+ #ifdef CONFIG_NUMA
+       if (node != NUMA_NO_NODE)
+@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
+       if (!page)
+               return NULL;
+-      return page_address(page);
++      __SetPageSlab(page);
++      return page;
+ }
+-static void slob_free_pages(void *b, int order)
++static void slob_free_pages(struct page *sp, int order)
+ {
+       if (current->reclaim_state)
+               current->reclaim_state->reclaimed_slab += 1 << order;
+-      free_pages((unsigned long)b, order);
++      __ClearPageSlab(sp);
++      page_mapcount_reset(sp);
++      sp->private = 0;
++      __free_pages(sp, order);
+ }
+ /*
+@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
+       /* Not enough space: must allocate a new page */
+       if (!b) {
+-              b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
+-              if (!b)
++              sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
++              if (!sp)
+                       return NULL;
+-              sp = virt_to_page(b);
+-              __SetPageSlab(sp);
++              b = page_address(sp);
+               spin_lock_irqsave(&slob_lock, flags);
+               sp->units = SLOB_UNITS(PAGE_SIZE);
+               sp->freelist = b;
++              sp->private = 0;
+               INIT_LIST_HEAD(&sp->list);
+               set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
+               set_slob_page_free(sp, slob_list);
+@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
+               if (slob_page_free(sp))
+                       clear_slob_page_free(sp);
+               spin_unlock_irqrestore(&slob_lock, flags);
+-              __ClearPageSlab(sp);
+-              page_mapcount_reset(sp);
+-              slob_free_pages(b, 0);
++              slob_free_pages(sp, 0);
+               return;
+       }
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++      if (pax_sanitize_slab)
++              memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
++#endif
++
+       if (!slob_page_free(sp)) {
+               /* This slob page is about to become partially free. Easy! */
+               sp->units = units;
+@@ -424,11 +431,10 @@ out:
+  */
+ static __always_inline void *
+-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
++__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
+ {
+-      unsigned int *m;
+-      int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+-      void *ret;
++      slob_t *m;
++      void *ret = NULL;
+       gfp &= gfp_allowed_mask;
+@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
+               if (!m)
+                       return NULL;
+-              *m = size;
++              BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
++              BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
++              m[0].units = size;
++              m[1].units = align;
+               ret = (void *)m + align;
+               trace_kmalloc_node(caller, ret,
+                                  size, size + align, gfp, node);
+       } else {
+               unsigned int order = get_order(size);
++              struct page *page;
+               if (likely(order))
+                       gfp |= __GFP_COMP;
+-              ret = slob_new_pages(gfp, order, node);
++              page = slob_new_pages(gfp, order, node);
++              if (page) {
++                      ret = page_address(page);
++                      page->private = size;
++              }
+               trace_kmalloc_node(caller, ret,
+                                  size, PAGE_SIZE << order, gfp, node);
+       }
+-      kmemleak_alloc(ret, size, 1, gfp);
++      return ret;
++}
++
++static __always_inline void *
++__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
++{
++      int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
++      void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
++
++      if (!ZERO_OR_NULL_PTR(ret))
++              kmemleak_alloc(ret, size, 1, gfp);
+       return ret;
+ }
+@@ -493,34 +517,112 @@ void kfree(const void *block)
+               return;
+       kmemleak_free(block);
++      VM_BUG_ON(!virt_addr_valid(block));
+       sp = virt_to_page(block);
+-      if (PageSlab(sp)) {
++      VM_BUG_ON(!PageSlab(sp));
++      if (!sp->private) {
+               int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+-              unsigned int *m = (unsigned int *)(block - align);
+-              slob_free(m, *m + align);
+-      } else
++              slob_t *m = (slob_t *)(block - align);
++              slob_free(m, m[0].units + align);
++      } else {
++              __ClearPageSlab(sp);
++              page_mapcount_reset(sp);
++              sp->private = 0;
+               __free_pages(sp, compound_order(sp));
++      }
+ }
+ EXPORT_SYMBOL(kfree);
++bool is_usercopy_object(const void *ptr)
++{
++      if (!slab_is_available())
++              return false;
++
++      // PAX: TODO
++
++      return false;
++}
++
++#ifdef CONFIG_PAX_USERCOPY
++const char *check_heap_object(const void *ptr, unsigned long n)
++{
++      struct page *page;
++      const slob_t *free;
++      const void *base;
++      unsigned long flags;
++
++      if (ZERO_OR_NULL_PTR(ptr))
++              return "<null>";
++
++      if (!virt_addr_valid(ptr))
++              return NULL;
++
++      page = virt_to_head_page(ptr);
++      if (!PageSlab(page))
++              return NULL;
++
++      if (page->private) {
++              base = page;
++              if (base <= ptr && n <= page->private - (ptr - base))
++                      return NULL;
++              return "<slob>";
++      }
++
++      /* some tricky double walking to find the chunk */
++      spin_lock_irqsave(&slob_lock, flags);
++      base = (void *)((unsigned long)ptr & PAGE_MASK);
++      free = page->freelist;
++
++      while (!slob_last(free) && (void *)free <= ptr) {
++              base = free + slob_units(free);
++              free = slob_next(free);
++      }
++
++      while (base < (void *)free) {
++              slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
++              int size = SLOB_UNIT * SLOB_UNITS(m + align);
++              int offset;
++
++              if (ptr < base + align)
++                      break;
++
++              offset = ptr - base - align;
++              if (offset >= m) {
++                      base += size;
++                      continue;
++              }
++
++              if (n > m - offset)
++                      break;
++
++              spin_unlock_irqrestore(&slob_lock, flags);
++              return NULL;
++      }
++
++      spin_unlock_irqrestore(&slob_lock, flags);
++      return "<slob>";
++}
++#endif
++
+ /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
+ size_t ksize(const void *block)
+ {
+       struct page *sp;
+       int align;
+-      unsigned int *m;
++      slob_t *m;
+       BUG_ON(!block);
+       if (unlikely(block == ZERO_SIZE_PTR))
+               return 0;
+       sp = virt_to_page(block);
+-      if (unlikely(!PageSlab(sp)))
+-              return PAGE_SIZE << compound_order(sp);
++      VM_BUG_ON(!PageSlab(sp));
++      if (sp->private)
++              return sp->private;
+       align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+-      m = (unsigned int *)(block - align);
+-      return SLOB_UNITS(*m) * SLOB_UNIT;
++      m = (slob_t *)(block - align);
++      return SLOB_UNITS(m[0].units) * SLOB_UNIT;
+ }
+ EXPORT_SYMBOL(ksize);
+@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
+ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
+ {
+-      void *b;
++      void *b = NULL;
+       flags &= gfp_allowed_mask;
+       lockdep_trace_alloc(flags);
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++      b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
++#else
+       if (c->size < PAGE_SIZE) {
+               b = slob_alloc(c->size, flags, c->align, node);
+               trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
+                                           SLOB_UNITS(c->size) * SLOB_UNIT,
+                                           flags, node);
+       } else {
+-              b = slob_new_pages(flags, get_order(c->size), node);
++              struct page *sp;
++
++              sp = slob_new_pages(flags, get_order(c->size), node);
++              if (sp) {
++                      b = page_address(sp);
++                      sp->private = c->size;
++              }
+               trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
+                                           PAGE_SIZE << get_order(c->size),
+                                           flags, node);
+       }
++#endif
+       if (c->ctor)
+               c->ctor(b);
+@@ -564,10 +676,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
+ static void __kmem_cache_free(void *b, int size)
+ {
+-      if (size < PAGE_SIZE)
++      struct page *sp;
++
++      sp = virt_to_page(b);
++      BUG_ON(!PageSlab(sp));
++      if (!sp->private)
+               slob_free(b, size);
+       else
+-              slob_free_pages(b, get_order(size));
++              slob_free_pages(sp, get_order(size));
+ }
+ static void kmem_rcu_free(struct rcu_head *head)
+@@ -580,17 +696,31 @@ static void kmem_rcu_free(struct rcu_head *head)
+ void kmem_cache_free(struct kmem_cache *c, void *b)
+ {
++      int size = c->size;
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++      if (size + c->align < PAGE_SIZE) {
++              size += c->align;
++              b -= c->align;
++      }
++#endif
++
+       kmemleak_free_recursive(b, c->flags);
+       if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
+               struct slob_rcu *slob_rcu;
+-              slob_rcu = b + (c->size - sizeof(struct slob_rcu));
+-              slob_rcu->size = c->size;
++              slob_rcu = b + (size - sizeof(struct slob_rcu));
++              slob_rcu->size = size;
+               call_rcu(&slob_rcu->head, kmem_rcu_free);
+       } else {
+-              __kmem_cache_free(b, c->size);
++              __kmem_cache_free(b, size);
+       }
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++      trace_kfree(_RET_IP_, b);
++#else
+       trace_kmem_cache_free(_RET_IP_, b);
++#endif
++
+ }
+ EXPORT_SYMBOL(kmem_cache_free);
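The SLOB changes above replace the old single "unsigned int" size header with two slob-unit slots in front of each small kmalloc block, recording the size and the alignment that was applied, and use page->private to mark whole-page allocations. A sketch of how ksize() recovers the usable size from that header; slob_unit and SLOB_UNIT here are stand-ins for the kernel's slob_t machinery:

#include <stddef.h>

/* Userspace model of the new SLOB kmalloc header layout:
 * m[0].units = requested size, m[1].units = alignment, as stored by
 * __do_kmalloc_node_align() in the hunks above. */
struct slob_unit { long units; };

#define SLOB_UNIT       sizeof(struct slob_unit)
#define SLOB_UNITS(sz)  (((sz) + SLOB_UNIT - 1) / SLOB_UNIT)

static size_t slob_usable_size(const void *block, size_t align)
{
        const struct slob_unit *m =
                (const struct slob_unit *)((const char *)block - align);

        /* Round the recorded size up to whole slob units, as ksize() does. */
        return SLOB_UNITS((size_t)m[0].units) * SLOB_UNIT;
}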
+diff --git a/mm/slub.c b/mm/slub.c
+index 57707f0..7857bd3 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -198,7 +198,7 @@ struct track {
+ enum track_item { TRACK_ALLOC, TRACK_FREE };
+-#ifdef CONFIG_SYSFS
++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
+ static int sysfs_slab_add(struct kmem_cache *);
+ static int sysfs_slab_alias(struct kmem_cache *, const char *);
+ static void sysfs_slab_remove(struct kmem_cache *);
+@@ -519,7 +519,7 @@ static void print_track(const char *s, struct track *t)
+       if (!t->addr)
+               return;
+-      printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
++      printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
+               s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
+ #ifdef CONFIG_STACKTRACE
+       {
+@@ -2594,6 +2594,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
+       slab_free_hook(s, x);
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++      if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
++              memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
++              if (s->ctor)
++                      s->ctor(x);
++      }
++#endif
++
+ redo:
+       /*
+        * Determine the currently cpus per cpu slab.
+@@ -2661,7 +2669,7 @@ static int slub_min_objects;
+  * Merge control. If this is set then no merging of slab caches will occur.
+  * (Could be removed. This was introduced to pacify the merge skeptics.)
+  */
+-static int slub_nomerge;
++static int slub_nomerge = 1;
+ /*
+  * Calculate the order of allocation given an slab object size.
+@@ -2938,6 +2946,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
+       s->inuse = size;
+       if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++              (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
++#endif
+               s->ctor)) {
+               /*
+                * Relocate free pointer after the object if it is not
+@@ -3283,6 +3294,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
+ EXPORT_SYMBOL(__kmalloc_node);
+ #endif
++bool is_usercopy_object(const void *ptr)
++{
++      struct page *page;
++      struct kmem_cache *s;
++
++      if (ZERO_OR_NULL_PTR(ptr))
++              return false;
++
++      if (!slab_is_available())
++              return false;
++
++      if (!virt_addr_valid(ptr))
++              return false;
++
++      page = virt_to_head_page(ptr);
++
++      if (!PageSlab(page))
++              return false;
++
++      s = page->slab_cache;
++      return s->flags & SLAB_USERCOPY;
++}
++
++#ifdef CONFIG_PAX_USERCOPY
++const char *check_heap_object(const void *ptr, unsigned long n)
++{
++      struct page *page;
++      struct kmem_cache *s;
++      unsigned long offset;
++
++      if (ZERO_OR_NULL_PTR(ptr))
++              return "<null>";
++
++      if (!virt_addr_valid(ptr))
++              return NULL;
++
++      page = virt_to_head_page(ptr);
++
++      if (!PageSlab(page))
++              return NULL;
++
++      s = page->slab_cache;
++      if (!(s->flags & SLAB_USERCOPY))
++              return s->name;
++
++      offset = (ptr - page_address(page)) % s->size;
++      if (offset <= s->object_size && n <= s->object_size - offset)
++              return NULL;
++
++      return s->name;
++}
++#endif
++
+ size_t ksize(const void *object)
+ {
+       struct page *page;
+@@ -3347,6 +3411,7 @@ void kfree(const void *x)
+       if (unlikely(ZERO_OR_NULL_PTR(x)))
+               return;
++      VM_BUG_ON(!virt_addr_valid(x));
+       page = virt_to_head_page(x);
+       if (unlikely(!PageSlab(page))) {
+               BUG_ON(!PageCompound(page));
+@@ -3652,7 +3717,7 @@ static int slab_unmergeable(struct kmem_cache *s)
+       /*
+        * We may have set a slab to be unmergeable during bootstrap.
+        */
+-      if (s->refcount < 0)
++      if (atomic_read(&s->refcount) < 0)
+               return 1;
+       return 0;
+@@ -3710,7 +3775,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
+       s = find_mergeable(memcg, size, align, flags, name, ctor);
+       if (s) {
+-              s->refcount++;
++              atomic_inc(&s->refcount);
+               /*
+                * Adjust the object sizes so that we clear
+                * the complete object on kzalloc.
+@@ -3719,7 +3784,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
+               s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
+               if (sysfs_slab_alias(s, name)) {
+-                      s->refcount--;
++                      atomic_dec(&s->refcount);
+                       s = NULL;
+               }
+       }
+@@ -3781,7 +3846,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata slab_notifier = {
++static struct notifier_block slab_notifier = {
+       .notifier_call = slab_cpuup_callback
+ };
+@@ -3839,7 +3904,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
+ }
+ #endif
+-#ifdef CONFIG_SYSFS
++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
+ static int count_inuse(struct page *page)
+ {
+       return page->inuse;
+@@ -4226,12 +4291,12 @@ static void resiliency_test(void)
+       validate_slab_cache(kmalloc_caches[9]);
+ }
+ #else
+-#ifdef CONFIG_SYSFS
++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
+ static void resiliency_test(void) {};
+ #endif
+ #endif
+-#ifdef CONFIG_SYSFS
++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
+ enum slab_stat_type {
+       SL_ALL,                 /* All slabs */
+       SL_PARTIAL,             /* Only partially allocated slabs */
+@@ -4475,7 +4540,7 @@ SLAB_ATTR_RO(ctor);
+ static ssize_t aliases_show(struct kmem_cache *s, char *buf)
+ {
+-      return sprintf(buf, "%d\n", s->refcount - 1);
++      return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
+ }
+ SLAB_ATTR_RO(aliases);
+@@ -4563,6 +4628,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
+ SLAB_ATTR_RO(cache_dma);
+ #endif
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
++{
++      return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
++}
++SLAB_ATTR_RO(usercopy);
++#endif
++
+ static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
+ {
+       return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
+@@ -4897,6 +4970,9 @@ static struct attribute *slab_attrs[] = {
+ #ifdef CONFIG_ZONE_DMA
+       &cache_dma_attr.attr,
+ #endif
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++      &usercopy_attr.attr,
++#endif
+ #ifdef CONFIG_NUMA
+       &remote_node_defrag_ratio_attr.attr,
+ #endif
+@@ -5128,6 +5204,7 @@ static char *create_unique_id(struct kmem_cache *s)
+       return name;
+ }
++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
+ static int sysfs_slab_add(struct kmem_cache *s)
+ {
+       int err;
+@@ -5151,7 +5228,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
+       }
+       s->kobj.kset = slab_kset;
+-      err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
++      err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
+       if (err) {
+               kobject_put(&s->kobj);
+               return err;
+@@ -5185,6 +5262,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
+       kobject_del(&s->kobj);
+       kobject_put(&s->kobj);
+ }
++#endif
+ /*
+  * Need to buffer aliases during bootup until sysfs becomes
+@@ -5198,6 +5276,7 @@ struct saved_alias {
+ static struct saved_alias *alias_list;
++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
+ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
+ {
+       struct saved_alias *al;
+@@ -5220,6 +5299,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
+       alias_list = al;
+       return 0;
+ }
++#endif
+ static int __init slab_sysfs_init(void)
+ {
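For SLUB, the usercopy check added above works on the allocator's fixed object stride rather than a freelist walk. A minimal userspace model of the arithmetic, with a stand-in layout struct in place of kmem_cache:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Model of the SLUB check_heap_object() arithmetic above: objects sit at
 * a fixed stride s->size inside the slab page, so the offset of ptr
 * within its object is (ptr - page_start) % stride, and a copy of n
 * bytes is allowed only while it stays inside object_size. */
struct slub_layout {
        size_t size;            /* stride between consecutive objects */
        size_t object_size;     /* usable payload bytes per object */
};

static bool slub_usercopy_ok(const struct slub_layout *s,
                             uintptr_t page_start, uintptr_t ptr, size_t n)
{
        size_t offset = (size_t)((ptr - page_start) % s->size);

        return offset <= s->object_size && n <= s->object_size - offset;
}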
+diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
+index 27eeab3..7c3f7f2 100644
+--- a/mm/sparse-vmemmap.c
++++ b/mm/sparse-vmemmap.c
+@@ -130,7 +130,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
+               void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+               if (!p)
+                       return NULL;
+-              pud_populate(&init_mm, pud, p);
++              pud_populate_kernel(&init_mm, pud, p);
+       }
+       return pud;
+ }
+@@ -142,7 +142,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
+               void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+               if (!p)
+                       return NULL;
+-              pgd_populate(&init_mm, pgd, p);
++              pgd_populate_kernel(&init_mm, pgd, p);
+       }
+       return pgd;
+ }
+diff --git a/mm/sparse.c b/mm/sparse.c
+index 1c91f0d3..485470a 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -761,7 +761,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
+       for (i = 0; i < PAGES_PER_SECTION; i++) {
+               if (PageHWPoison(&memmap[i])) {
+-                      atomic_long_sub(1, &num_poisoned_pages);
++                      atomic_long_sub_unchecked(1, &num_poisoned_pages);
+                       ClearPageHWPoison(&memmap[i]);
+               }
+       }
+diff --git a/mm/swap.c b/mm/swap.c
+index dfd7d71..ccdf688 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -31,6 +31,7 @@
+ #include <linux/memcontrol.h>
+ #include <linux/gfp.h>
+ #include <linux/uio.h>
++#include <linux/hugetlb.h>
+ #include "internal.h"
+@@ -73,6 +74,8 @@ static void __put_compound_page(struct page *page)
+       __page_cache_release(page);
+       dtor = get_compound_page_dtor(page);
++      if (!PageHuge(page))
++              BUG_ON(dtor != free_compound_page);
+       (*dtor)(page);
+ }
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 746af55b..7ac94ae 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -66,7 +66,7 @@ static DEFINE_MUTEX(swapon_mutex);
+ static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
+ /* Activity counter to indicate that a swapon or swapoff has occurred */
+-static atomic_t proc_poll_event = ATOMIC_INIT(0);
++static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
+ static inline unsigned char swap_count(unsigned char ent)
+ {
+@@ -1684,7 +1684,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+       }
+       filp_close(swap_file, NULL);
+       err = 0;
+-      atomic_inc(&proc_poll_event);
++      atomic_inc_unchecked(&proc_poll_event);
+       wake_up_interruptible(&proc_poll_wait);
+ out_dput:
+@@ -1701,8 +1701,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
+       poll_wait(file, &proc_poll_wait, wait);
+-      if (seq->poll_event != atomic_read(&proc_poll_event)) {
+-              seq->poll_event = atomic_read(&proc_poll_event);
++      if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
++              seq->poll_event = atomic_read_unchecked(&proc_poll_event);
+               return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
+       }
+@@ -1800,7 +1800,7 @@ static int swaps_open(struct inode *inode, struct file *file)
+               return ret;
+       seq = file->private_data;
+-      seq->poll_event = atomic_read(&proc_poll_event);
++      seq->poll_event = atomic_read_unchecked(&proc_poll_event);
+       return 0;
+ }
+@@ -2143,7 +2143,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
+               (frontswap_map) ? "FS" : "");
+       mutex_unlock(&swapon_mutex);
+-      atomic_inc(&proc_poll_event);
++      atomic_inc_unchecked(&proc_poll_event);
+       wake_up_interruptible(&proc_poll_wait);
+       if (S_ISREG(inode->i_mode))
+diff --git a/mm/util.c b/mm/util.c
+index ab1424d..7c5bd5a 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -294,6 +294,12 @@ done:
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+       mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (mm->pax_flags & MF_PAX_RANDMMAP)
++              mm->mmap_base += mm->delta_mmap;
++#endif
++
+       mm->get_unmapped_area = arch_get_unmapped_area;
+       mm->unmap_area = arch_unmap_area;
+ }
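The mm/util.c hunk above shifts the legacy mmap base by a per-process delta when PAX_RANDMMAP is active for the task. A toy sketch of that decision; the *_DEMO constants are placeholders, not the kernel's values:

/* Sketch of the PAX_RANDMMAP adjustment in arch_pick_mmap_layout() above:
 * the base is moved up by a random delta chosen at exec time. */
#define TASK_UNMAPPED_BASE_DEMO 0x40000000UL
#define MF_PAX_RANDMMAP_DEMO    0x01UL

static unsigned long pick_mmap_base(unsigned long pax_flags,
                                    unsigned long delta_mmap)
{
        unsigned long base = TASK_UNMAPPED_BASE_DEMO;

        if (pax_flags & MF_PAX_RANDMMAP_DEMO)
                base += delta_mmap;     /* delta_mmap is randomized per exec */

        return base;
}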
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index d365724..6cae7c2 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -59,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
+       pte = pte_offset_kernel(pmd, addr);
+       do {
+-              pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
+-              WARN_ON(!pte_none(ptent) && !pte_present(ptent));
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++              if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
++                      BUG_ON(!pte_exec(*pte));
++                      set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
++                      continue;
++              }
++#endif
++
++              {
++                      pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
++                      WARN_ON(!pte_none(ptent) && !pte_present(ptent));
++              }
+       } while (pte++, addr += PAGE_SIZE, addr != end);
+ }
+@@ -120,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
+       pte = pte_alloc_kernel(pmd, addr);
+       if (!pte)
+               return -ENOMEM;
++
++      pax_open_kernel();
+       do {
+               struct page *page = pages[*nr];
+-              if (WARN_ON(!pte_none(*pte)))
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++              if (pgprot_val(prot) & _PAGE_NX)
++#endif
++
++              if (!pte_none(*pte)) {
++                      pax_close_kernel();
++                      WARN_ON(1);
+                       return -EBUSY;
+-              if (WARN_ON(!page))
++              }
++              if (!page) {
++                      pax_close_kernel();
++                      WARN_ON(1);
+                       return -ENOMEM;
++              }
+               set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
+               (*nr)++;
+       } while (pte++, addr += PAGE_SIZE, addr != end);
++      pax_close_kernel();
+       return 0;
+ }
+@@ -139,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
+       pmd_t *pmd;
+       unsigned long next;
+-      pmd = pmd_alloc(&init_mm, pud, addr);
++      pmd = pmd_alloc_kernel(&init_mm, pud, addr);
+       if (!pmd)
+               return -ENOMEM;
+       do {
+@@ -156,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
+       pud_t *pud;
+       unsigned long next;
+-      pud = pud_alloc(&init_mm, pgd, addr);
++      pud = pud_alloc_kernel(&init_mm, pgd, addr);
+       if (!pud)
+               return -ENOMEM;
+       do {
+@@ -216,6 +240,12 @@ int is_vmalloc_or_module_addr(const void *x)
+       if (addr >= MODULES_VADDR && addr < MODULES_END)
+               return 1;
+ #endif
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++      if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
++              return 1;
++#endif
++
+       return is_vmalloc_addr(x);
+ }
+@@ -236,8 +266,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
+       if (!pgd_none(*pgd)) {
+               pud_t *pud = pud_offset(pgd, addr);
++#ifdef CONFIG_X86
++              if (!pud_large(*pud))
++#endif
+               if (!pud_none(*pud)) {
+                       pmd_t *pmd = pmd_offset(pud, addr);
++#ifdef CONFIG_X86
++                      if (!pmd_large(*pmd))
++#endif
+                       if (!pmd_none(*pmd)) {
+                               pte_t *ptep, pte;
+@@ -339,7 +375,7 @@ static void purge_vmap_area_lazy(void);
+  * Allocate a region of KVA of the specified size and alignment, within the
+  * vstart and vend.
+  */
+-static struct vmap_area *alloc_vmap_area(unsigned long size,
++static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
+                               unsigned long align,
+                               unsigned long vstart, unsigned long vend,
+                               int node, gfp_t gfp_mask)
+@@ -1337,6 +1373,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
+       struct vm_struct *area;
+       BUG_ON(in_interrupt());
++
++#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
++      if (flags & VM_KERNEXEC) {
++              if (start != VMALLOC_START || end != VMALLOC_END)
++                      return NULL;
++              start = (unsigned long)MODULES_EXEC_VADDR;
++              end = (unsigned long)MODULES_EXEC_END;
++      }
++#endif
++
+       if (flags & VM_IOREMAP) {
+               int bit = fls(size);
+@@ -1581,6 +1627,11 @@ void *vmap(struct page **pages, unsigned int count,
+       if (count > totalram_pages)
+               return NULL;
++#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
++      if (!(pgprot_val(prot) & _PAGE_NX))
++              flags |= VM_KERNEXEC;
++#endif
++
+       area = get_vm_area_caller((count << PAGE_SHIFT), flags,
+                                       __builtin_return_address(0));
+       if (!area)
+@@ -1682,6 +1733,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
+       if (!size || (size >> PAGE_SHIFT) > totalram_pages)
+               goto fail;
++#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
++      if (!(pgprot_val(prot) & _PAGE_NX))
++              area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
++                                        VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
++      else
++#endif
++
+       area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
+                                 start, end, node, gfp_mask, caller);
+       if (!area)
+@@ -1858,10 +1916,9 @@ EXPORT_SYMBOL(vzalloc_node);
+  *    For tight control over page level allocator and protection flags
+  *    use __vmalloc() instead.
+  */
+-
+ void *vmalloc_exec(unsigned long size)
+ {
+-      return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
++      return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
+                             NUMA_NO_NODE, __builtin_return_address(0));
+ }
+@@ -2168,6 +2225,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+       unsigned long uaddr = vma->vm_start;
+       unsigned long usize = vma->vm_end - vma->vm_start;
++      BUG_ON(vma->vm_mirror);
++
+       if ((PAGE_SIZE-1) & (unsigned long)addr)
+               return -EINVAL;
+@@ -2629,7 +2688,11 @@ static int s_show(struct seq_file *m, void *p)
+               v->addr, v->addr + v->size, v->size);
+       if (v->caller)
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++              seq_printf(m, " %pK", v->caller);
++#else
+               seq_printf(m, " %pS", v->caller);
++#endif
+       if (v->nr_pages)
+               seq_printf(m, " pages=%d", v->nr_pages);
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index f42745e..62f8346 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -76,7 +76,7 @@ void vm_events_fold_cpu(int cpu)
+  *
+  * vm_stat contains the global counters
+  */
+-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
++atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
+ EXPORT_SYMBOL(vm_stat);
+ #ifdef CONFIG_SMP
+@@ -452,7 +452,7 @@ void refresh_cpu_vm_stats(int cpu)
+                               v = p->vm_stat_diff[i];
+                               p->vm_stat_diff[i] = 0;
+                               local_irq_restore(flags);
+-                              atomic_long_add(v, &zone->vm_stat[i]);
++                              atomic_long_add_unchecked(v, &zone->vm_stat[i]);
+                               global_diff[i] += v;
+ #ifdef CONFIG_NUMA
+                               /* 3 seconds idle till flush */
+@@ -490,7 +490,7 @@ void refresh_cpu_vm_stats(int cpu)
+       for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+               if (global_diff[i])
+-                      atomic_long_add(global_diff[i], &vm_stat[i]);
++                      atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
+ }
+ /*
+@@ -505,8 +505,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
+               if (pset->vm_stat_diff[i]) {
+                       int v = pset->vm_stat_diff[i];
+                       pset->vm_stat_diff[i] = 0;
+-                      atomic_long_add(v, &zone->vm_stat[i]);
+-                      atomic_long_add(v, &vm_stat[i]);
++                      atomic_long_add_unchecked(v, &zone->vm_stat[i]);
++                      atomic_long_add_unchecked(v, &vm_stat[i]);
+               }
+ }
+ #endif
+@@ -1226,7 +1226,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __cpuinitdata vmstat_notifier =
++static struct notifier_block vmstat_notifier =
+       { &vmstat_cpuup_callback, NULL, 0 };
+ #endif
+@@ -1241,10 +1241,20 @@ static int __init setup_vmstat(void)
+               start_cpu_timer(cpu);
+ #endif
+ #ifdef CONFIG_PROC_FS
+-      proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
+-      proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
+-      proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
+-      proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
++      {
++              mode_t gr_mode = S_IRUGO;
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++              gr_mode = S_IRUSR;
++#endif
++              proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
++              proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++              proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
++#else
++              proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
++#endif
++              proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
++      }
+ #endif
+       return 0;
+ }
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index 9424f37..6aabf19 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -469,7 +469,7 @@ out:
+       return NOTIFY_DONE;
+ }
+-static struct notifier_block vlan_notifier_block __read_mostly = {
++static struct notifier_block vlan_notifier_block = {
+       .notifier_call = vlan_device_event,
+ };
+@@ -544,8 +544,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
+               err = -EPERM;
+               if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+                       break;
+-              if ((args.u.name_type >= 0) &&
+-                  (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
++              if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
+                       struct vlan_net *vn;
+                       vn = net_generic(net, vlan_net_id);
+diff --git a/net/9p/mod.c b/net/9p/mod.c
+index 6ab36ae..6f1841b 100644
+--- a/net/9p/mod.c
++++ b/net/9p/mod.c
+@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
+ void v9fs_register_trans(struct p9_trans_module *m)
+ {
+       spin_lock(&v9fs_trans_lock);
+-      list_add_tail(&m->list, &v9fs_trans_list);
++      pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
+       spin_unlock(&v9fs_trans_lock);
+ }
+ EXPORT_SYMBOL(v9fs_register_trans);
+@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
+ void v9fs_unregister_trans(struct p9_trans_module *m)
+ {
+       spin_lock(&v9fs_trans_lock);
+-      list_del_init(&m->list);
++      pax_list_del_init((struct list_head *)&m->list);
+       spin_unlock(&v9fs_trans_lock);
+ }
+ EXPORT_SYMBOL(v9fs_unregister_trans);
+diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
+index 02efb25..41541a9 100644
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
+       oldfs = get_fs();
+       set_fs(get_ds());
+       /* The cast to a user pointer is valid due to the set_fs() */
+-      ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
++      ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
+       set_fs(oldfs);
+       if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
+diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
+index 876fbe8..8bbea9f 100644
+--- a/net/atm/atm_misc.c
++++ b/net/atm/atm_misc.c
+@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
+       if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
+               return 1;
+       atm_return(vcc, truesize);
+-      atomic_inc(&vcc->stats->rx_drop);
++      atomic_inc_unchecked(&vcc->stats->rx_drop);
+       return 0;
+ }
+ EXPORT_SYMBOL(atm_charge);
+@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
+               }
+       }
+       atm_return(vcc, guess);
+-      atomic_inc(&vcc->stats->rx_drop);
++      atomic_inc_unchecked(&vcc->stats->rx_drop);
+       return NULL;
+ }
+ EXPORT_SYMBOL(atm_alloc_charge);
+@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
+ void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
+ {
+-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
+       __SONET_ITEMS
+ #undef __HANDLE_ITEM
+ }
+@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
+ void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
+ {
+-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
+       __SONET_ITEMS
+ #undef __HANDLE_ITEM
+ }
+diff --git a/net/atm/lec.h b/net/atm/lec.h
+index 4149db1..f2ab682 100644
+--- a/net/atm/lec.h
++++ b/net/atm/lec.h
+@@ -48,7 +48,7 @@ struct lane2_ops {
+                             const u8 *tlvs, u32 sizeoftlvs);
+       void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
+                                    const u8 *tlvs, u32 sizeoftlvs);
+-};
++} __no_const;
+ /*
+  * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
+diff --git a/net/atm/proc.c b/net/atm/proc.c
+index bbb6461..cf04016 100644
+--- a/net/atm/proc.c
++++ b/net/atm/proc.c
+@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
+   const struct k_atm_aal_stats *stats)
+ {
+       seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
+-                 atomic_read(&stats->tx), atomic_read(&stats->tx_err),
+-                 atomic_read(&stats->rx), atomic_read(&stats->rx_err),
+-                 atomic_read(&stats->rx_drop));
++                 atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
++                 atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
++                 atomic_read_unchecked(&stats->rx_drop));
+ }
+ static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
+diff --git a/net/atm/resources.c b/net/atm/resources.c
+index 0447d5d..3cf4728 100644
+--- a/net/atm/resources.c
++++ b/net/atm/resources.c
+@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
+ static void copy_aal_stats(struct k_atm_aal_stats *from,
+     struct atm_aal_stats *to)
+ {
+-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
+       __AAL_STAT_ITEMS
+ #undef __HANDLE_ITEM
+ }
+@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
+ static void subtract_aal_stats(struct k_atm_aal_stats *from,
+     struct atm_aal_stats *to)
+ {
+-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
+       __AAL_STAT_ITEMS
+ #undef __HANDLE_ITEM
+ }
+diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
+index d5744b7..506bae3 100644
+--- a/net/ax25/sysctl_net_ax25.c
++++ b/net/ax25/sysctl_net_ax25.c
+@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
+ {
+       char path[sizeof("net/ax25/") + IFNAMSIZ];
+       int k;
+-      struct ctl_table *table;
++      ctl_table_no_const *table;
+       table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
+       if (!table)
+diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
+index f680ee1..97e3542 100644
+--- a/net/batman-adv/bat_iv_ogm.c
++++ b/net/batman-adv/bat_iv_ogm.c
+@@ -79,7 +79,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
+       /* randomize initial seqno to avoid collision */
+       get_random_bytes(&random_seqno, sizeof(random_seqno));
+-      atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
++      atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
+       hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
+       ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
+@@ -627,9 +627,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
+       batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
+       /* change sequence number to network order */
+-      seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
++      seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
+       batadv_ogm_packet->seqno = htonl(seqno);
+-      atomic_inc(&hard_iface->bat_iv.ogm_seqno);
++      atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
+       batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
+       batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
+@@ -1037,7 +1037,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
+               return;
+       /* could be changed by schedule_own_packet() */
+-      if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
++      if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
+       if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
+               has_directlink_flag = 1;
+diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
+index de27b31..7058bfe 100644
+--- a/net/batman-adv/bridge_loop_avoidance.c
++++ b/net/batman-adv/bridge_loop_avoidance.c
+@@ -1522,6 +1522,8 @@ out:
+  * in these cases, the skb is further handled by this function and
+  * returns 1, otherwise it returns 0 and the caller shall further
+  * process the skb.
++ *
++ * This call might reallocate skb data.
+  */
+ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
+ {
+diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
+index f105219..7614af3 100644
+--- a/net/batman-adv/gateway_client.c
++++ b/net/batman-adv/gateway_client.c
+@@ -508,6 +508,7 @@ out:
+       return 0;
+ }
++/* this call might reallocate skb data */
+ static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
+ {
+       int ret = false;
+@@ -568,6 +569,7 @@ out:
+       return ret;
+ }
++/* this call might reallocate skb data */
+ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
+ {
+       struct ethhdr *ethhdr;
+@@ -619,6 +621,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
+       if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
+               return false;
++
++      /* skb->data might have been reallocated by pskb_may_pull() */
++      ethhdr = (struct ethhdr *)skb->data;
++      if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
++              ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
++
+       udphdr = (struct udphdr *)(skb->data + *header_len);
+       *header_len += sizeof(*udphdr);
+@@ -634,12 +642,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
+       return true;
+ }
++/* this call might reallocate skb data */
+ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
+-                          struct sk_buff *skb, struct ethhdr *ethhdr)
++                          struct sk_buff *skb)
+ {
+       struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
+       struct batadv_orig_node *orig_dst_node = NULL;
+       struct batadv_gw_node *curr_gw = NULL;
++      struct ethhdr *ethhdr;
+       bool ret, out_of_range = false;
+       unsigned int header_len = 0;
+       uint8_t curr_tq_avg;
+@@ -648,6 +658,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
+       if (!ret)
+               goto out;
++      ethhdr = (struct ethhdr *)skb->data;
+       orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
+                                                ethhdr->h_dest);
+       if (!orig_dst_node)
+diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
+index 039902d..1037d75 100644
+--- a/net/batman-adv/gateway_client.h
++++ b/net/batman-adv/gateway_client.h
+@@ -34,7 +34,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
+ void batadv_gw_node_purge(struct batadv_priv *bat_priv);
+ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
+ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
+-bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
+-                          struct sk_buff *skb, struct ethhdr *ethhdr);
++bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
+ #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
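The gateway_client changes above all address one hazard: pskb_may_pull() may reallocate skb->data, so header pointers taken before the call are stale afterwards and must be re-read, which is why batadv_gw_out_of_range() now derives ethhdr from the skb itself instead of taking it as a parameter. A rough user-space analogue of the pattern, with realloc() standing in for the skb machinery (struct pkt and pkt_may_pull() are invented helpers):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pkt {
        unsigned char *data;
        size_t cap;
};

/* Rough analogue of pskb_may_pull(): ensure at least `need` bytes are
 * available, possibly reallocating the backing buffer. */
static int pkt_may_pull(struct pkt *p, size_t need)
{
        unsigned char *n;

        if (need <= p->cap)
                return 1;
        n = realloc(p->data, need);
        if (!n)
                return 0;
        p->data = n;            /* older pointers into the buffer are now stale */
        p->cap = need;
        return 1;
}

int main(void)
{
        struct pkt p = { malloc(4), 4 };
        unsigned char *hdr;

        if (!p.data)
                return 1;
        memcpy(p.data, "\xaa\xbb\xcc\xdd", 4);
        hdr = p.data;                   /* pointer taken before the pull */

        if (!pkt_may_pull(&p, 4096))
                return 1;

        hdr = p.data;                   /* re-read after the pull, as the patch does */
        printf("first byte: %02x\n", hdr[0]);

        free(p.data);
        return 0;
}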
+diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
+index 522243a..b48c0ef 100644
+--- a/net/batman-adv/hard-interface.c
++++ b/net/batman-adv/hard-interface.c
+@@ -401,7 +401,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
+       hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
+       dev_add_pack(&hard_iface->batman_adv_ptype);
+-      atomic_set(&hard_iface->frag_seqno, 1);
++      atomic_set_unchecked(&hard_iface->frag_seqno, 1);
+       batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
+                   hard_iface->net_dev->name);
+@@ -550,7 +550,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
+       /* This can't be called via a bat_priv callback because
+        * we have no bat_priv yet.
+        */
+-      atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
++      atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, 1);
+       hard_iface->bat_iv.ogm_buff = NULL;
+       return hard_iface;
+diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
+index 819dfb0..226bacd 100644
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb,
+       if (batadv_bla_tx(bat_priv, skb, vid))
+               goto dropped;
++      /* skb->data might have been reallocated by batadv_bla_tx() */
++      ethhdr = (struct ethhdr *)skb->data;
++
+       /* Register the client MAC in the transtable */
+       if (!is_multicast_ether_addr(ethhdr->h_source))
+               batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
+@@ -220,6 +223,10 @@ static int batadv_interface_tx(struct sk_buff *skb,
+               default:
+                       break;
+               }
++
++              /* reminder: ethhdr might have become unusable from here on
++               * (batadv_gw_is_dhcp_target() might have reallocated skb data)
++               */
+       }
+       /* ethernet packet should be broadcasted */
+@@ -253,7 +260,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
+                      primary_if->net_dev->dev_addr, ETH_ALEN);
+               /* set broadcast sequence number */
+-              seqno = atomic_inc_return(&bat_priv->bcast_seqno);
++              seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
+               bcast_packet->seqno = htonl(seqno);
+               batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
+@@ -266,7 +273,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
+       /* unicast packet */
+       } else {
+               if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
+-                      ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
++                      ret = batadv_gw_out_of_range(bat_priv, skb);
+                       if (ret)
+                               goto dropped;
+               }
+@@ -472,7 +479,7 @@ static int batadv_softif_init_late(struct net_device *dev)
+       atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
+       atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
+-      atomic_set(&bat_priv->bcast_seqno, 1);
++      atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
+       atomic_set(&bat_priv->tt.vn, 0);
+       atomic_set(&bat_priv->tt.local_changes, 0);
+       atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
+diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
+index aba8364..50fcbb8 100644
+--- a/net/batman-adv/types.h
++++ b/net/batman-adv/types.h
+@@ -51,7 +51,7 @@
+ struct batadv_hard_iface_bat_iv {
+       unsigned char *ogm_buff;
+       int ogm_buff_len;
+-      atomic_t ogm_seqno;
++      atomic_unchecked_t ogm_seqno;
+ };
+ /**
+@@ -75,7 +75,7 @@ struct batadv_hard_iface {
+       int16_t if_num;
+       char if_status;
+       struct net_device *net_dev;
+-      atomic_t frag_seqno;
++      atomic_unchecked_t frag_seqno;
+       struct kobject *hardif_obj;
+       atomic_t refcount;
+       struct packet_type batman_adv_ptype;
+@@ -558,7 +558,7 @@ struct batadv_priv {
+ #ifdef CONFIG_BATMAN_ADV_DEBUG
+       atomic_t log_level;
+ #endif
+-      atomic_t bcast_seqno;
++      atomic_unchecked_t bcast_seqno;
+       atomic_t bcast_queue_left;
+       atomic_t batman_queue_left;
+       char num_ifaces;
+diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
+index 0bb3b59..0e3052e 100644
+--- a/net/batman-adv/unicast.c
++++ b/net/batman-adv/unicast.c
+@@ -270,7 +270,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
+       frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
+       frag2->flags = large_tail;
+-      seqno = atomic_add_return(2, &hard_iface->frag_seqno);
++      seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
+       frag1->seqno = htons(seqno - 1);
+       frag2->seqno = htons(seqno);
+@@ -326,7 +326,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
+  * @skb: the skb containing the payload to encapsulate
+  * @orig_node: the destination node
+  *
+- * Returns false if the payload could not be encapsulated or true otherwise
++ * Returns false if the payload could not be encapsulated or true otherwise.
++ *
++ * This call might reallocate skb data.
+  */
+ static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
+                                      struct batadv_orig_node *orig_node)
+@@ -343,7 +345,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
+  * @orig_node: the destination node
+  * @packet_subtype: the batman 4addr packet subtype to use
+  *
+- * Returns false if the payload could not be encapsulated or true otherwise
++ * Returns false if the payload could not be encapsulated or true otherwise.
++ *
++ * This call might reallocate skb data.
+  */
+ bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
+                                     struct sk_buff *skb,
+@@ -401,7 +405,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
+       struct batadv_neigh_node *neigh_node;
+       int data_len = skb->len;
+       int ret = NET_RX_DROP;
+-      unsigned int dev_mtu;
++      unsigned int dev_mtu, header_len;
+       /* get routing information */
+       if (is_multicast_ether_addr(ethhdr->h_dest)) {
+@@ -429,10 +433,12 @@ find_router:
+       switch (packet_type) {
+       case BATADV_UNICAST:
+               batadv_unicast_prepare_skb(skb, orig_node);
++              header_len = sizeof(struct batadv_unicast_packet);
+               break;
+       case BATADV_UNICAST_4ADDR:
+               batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
+                                                packet_subtype);
++              header_len = sizeof(struct batadv_unicast_4addr_packet);
+               break;
+       default:
+               /* this function supports UNICAST and UNICAST_4ADDR only. It
+@@ -441,6 +447,7 @@ find_router:
+               goto out;
+       }
++      ethhdr = (struct ethhdr *)(skb->data + header_len);
+       unicast_packet = (struct batadv_unicast_packet *)skb->data;
+       /* inform the destination node that we are still missing a correct route
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index ace5e55..a65a1c0 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2211,16 +2211,16 @@ int hci_register_dev(struct hci_dev *hdev)
+       list_add(&hdev->list, &hci_dev_list);
+       write_unlock(&hci_dev_list_lock);
+-      hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
+-                                        WQ_MEM_RECLAIM, 1);
++      hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
++                                        WQ_MEM_RECLAIM, 1, hdev->name);
+       if (!hdev->workqueue) {
+               error = -ENOMEM;
+               goto err;
+       }
+-      hdev->req_workqueue = alloc_workqueue(hdev->name,
++      hdev->req_workqueue = alloc_workqueue("%s",
+                                             WQ_HIGHPRI | WQ_UNBOUND |
+-                                            WQ_MEM_RECLAIM, 1);
++                                            WQ_MEM_RECLAIM, 1, hdev->name);
+       if (!hdev->req_workqueue) {
+               destroy_workqueue(hdev->workqueue);
+               error = -ENOMEM;
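The hci_core change above applies standard format-string discipline: alloc_workqueue() takes a printf-style format, so the device name is now passed as an argument to a fixed "%s" instead of being interpreted as the format itself. The same rule in plain, standalone C:

#include <stdio.h>

int main(void)
{
        const char name[] = "wq-%d";    /* illustrative: a name containing '%' */

        /* printf(name); would parse the "%d" and read a nonexistent argument. */
        printf("%s\n", name);           /* fixed format, name passed as data */
        return 0;
}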
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 9bd7d95..6c4884f 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -934,7 +934,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
+                       uf.event_mask[1] = *((u32 *) f->event_mask + 1);
+               }
+-              len = min_t(unsigned int, len, sizeof(uf));
++              len = min((size_t)len, sizeof(uf));
+               if (copy_from_user(&uf, optval, len)) {
+                       err = -EFAULT;
+                       break;
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 68843a2..30e9342 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -3507,8 +3507,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+                       break;
+               case L2CAP_CONF_RFC:
+-                      if (olen == sizeof(rfc))
+-                              memcpy(&rfc, (void *)val, olen);
++                      if (olen != sizeof(rfc))
++                              break;
++
++                      memcpy(&rfc, (void *)val, olen);
+                       if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
+                           rfc.mode != chan->mode)
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 36fed40..be2eeb2 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -485,7 +485,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
+       struct sock *sk = sock->sk;
+       struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+       struct l2cap_options opts;
+-      int len, err = 0;
++      int err = 0;
++      size_t len = optlen;
+       u32 opt;
+       BT_DBG("sk %p", sk);
+@@ -507,7 +508,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
+               opts.max_tx   = chan->max_tx;
+               opts.txwin_size = chan->tx_win;
+-              len = min_t(unsigned int, sizeof(opts), optlen);
++              len = min(sizeof(opts), len);
+               if (copy_from_user((char *) &opts, optval, len)) {
+                       err = -EFAULT;
+                       break;
+@@ -587,7 +588,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+       struct bt_security sec;
+       struct bt_power pwr;
+       struct l2cap_conn *conn;
+-      int len, err = 0;
++      int err = 0;
++      size_t len = optlen;
+       u32 opt;
+       BT_DBG("sk %p", sk);
+@@ -610,7 +612,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+               sec.level = BT_SECURITY_LOW;
+-              len = min_t(unsigned int, sizeof(sec), optlen);
++              len = min(sizeof(sec), len);
+               if (copy_from_user((char *) &sec, optval, len)) {
+                       err = -EFAULT;
+                       break;
+@@ -707,7 +709,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+               pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
+-              len = min_t(unsigned int, sizeof(pwr), optlen);
++              len = min(sizeof(pwr), len);
+               if (copy_from_user((char *) &pwr, optval, len)) {
+                       err = -EFAULT;
+                       break;
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 30b3721..c1bd0a0 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -666,7 +666,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
+       struct sock *sk = sock->sk;
+       struct bt_security sec;
+       int err = 0;
+-      size_t len;
++      size_t len = optlen;
+       u32 opt;
+       BT_DBG("sk %p", sk);
+@@ -688,7 +688,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
+               sec.level = BT_SECURITY_LOW;
+-              len = min_t(unsigned int, sizeof(sec), optlen);
++              len = min(sizeof(sec), len);
+               if (copy_from_user((char *) &sec, optval, len)) {
+                       err = -EFAULT;
+                       break;
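The setsockopt changes above (hci_sock, l2cap_sock and rfcomm) keep the copy length in a size_t and clamp it with min() against the destination structure, so the length stays unsigned end to end rather than passing through a signed int. A standalone illustration of why mixing the two is easy to get wrong:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char opts[16];
        int optlen = -1;                /* e.g. a bogus caller-supplied length */
        size_t len;

        /* Converted to an unsigned type, -1 becomes a huge value ... */
        printf("as size_t: %zu\n", (size_t)optlen);

        /* ... so the hardened pattern keeps the length unsigned and clamps
         * it against the destination before copying, as min() does above. */
        len = (size_t)optlen;
        if (len > sizeof(opts))
                len = sizeof(opts);

        memset(opts, 0, len);
        printf("clamped copy length: %zu\n", len);
        return 0;
}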
+diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
+index b6e44ad..5b0d514 100644
+--- a/net/bluetooth/rfcomm/tty.c
++++ b/net/bluetooth/rfcomm/tty.c
+@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
+       BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
+       spin_lock_irqsave(&dev->port.lock, flags);
+-      if (dev->port.count > 0) {
++      if (atomic_read(&dev->port.count) > 0) {
+               spin_unlock_irqrestore(&dev->port.lock, flags);
+               return;
+       }
+@@ -659,10 +659,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
+               return -ENODEV;
+       BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
+-             dev->channel, dev->port.count);
++             dev->channel, atomic_read(&dev->port.count));
+       spin_lock_irqsave(&dev->port.lock, flags);
+-      if (++dev->port.count > 1) {
++      if (atomic_inc_return(&dev->port.count) > 1) {
+               spin_unlock_irqrestore(&dev->port.lock, flags);
+               return 0;
+       }
+@@ -727,10 +727,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
+               return;
+       BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
+-                                              dev->port.count);
++                                              atomic_read(&dev->port.count));
+       spin_lock_irqsave(&dev->port.lock, flags);
+-      if (!--dev->port.count) {
++      if (!atomic_dec_return(&dev->port.count)) {
+               spin_unlock_irqrestore(&dev->port.lock, flags);
+               if (dev->tty_dev->parent)
+                       device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 3d110c4..4e1b2eb 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1525,7 +1525,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+                       tmp.valid_hooks = t->table->valid_hooks;
+               }
+               mutex_unlock(&ebt_mutex);
+-              if (copy_to_user(user, &tmp, *len) != 0){
++              if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
+                       BUGPRINT("c2u Didn't work\n");
+                       ret = -EFAULT;
+                       break;
+@@ -2331,7 +2331,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
+                       goto out;
+               tmp.valid_hooks = t->valid_hooks;
+-              if (copy_to_user(user, &tmp, *len) != 0) {
++              if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
+                       ret = -EFAULT;
+                       break;
+               }
+@@ -2342,7 +2342,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
+               tmp.entries_size = t->table->entries_size;
+               tmp.valid_hooks = t->table->valid_hooks;
+-              if (copy_to_user(user, &tmp, *len) != 0) {
++              if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
+                       ret = -EFAULT;
+                       break;
+               }
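The three ebtables hunks above add the same guard: the user-supplied *len is checked against sizeof(tmp) before copy_to_user(), so an oversized request can no longer read past the on-stack reply structure and leak adjacent kernel stack memory. The shape of the check as a user-space sketch (struct reply and copy_reply() are made up for illustration):

#include <stdio.h>
#include <string.h>

struct reply {                          /* stand-in for the on-stack ebtables reply */
        char name[32];
        unsigned int entries_size;
};

/* Refuse to copy more bytes than the reply object actually holds. */
static int copy_reply(void *dst, size_t req_len, const struct reply *r)
{
        if (req_len > sizeof(*r))
                return -1;              /* would read past *r */
        memcpy(dst, r, req_len);
        return 0;
}

int main(void)
{
        struct reply r = { "filter", 42 };
        char buf[256];

        printf("exact size: %d\n", copy_reply(buf, sizeof(r), &r));   /* 0  */
        printf("oversized:  %d\n", copy_reply(buf, sizeof(buf), &r)); /* -1 */
        return 0;
}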
+diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
+index 2bd4b58..0dc30a1 100644
+--- a/net/caif/cfctrl.c
++++ b/net/caif/cfctrl.c
+@@ -10,6 +10,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/slab.h>
+ #include <linux/pkt_sched.h>
++#include <linux/sched.h>
+ #include <net/caif/caif_layer.h>
+ #include <net/caif/cfpkt.h>
+ #include <net/caif/cfctrl.h>
+@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
+       memset(&dev_info, 0, sizeof(dev_info));
+       dev_info.id = 0xff;
+       cfsrvl_init(&this->serv, 0, &dev_info, false);
+-      atomic_set(&this->req_seq_no, 1);
+-      atomic_set(&this->rsp_seq_no, 1);
++      atomic_set_unchecked(&this->req_seq_no, 1);
++      atomic_set_unchecked(&this->rsp_seq_no, 1);
+       this->serv.layer.receive = cfctrl_recv;
+       sprintf(this->serv.layer.name, "ctrl");
+       this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
+@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
+                             struct cfctrl_request_info *req)
+ {
+       spin_lock_bh(&ctrl->info_list_lock);
+-      atomic_inc(&ctrl->req_seq_no);
+-      req->sequence_no = atomic_read(&ctrl->req_seq_no);
++      atomic_inc_unchecked(&ctrl->req_seq_no);
++      req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
+       list_add_tail(&req->list, &ctrl->list);
+       spin_unlock_bh(&ctrl->info_list_lock);
+ }
+@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
+                       if (p != first)
+                               pr_warn("Requests are not received in order\n");
+-                      atomic_set(&ctrl->rsp_seq_no,
++                      atomic_set_unchecked(&ctrl->rsp_seq_no,
+                                        p->sequence_no);
+                       list_del(&p->list);
+                       goto out;
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index c4e5085..aa9efdf 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -862,7 +862,7 @@ static const struct net_proto_family can_family_ops = {
+ };
+ /* notifier block for netdevice event */
+-static struct notifier_block can_netdev_notifier __read_mostly = {
++static struct notifier_block can_netdev_notifier = {
+       .notifier_call = can_notifier,
+ };
+diff --git a/net/can/gw.c b/net/can/gw.c
+index 3ee690e..00d581b 100644
+--- a/net/can/gw.c
++++ b/net/can/gw.c
+@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
+                "default: " __stringify(CGW_DEFAULT_HOPS) ")");
+ static HLIST_HEAD(cgw_list);
+-static struct notifier_block notifier;
+ static struct kmem_cache *cgw_cache __read_mostly;
+@@ -927,6 +926,10 @@ static int cgw_remove_job(struct sk_buff *skb,  struct nlmsghdr *nlh)
+       return err;
+ }
++static struct notifier_block notifier = {
++      .notifier_call = cgw_notifier
++};
++
+ static __init int cgw_module_init(void)
+ {
+       /* sanitize given module parameter */
+@@ -942,7 +945,6 @@ static __init int cgw_module_init(void)
+               return -ENOMEM;
+       /* set notifier */
+-      notifier.notifier_call = cgw_notifier;
+       register_netdevice_notifier(&notifier);
+       if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
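The can/gw change above trades a runtime assignment of notifier.notifier_call for a designated initializer. Presumably this serves the patch's wider constification of function-pointer structures (the same motive behind the __no_const annotations elsewhere): a structure initialized at compile time can be placed in read-only memory, while one filled in during init cannot. In plain C the difference looks like this (names chosen for illustration):

#include <stdio.h>

struct notifier {
        int (*notifier_call)(unsigned long event);
};

static int cgw_like_notifier(unsigned long event)
{
        printf("event %lu\n", event);
        return 0;
}

/* Designated, compile-time initialization: the object can be const and
 * live in read-only storage. */
static const struct notifier notifier = {
        .notifier_call = cgw_like_notifier,
};

int main(void)
{
        /* A runtime "notifier.notifier_call = ..." assignment would require
         * the object to remain writable. */
        return notifier.notifier_call(7);
}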
+diff --git a/net/compat.c b/net/compat.c
+index f0a1ba6..0541331 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
+           __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
+           __get_user(kmsg->msg_flags, &umsg->msg_flags))
+               return -EFAULT;
+-      kmsg->msg_name = compat_ptr(tmp1);
+-      kmsg->msg_iov = compat_ptr(tmp2);
+-      kmsg->msg_control = compat_ptr(tmp3);
++      kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
++      kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
++      kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
+       return 0;
+ }
+@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
+       if (kern_msg->msg_namelen) {
+               if (mode == VERIFY_READ) {
+-                      int err = move_addr_to_kernel(kern_msg->msg_name,
++                      int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
+                                                     kern_msg->msg_namelen,
+                                                     kern_address);
+                       if (err < 0)
+@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
+               kern_msg->msg_name = NULL;
+       tot_len = iov_from_user_compat_to_kern(kern_iov,
+-                                        (struct compat_iovec __user *)kern_msg->msg_iov,
++                                        (struct compat_iovec __force_user *)kern_msg->msg_iov,
+                                         kern_msg->msg_iovlen);
+       if (tot_len >= 0)
+               kern_msg->msg_iov = kern_iov;
+@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
+ #define CMSG_COMPAT_FIRSTHDR(msg)                     \
+       (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ?     \
+-       (struct compat_cmsghdr __user *)((msg)->msg_control) :         \
++       (struct compat_cmsghdr __force_user *)((msg)->msg_control) :           \
+        (struct compat_cmsghdr __user *)NULL)
+ #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
+       ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
+        (ucmlen) <= (unsigned long) \
+        ((mhdr)->msg_controllen - \
+-        ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
++        ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
+ static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
+               struct compat_cmsghdr __user *cmsg, int cmsg_len)
+ {
+       char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
+-      if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
++      if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
+                       msg->msg_controllen)
+               return NULL;
+       return (struct compat_cmsghdr __user *)ptr;
+@@ -219,7 +219,7 @@ Efault:
+ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
+ {
+-      struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
++      struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
+       struct compat_cmsghdr cmhdr;
+       struct compat_timeval ctv;
+       struct compat_timespec cts[3];
+@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
+ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
+ {
+-      struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
++      struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
+       int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
+       int fdnum = scm->fp->count;
+       struct file **fp = scm->fp->fp;
+@@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
+               return -EFAULT;
+       old_fs = get_fs();
+       set_fs(KERNEL_DS);
+-      err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
++      err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
+       set_fs(old_fs);
+       return err;
+@@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
+       len = sizeof(ktime);
+       old_fs = get_fs();
+       set_fs(KERNEL_DS);
+-      err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
++      err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
+       set_fs(old_fs);
+       if (!err) {
+@@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
+       case MCAST_JOIN_GROUP:
+       case MCAST_LEAVE_GROUP:
+       {
+-              struct compat_group_req __user *gr32 = (void *)optval;
++              struct compat_group_req __user *gr32 = (void __user *)optval;
+               struct group_req __user *kgr =
+                       compat_alloc_user_space(sizeof(struct group_req));
+               u32 interface;
+@@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
+       case MCAST_BLOCK_SOURCE:
+       case MCAST_UNBLOCK_SOURCE:
+       {
+-              struct compat_group_source_req __user *gsr32 = (void *)optval;
++              struct compat_group_source_req __user *gsr32 = (void __user *)optval;
+               struct group_source_req __user *kgsr = compat_alloc_user_space(
+                       sizeof(struct group_source_req));
+               u32 interface;
+@@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
+       }
+       case MCAST_MSFILTER:
+       {
+-              struct compat_group_filter __user *gf32 = (void *)optval;
++              struct compat_group_filter __user *gf32 = (void __user *)optval;
+               struct group_filter __user *kgf;
+               u32 interface, fmode, numsrc;
+@@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
+       char __user *optval, int __user *optlen,
+       int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
+ {
+-      struct compat_group_filter __user *gf32 = (void *)optval;
++      struct compat_group_filter __user *gf32 = (void __user *)optval;
+       struct group_filter __user *kgf;
+       int __user      *koptlen;
+       u32 interface, fmode, numsrc;
+@@ -805,7 +805,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
+       if (call < SYS_SOCKET || call > SYS_SENDMMSG)
+               return -EINVAL;
+-      if (copy_from_user(a, args, nas[call]))
++      if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
+               return -EFAULT;
+       a0 = a[0];
+       a1 = a[1];
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index b71423d..0360434 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -295,7 +295,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
+       }
+       kfree_skb(skb);
+-      atomic_inc(&sk->sk_drops);
++      atomic_inc_unchecked(&sk->sk_drops);
+       sk_mem_reclaim_partial(sk);
+       return err;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 7ddbb31..3902452 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1649,7 +1649,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+ {
+       if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+               if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
+-                      atomic_long_inc(&dev->rx_dropped);
++                      atomic_long_inc_unchecked(&dev->rx_dropped);
+                       kfree_skb(skb);
+                       return NET_RX_DROP;
+               }
+@@ -1658,7 +1658,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+       skb_orphan(skb);
+       if (unlikely(!is_skb_forwardable(dev, skb))) {
+-              atomic_long_inc(&dev->rx_dropped);
++              atomic_long_inc_unchecked(&dev->rx_dropped);
+               kfree_skb(skb);
+               return NET_RX_DROP;
+       }
+@@ -2404,7 +2404,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
+ struct dev_gso_cb {
+       void (*destructor)(struct sk_buff *skb);
+-};
++} __no_const;
+ #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
+@@ -3139,7 +3139,7 @@ enqueue:
+       local_irq_restore(flags);
+-      atomic_long_inc(&skb->dev->rx_dropped);
++      atomic_long_inc_unchecked(&skb->dev->rx_dropped);
+       kfree_skb(skb);
+       return NET_RX_DROP;
+ }
+@@ -3211,7 +3211,7 @@ int netif_rx_ni(struct sk_buff *skb)
+ }
+ EXPORT_SYMBOL(netif_rx_ni);
+-static void net_tx_action(struct softirq_action *h)
++static void net_tx_action(void)
+ {
+       struct softnet_data *sd = &__get_cpu_var(softnet_data);
+@@ -3545,7 +3545,7 @@ ncls:
+                       ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+       } else {
+ drop:
+-              atomic_long_inc(&skb->dev->rx_dropped);
++              atomic_long_inc_unchecked(&skb->dev->rx_dropped);
+               kfree_skb(skb);
+               /* Jamal, now you will not able to escape explaining
+                * me how you were going to use this. :-)
+@@ -4153,7 +4153,7 @@ void netif_napi_del(struct napi_struct *napi)
+ }
+ EXPORT_SYMBOL(netif_napi_del);
+-static void net_rx_action(struct softirq_action *h)
++static void net_rx_action(void)
+ {
+       struct softnet_data *sd = &__get_cpu_var(softnet_data);
+       unsigned long time_limit = jiffies + 2;
+@@ -5590,7 +5590,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+       } else {
+               netdev_stats_to_stats64(storage, &dev->stats);
+       }
+-      storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
++      storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
+       return storage;
+ }
+ EXPORT_SYMBOL(dev_get_stats);
+diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
+index 5b7d0e1..cb960fc 100644
+--- a/net/core/dev_ioctl.c
++++ b/net/core/dev_ioctl.c
+@@ -365,9 +365,13 @@ void dev_load(struct net *net, const char *name)
+       if (no_module && capable(CAP_NET_ADMIN))
+               no_module = request_module("netdev-%s", name);
+       if (no_module && capable(CAP_SYS_MODULE)) {
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++              ___request_module(true, "grsec_modharden_netdev", "%s", name);
++#else
+               if (!request_module("%s", name))
+                       pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
+                               name);
++#endif
+       }
+ }
+ EXPORT_SYMBOL(dev_load);
+diff --git a/net/core/ethtool.c b/net/core/ethtool.c
+index ce91766..3b71cdb 100644
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -1319,10 +1319,19 @@ static int ethtool_get_dump_data(struct net_device *dev,
+       if (ret)
+               return ret;
+-      len = (tmp.len > dump.len) ? dump.len : tmp.len;
++      len = min(tmp.len, dump.len);
+       if (!len)
+               return -EFAULT;
++      /* Don't ever let the driver think there's more space available
++       * than it requested with .get_dump_flag().
++       */
++      dump.len = len;
++
++      /* Always allocate enough space to hold the whole thing so that the
++       * driver does not need to check the length and bother with partial
++       * dumping.
++       */
+       data = vzalloc(tmp.len);
+       if (!data)
+               return -ENOMEM;
+@@ -1330,6 +1339,16 @@ static int ethtool_get_dump_data(struct net_device *dev,
+       if (ret)
+               goto out;
++      /* There are two sane possibilities:
++       * 1. The driver's .get_dump_data() does not touch dump.len.
++       * 2. Or it may set dump.len to how much it really writes, which
++       *    should be tmp.len (or len if it can do a partial dump).
++       * In any case respond to userspace with the actual length of data
++       * it's receiving.
++       */
++      WARN_ON(dump.len != len && dump.len != tmp.len);
++      dump.len = len;
++
+       if (copy_to_user(useraddr, &dump, sizeof(dump))) {
+               ret = -EFAULT;
+               goto out;
+diff --git a/net/core/flow.c b/net/core/flow.c
+index 7102f16..146b4bd 100644
+--- a/net/core/flow.c
++++ b/net/core/flow.c
+@@ -61,7 +61,7 @@ struct flow_cache {
+       struct timer_list               rnd_timer;
+ };
+-atomic_t flow_cache_genid = ATOMIC_INIT(0);
++atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
+ EXPORT_SYMBOL(flow_cache_genid);
+ static struct flow_cache flow_cache_global;
+ static struct kmem_cache *flow_cachep __read_mostly;
+@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
+ static int flow_entry_valid(struct flow_cache_entry *fle)
+ {
+-      if (atomic_read(&flow_cache_genid) != fle->genid)
++      if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
+               return 0;
+       if (fle->object && !fle->object->ops->check(fle->object))
+               return 0;
+@@ -258,7 +258,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
+                       hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
+                       fcp->hash_count++;
+               }
+-      } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
++      } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
+               flo = fle->object;
+               if (!flo)
+                       goto ret_object;
+@@ -279,7 +279,7 @@ nocache:
+       }
+       flo = resolver(net, key, family, dir, flo, ctx);
+       if (fle) {
+-              fle->genid = atomic_read(&flow_cache_genid);
++              fle->genid = atomic_read_unchecked(&flow_cache_genid);
+               if (!IS_ERR(flo))
+                       fle->object = flo;
+               else
+diff --git a/net/core/iovec.c b/net/core/iovec.c
+index de178e4..1dabd8b 100644
+--- a/net/core/iovec.c
++++ b/net/core/iovec.c
+@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
+       if (m->msg_namelen) {
+               if (mode == VERIFY_READ) {
+                       void __user *namep;
+-                      namep = (void __user __force *) m->msg_name;
++                      namep = (void __force_user *) m->msg_name;
+                       err = move_addr_to_kernel(namep, m->msg_namelen,
+                                                 address);
+                       if (err < 0)
+@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
+       }
+       size = m->msg_iovlen * sizeof(struct iovec);
+-      if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
++      if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
+               return -EFAULT;
+       m->msg_iov = iov;
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index ce90b02..8752627 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -2771,7 +2771,7 @@ static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
+                          size_t *lenp, loff_t *ppos)
+ {
+       int size, ret;
+-      ctl_table tmp = *ctl;
++      ctl_table_no_const tmp = *ctl;
+       tmp.extra1 = &zero;
+       tmp.extra2 = &unres_qlen_max;
+diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
+index 569d355..79cf2d0 100644
+--- a/net/core/net-procfs.c
++++ b/net/core/net-procfs.c
+@@ -271,8 +271,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
+               else
+                       seq_printf(seq, "%04x", ntohs(pt->type));
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++              seq_printf(seq, " %-8s %pf\n",
++                         pt->dev ? pt->dev->name : "", NULL);
++#else
+               seq_printf(seq, " %-8s %pf\n",
+                          pt->dev ? pt->dev->name : "", pt->func);
++#endif
+       }
+       return 0;
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index 981fed3..536af34 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -1311,7 +1311,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
+ }
+ EXPORT_SYMBOL(netdev_class_remove_file);
+-int netdev_kobject_init(void)
++int __init netdev_kobject_init(void)
+ {
+       kobj_ns_type_register(&net_ns_type_operations);
+       return class_register(&net_class);
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index f9765203..9feaef8 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list,
+       int error;
+       LIST_HEAD(net_exit_list);
+-      list_add_tail(&ops->list, list);
++      pax_list_add_tail((struct list_head *)&ops->list, list);
+       if (ops->init || (ops->id && ops->size)) {
+               for_each_net(net) {
+                       error = ops_init(ops, net);
+@@ -456,7 +456,7 @@ static int __register_pernet_operations(struct list_head *list,
+ out_undo:
+       /* If I have an error cleanup all namespaces I initialized */
+-      list_del(&ops->list);
++      pax_list_del((struct list_head *)&ops->list);
+       ops_exit_list(ops, &net_exit_list);
+       ops_free_list(ops, &net_exit_list);
+       return error;
+@@ -467,7 +467,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
+       struct net *net;
+       LIST_HEAD(net_exit_list);
+-      list_del(&ops->list);
++      pax_list_del((struct list_head *)&ops->list);
+       for_each_net(net)
+               list_add_tail(&net->exit_list, &net_exit_list);
+       ops_exit_list(ops, &net_exit_list);
+@@ -601,7 +601,7 @@ int register_pernet_device(struct pernet_operations *ops)
+       mutex_lock(&net_mutex);
+       error = register_pernet_operations(&pernet_list, ops);
+       if (!error && (first_device == &pernet_list))
+-              first_device = &ops->list;
++              first_device = (struct list_head *)&ops->list;
+       mutex_unlock(&net_mutex);
+       return error;
+ }
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index a08bd2b..c59bd7c 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -58,7 +58,7 @@ struct rtnl_link {
+       rtnl_doit_func          doit;
+       rtnl_dumpit_func        dumpit;
+       rtnl_calcit_func        calcit;
+-};
++} __no_const;
+ static DEFINE_MUTEX(rtnl_mutex);
+@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
+       if (rtnl_link_ops_get(ops->kind))
+               return -EEXIST;
+-      if (!ops->dellink)
+-              ops->dellink = unregister_netdevice_queue;
++      if (!ops->dellink) {
++              pax_open_kernel();
++              *(void **)&ops->dellink = unregister_netdevice_queue;
++              pax_close_kernel();
++      }
+-      list_add_tail(&ops->list, &link_ops);
++      pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(__rtnl_link_register);
+@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
+       for_each_net(net) {
+               __rtnl_kill_links(net, ops);
+       }
+-      list_del(&ops->list);
++      pax_list_del((struct list_head *)&ops->list);
+ }
+ EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
+@@ -2374,7 +2377,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
+       struct nlattr *extfilt;
+       u32 filter_mask = 0;
+-      extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
++      extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
+                                 IFLA_EXT_MASK);
+       if (extfilt)
+               filter_mask = nla_get_u32(extfilt);
+diff --git a/net/core/scm.c b/net/core/scm.c
+index 03795d0..eaf7368 100644
+--- a/net/core/scm.c
++++ b/net/core/scm.c
+@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
+ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
+ {
+       struct cmsghdr __user *cm
+-              = (__force struct cmsghdr __user *)msg->msg_control;
++              = (struct cmsghdr __force_user *)msg->msg_control;
+       struct cmsghdr cmhdr;
+       int cmlen = CMSG_LEN(len);
+       int err;
+@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
+       err = -EFAULT;
+       if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
+               goto out;
+-      if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
++      if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
+               goto out;
+       cmlen = CMSG_SPACE(len);
+       if (msg->msg_controllen < cmlen)
+@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
+ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
+ {
+       struct cmsghdr __user *cm
+-              = (__force struct cmsghdr __user*)msg->msg_control;
++              = (struct cmsghdr __force_user *)msg->msg_control;
+       int fdmax = 0;
+       int fdnum = scm->fp->count;
+@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
+       if (fdnum < fdmax)
+               fdmax = fdnum;
+-      for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
++      for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
+            i++, cmfptr++)
+       {
+               struct socket *sock;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 1c1738c..4cab7f0 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3087,13 +3087,15 @@ void __init skb_init(void)
+       skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
+                                             sizeof(struct sk_buff),
+                                             0,
+-                                            SLAB_HWCACHE_ALIGN|SLAB_PANIC,
++                                            SLAB_HWCACHE_ALIGN|SLAB_PANIC|
++                                            SLAB_NO_SANITIZE,
+                                             NULL);
+       skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
+                                               (2*sizeof(struct sk_buff)) +
+                                               sizeof(atomic_t),
+                                               0,
+-                                              SLAB_HWCACHE_ALIGN|SLAB_PANIC,
++                                              SLAB_HWCACHE_ALIGN|SLAB_PANIC|
++                                              SLAB_NO_SANITIZE,
+                                               NULL);
+ }
+diff --git a/net/core/sock.c b/net/core/sock.c
+index d6d024c..6ea7ab4 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -390,7 +390,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+       struct sk_buff_head *list = &sk->sk_receive_queue;
+       if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
+-              atomic_inc(&sk->sk_drops);
++              atomic_inc_unchecked(&sk->sk_drops);
+               trace_sock_rcvqueue_full(sk, skb);
+               return -ENOMEM;
+       }
+@@ -400,7 +400,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+               return err;
+       if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
+-              atomic_inc(&sk->sk_drops);
++              atomic_inc_unchecked(&sk->sk_drops);
+               return -ENOBUFS;
+       }
+@@ -420,7 +420,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+       skb_dst_force(skb);
+       spin_lock_irqsave(&list->lock, flags);
+-      skb->dropcount = atomic_read(&sk->sk_drops);
++      skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
+       __skb_queue_tail(list, skb);
+       spin_unlock_irqrestore(&list->lock, flags);
+@@ -440,7 +440,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
+       skb->dev = NULL;
+       if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
+-              atomic_inc(&sk->sk_drops);
++              atomic_inc_unchecked(&sk->sk_drops);
+               goto discard_and_relse;
+       }
+       if (nested)
+@@ -458,7 +458,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
+               mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+       } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
+               bh_unlock_sock(sk);
+-              atomic_inc(&sk->sk_drops);
++              atomic_inc_unchecked(&sk->sk_drops);
+               goto discard_and_relse;
+       }
+@@ -933,12 +933,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+               struct timeval tm;
+       } v;
+-      int lv = sizeof(int);
+-      int len;
++      unsigned int lv = sizeof(int);
++      unsigned int len;
+       if (get_user(len, optlen))
+               return -EFAULT;
+-      if (len < 0)
++      if (len > INT_MAX)
+               return -EINVAL;
+       memset(&v, 0, sizeof(v));
+@@ -1090,11 +1090,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+       case SO_PEERNAME:
+       {
+-              char address[128];
++              char address[_K_SS_MAXSIZE];
+               if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
+                       return -ENOTCONN;
+-              if (lv < len)
++              if (lv < len || sizeof address < len)
+                       return -EINVAL;
+               if (copy_to_user(optval, address, len))
+                       return -EFAULT;
+@@ -1161,7 +1161,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+       if (len > lv)
+               len = lv;
+-      if (copy_to_user(optval, &v, len))
++      if (len > sizeof(v) || copy_to_user(optval, &v, len))
+               return -EFAULT;
+ lenout:
+       if (put_user(len, optlen))
+@@ -2277,7 +2277,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
+        */
+       smp_wmb();
+       atomic_set(&sk->sk_refcnt, 1);
+-      atomic_set(&sk->sk_drops, 0);
++      atomic_set_unchecked(&sk->sk_drops, 0);
+ }
+ EXPORT_SYMBOL(sock_init_data);
+diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
+index a0e9cf6..ef7f9ed 100644
+--- a/net/core/sock_diag.c
++++ b/net/core/sock_diag.c
+@@ -9,26 +9,33 @@
+ #include <linux/inet_diag.h>
+ #include <linux/sock_diag.h>
+-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
++static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
+ static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
+ static DEFINE_MUTEX(sock_diag_table_mutex);
+ int sock_diag_check_cookie(void *sk, __u32 *cookie)
+ {
++#ifndef CONFIG_GRKERNSEC_HIDESYM
+       if ((cookie[0] != INET_DIAG_NOCOOKIE ||
+            cookie[1] != INET_DIAG_NOCOOKIE) &&
+           ((u32)(unsigned long)sk != cookie[0] ||
+            (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
+               return -ESTALE;
+       else
++#endif
+               return 0;
+ }
+ EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
+ void sock_diag_save_cookie(void *sk, __u32 *cookie)
+ {
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++      cookie[0] = 0;
++      cookie[1] = 0;
++#else
+       cookie[0] = (u32)(unsigned long)sk;
+       cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
++#endif
+ }
+ EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
+@@ -113,8 +120,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
+       mutex_lock(&sock_diag_table_mutex);
+       if (sock_diag_handlers[hndl->family])
+               err = -EBUSY;
+-      else
++      else {
++              pax_open_kernel();
+               sock_diag_handlers[hndl->family] = hndl;
++              pax_close_kernel();
++      }
+       mutex_unlock(&sock_diag_table_mutex);
+       return err;
+@@ -130,7 +140,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
+       mutex_lock(&sock_diag_table_mutex);
+       BUG_ON(sock_diag_handlers[family] != hnld);
++      pax_open_kernel();
+       sock_diag_handlers[family] = NULL;
++      pax_close_kernel();
+       mutex_unlock(&sock_diag_table_mutex);
+ }
+ EXPORT_SYMBOL_GPL(sock_diag_unregister);
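Under CONFIG_GRKERNSEC_HIDESYM the sock_diag cookie above is no longer derived from the socket's kernel address and the corresponding check is skipped, so this interface stops handing kernel pointers to userspace. Restated as a small self-contained sketch (struct sock_obj and export_cookie() are invented names):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

struct sock_obj { int id; };

/* Exposing the raw address of a kernel object defeats address-space
 * layout secrecy; the hardened variant hands out an opaque zero cookie
 * instead of the split pointer value. */
static void export_cookie(const struct sock_obj *sk, uint32_t cookie[2], int hide)
{
        uintptr_t addr = (uintptr_t)sk;

        if (hide) {
                cookie[0] = 0;
                cookie[1] = 0;
        } else {
                cookie[0] = (uint32_t)addr;
                cookie[1] = (uint32_t)((addr >> 31) >> 1);
        }
}

int main(void)
{
        struct sock_obj sk = { 1 };
        uint32_t c[2];

        export_cookie(&sk, c, 1);
        printf("cookie: %08" PRIx32 " %08" PRIx32 "\n", c[0], c[1]);
        return 0;
}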
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index cfdb46a..cef55e1 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -28,7 +28,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
+ {
+       unsigned int orig_size, size;
+       int ret, i;
+-      ctl_table tmp = {
++      ctl_table_no_const tmp = {
+               .data = &size,
+               .maxlen = sizeof(size),
+               .mode = table->mode
+@@ -211,13 +211,12 @@ static struct ctl_table netns_core_table[] = {
+ static __net_init int sysctl_core_net_init(struct net *net)
+ {
+-      struct ctl_table *tbl;
++      ctl_table_no_const *tbl = NULL;
+       net->core.sysctl_somaxconn = SOMAXCONN;
+-      tbl = netns_core_table;
+       if (!net_eq(net, &init_net)) {
+-              tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
++              tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
+               if (tbl == NULL)
+                       goto err_dup;
+@@ -227,17 +226,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
+               if (net->user_ns != &init_user_ns) {
+                       tbl[0].procname = NULL;
+               }
+-      }
+-
+-      net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
++              net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
++      } else
++              net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
+       if (net->core.sysctl_hdr == NULL)
+               goto err_reg;
+       return 0;
+ err_reg:
+-      if (tbl != netns_core_table)
+-              kfree(tbl);
++      kfree(tbl);
+ err_dup:
+       return -ENOMEM;
+ }
+@@ -252,7 +250,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
+       kfree(tbl);
+ }
+-static __net_initdata struct pernet_operations sysctl_core_ops = {
++static __net_initconst struct pernet_operations sysctl_core_ops = {
+       .init = sysctl_core_net_init,
+       .exit = sysctl_core_net_exit,
+ };
+diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
+index c21f200..bc4565b 100644
+--- a/net/decnet/af_decnet.c
++++ b/net/decnet/af_decnet.c
+@@ -465,6 +465,7 @@ static struct proto dn_proto = {
+       .sysctl_rmem            = sysctl_decnet_rmem,
+       .max_header             = DN_MAX_NSP_DATA_HEADER + 64,
+       .obj_size               = sizeof(struct dn_sock),
++      .slab_flags             = SLAB_USERCOPY,
+ };
+ static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
+diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
+index a55eecc..dd8428c 100644
+--- a/net/decnet/sysctl_net_decnet.c
++++ b/net/decnet/sysctl_net_decnet.c
+@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
+       if (len > *lenp) len = *lenp;
+-      if (copy_to_user(buffer, addr, len))
++      if (len > sizeof addr || copy_to_user(buffer, addr, len))
+               return -EFAULT;
+       *lenp = len;
+@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
+       if (len > *lenp) len = *lenp;
+-      if (copy_to_user(buffer, devname, len))
++      if (len > sizeof devname || copy_to_user(buffer, devname, len))
+               return -EFAULT;
+       *lenp = len;
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index d01be2a..8976537 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1703,13 +1703,9 @@ static int __init inet_init(void)
+       BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
+-      sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
+-      if (!sysctl_local_reserved_ports)
+-              goto out;
+-
+       rc = proto_register(&tcp_prot, 1);
+       if (rc)
+-              goto out_free_reserved_ports;
++              goto out;
+       rc = proto_register(&udp_prot, 1);
+       if (rc)
+@@ -1818,8 +1814,6 @@ out_unregister_udp_proto:
+       proto_unregister(&udp_prot);
+ out_unregister_tcp_proto:
+       proto_unregister(&tcp_prot);
+-out_free_reserved_ports:
+-      kfree(sysctl_local_reserved_ports);
+       goto out;
+ }
+diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
+index 2e7f194..0fa4d6d 100644
+--- a/net/ipv4/ah4.c
++++ b/net/ipv4/ah4.c
+@@ -420,7 +420,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
+               return;
+       if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
+-              atomic_inc(&flow_cache_genid);
++              atomic_inc_unchecked(&flow_cache_genid);
+               rt_genid_bump(net);
+               ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index dfc39d4..0d4fa52 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -771,7 +771,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
+               ci = nla_data(tb[IFA_CACHEINFO]);
+               if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
+                       err = -EINVAL;
+-                      goto errout;
++                      goto errout_free;
+               }
+               *pvalid_lft = ci->ifa_valid;
+               *pprefered_lft = ci->ifa_prefered;
+@@ -779,6 +779,8 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
+       return ifa;
++errout_free:
++      inet_free_ifa(ifa);
+ errout:
+       return ERR_PTR(err);
+ }
+@@ -1529,7 +1531,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
+               idx = 0;
+               head = &net->dev_index_head[h];
+               rcu_read_lock();
+-              cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
++              cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
+                         net->dev_base_seq;
+               hlist_for_each_entry_rcu(dev, head, index_hlist) {
+                       if (idx < s_idx)
+@@ -1840,7 +1842,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
+               idx = 0;
+               head = &net->dev_index_head[h];
+               rcu_read_lock();
+-              cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
++              cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
+                         net->dev_base_seq;
+               hlist_for_each_entry_rcu(dev, head, index_hlist) {
+                       if (idx < s_idx)
+@@ -2065,7 +2067,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
+ #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
+       DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
+-static struct devinet_sysctl_table {
++static const struct devinet_sysctl_table {
+       struct ctl_table_header *sysctl_header;
+       struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
+ } devinet_sysctl = {
+@@ -2183,7 +2185,7 @@ static __net_init int devinet_init_net(struct net *net)
+       int err;
+       struct ipv4_devconf *all, *dflt;
+ #ifdef CONFIG_SYSCTL
+-      struct ctl_table *tbl = ctl_forward_entry;
++      ctl_table_no_const *tbl = NULL;
+       struct ctl_table_header *forw_hdr;
+ #endif
+@@ -2201,7 +2203,7 @@ static __net_init int devinet_init_net(struct net *net)
+                       goto err_alloc_dflt;
+ #ifdef CONFIG_SYSCTL
+-              tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
++              tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
+               if (tbl == NULL)
+                       goto err_alloc_ctl;
+@@ -2221,7 +2223,10 @@ static __net_init int devinet_init_net(struct net *net)
+               goto err_reg_dflt;
+       err = -ENOMEM;
+-      forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
++      if (!net_eq(net, &init_net))
++              forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
++      else
++              forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
+       if (forw_hdr == NULL)
+               goto err_reg_ctl;
+       net->ipv4.forw_hdr = forw_hdr;
+@@ -2237,8 +2242,7 @@ err_reg_ctl:
+ err_reg_dflt:
+       __devinet_sysctl_unregister(all);
+ err_reg_all:
+-      if (tbl != ctl_forward_entry)
+-              kfree(tbl);
++      kfree(tbl);
+ err_alloc_ctl:
+ #endif
+       if (dflt != &ipv4_devconf_dflt)
+diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
+index 4cfe34d..d2fac8a 100644
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
+       }
+       return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
+-               net_adj) & ~(align - 1)) + (net_adj - 2);
++               net_adj) & ~(align - 1)) + net_adj - 2;
+ }
+ static void esp4_err(struct sk_buff *skb, u32 info)
+@@ -503,7 +503,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
+               return;
+       if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
+-              atomic_inc(&flow_cache_genid);
++              atomic_inc_unchecked(&flow_cache_genid);
+               rt_genid_bump(net);
+               ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index c7629a2..b62d139 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
+ #ifdef CONFIG_IP_ROUTE_MULTIPATH
+               fib_sync_up(dev);
+ #endif
+-              atomic_inc(&net->ipv4.dev_addr_genid);
++              atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
+               rt_cache_flush(dev_net(dev));
+               break;
+       case NETDEV_DOWN:
+               fib_del_ifaddr(ifa, NULL);
+-              atomic_inc(&net->ipv4.dev_addr_genid);
++              atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
+               if (ifa->ifa_dev->ifa_list == NULL) {
+                       /* Last address was deleted from this interface.
+                        * Disable IP.
+@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
+ #ifdef CONFIG_IP_ROUTE_MULTIPATH
+               fib_sync_up(dev);
+ #endif
+-              atomic_inc(&net->ipv4.dev_addr_genid);
++              atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
+               rt_cache_flush(net);
+               break;
+       case NETDEV_DOWN:
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index 8f6cb7a..34507f9 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -765,7 +765,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
+       nh->nh_saddr = inet_select_addr(nh->nh_dev,
+                                       nh->nh_gw,
+                                       nh->nh_parent->fib_scope);
+-      nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
++      nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
+       return nh->nh_saddr;
+ }
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index 49616fe..6e8a13d 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -71,7 +71,6 @@
+ #include <linux/init.h>
+ #include <linux/list.h>
+ #include <linux/slab.h>
+-#include <linux/prefetch.h>
+ #include <linux/export.h>
+ #include <net/net_namespace.h>
+ #include <net/ip.h>
+@@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
+                       if (!c)
+                               continue;
+-                      if (IS_LEAF(c)) {
+-                              prefetch(rcu_dereference_rtnl(p->child[idx]));
++                      if (IS_LEAF(c))
+                               return (struct leaf *) c;
+-                      }
+                       /* Rescan start scanning in new node */
+                       p = (struct tnode *) c;
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 6acb541..9ea617d 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
+       .range = { 32768, 61000 },
+ };
+-unsigned long *sysctl_local_reserved_ports;
++unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
+ EXPORT_SYMBOL(sysctl_local_reserved_ports);
+ void inet_get_local_port_range(int *low, int *high)
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 6af375a..c493c74 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -18,12 +18,15 @@
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/wait.h>
++#include <linux/security.h>
+ #include <net/inet_connection_sock.h>
+ #include <net/inet_hashtables.h>
+ #include <net/secure_seq.h>
+ #include <net/ip.h>
++extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
++
+ /*
+  * Allocate and initialize a new local port bind bucket.
+  * The bindhash mutex for snum's hash chain must be held here.
+@@ -554,6 +557,8 @@ ok:
+                       twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
+               spin_unlock(&head->lock);
++              gr_update_task_in_ip_table(current, inet_sk(sk));
++
+               if (tw) {
+                       inet_twsk_deschedule(tw, death_row);
+                       while (twrefcnt) {
+diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
+index 000e3d2..5472da3 100644
+--- a/net/ipv4/inetpeer.c
++++ b/net/ipv4/inetpeer.c
+@@ -503,8 +503,8 @@ relookup:
+       if (p) {
+               p->daddr = *daddr;
+               atomic_set(&p->refcnt, 1);
+-              atomic_set(&p->rid, 0);
+-              atomic_set(&p->ip_id_count,
++              atomic_set_unchecked(&p->rid, 0);
++              atomic_set_unchecked(&p->ip_id_count,
+                               (daddr->family == AF_INET) ?
+                                       secure_ip_id(daddr->addr.a4) :
+                                       secure_ipv6_id(daddr->addr.a6));
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index b66910a..cfe416e 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -282,7 +282,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
+               return 0;
+       start = qp->rid;
+-      end = atomic_inc_return(&peer->rid);
++      end = atomic_inc_return_unchecked(&peer->rid);
+       qp->rid = end;
+       rc = qp->q.fragments && (end - start) > max;
+@@ -759,12 +759,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
+ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
+ {
+-      struct ctl_table *table;
++      ctl_table_no_const *table = NULL;
+       struct ctl_table_header *hdr;
+-      table = ip4_frags_ns_ctl_table;
+       if (!net_eq(net, &init_net)) {
+-              table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
++              table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
+               if (table == NULL)
+                       goto err_alloc;
+@@ -775,9 +774,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
+               /* Don't export sysctls to unprivileged users */
+               if (net->user_ns != &init_user_ns)
+                       table[0].procname = NULL;
+-      }
++              hdr = register_net_sysctl(net, "net/ipv4", table);
++      } else
++              hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
+-      hdr = register_net_sysctl(net, "net/ipv4", table);
+       if (hdr == NULL)
+               goto err_reg;
+@@ -785,8 +785,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
+       return 0;
+ err_reg:
+-      if (!net_eq(net, &init_net))
+-              kfree(table);
++      kfree(table);
+ err_alloc:
+       return -ENOMEM;
+ }
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 855004f..9644112 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
+ module_param(log_ecn_error, bool, 0644);
+ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
++static struct rtnl_link_ops ipgre_link_ops;
+ static int ipgre_tunnel_init(struct net_device *dev);
+ static int ipgre_net_id __read_mostly;
+@@ -572,7 +572,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
+       if (daddr)
+               memcpy(&iph->daddr, daddr, 4);
+       if (iph->daddr)
+-              return t->hlen;
++              return t->hlen + sizeof(*iph);
+       return -(t->hlen + sizeof(*iph));
+ }
+@@ -919,7 +919,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
+       [IFLA_GRE_PMTUDISC]     = { .type = NLA_U8 },
+ };
+-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
++static struct rtnl_link_ops ipgre_link_ops = {
+       .kind           = "gre",
+       .maxtype        = IFLA_GRE_MAX,
+       .policy         = ipgre_policy,
+@@ -933,7 +933,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
+       .fill_info      = ipgre_fill_info,
+ };
+-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
++static struct rtnl_link_ops ipgre_tap_ops = {
+       .kind           = "gretap",
+       .maxtype        = IFLA_GRE_MAX,
+       .policy         = ipgre_policy,
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index d9c4f11..02b82dbc 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -1152,7 +1152,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
+               len = min_t(unsigned int, len, opt->optlen);
+               if (put_user(len, optlen))
+                       return -EFAULT;
+-              if (copy_to_user(optval, opt->__data, len))
++              if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
++                  copy_to_user(optval, opt->__data, len))
+                       return -EFAULT;
+               return 0;
+       }
+@@ -1283,7 +1284,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
+               if (sk->sk_type != SOCK_STREAM)
+                       return -ENOPROTOOPT;
+-              msg.msg_control = optval;
++              msg.msg_control = (void __force_kernel *)optval;
+               msg.msg_controllen = len;
+               msg.msg_flags = flags;
+diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
+index 17cc0ff..63856c4 100644
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -47,7 +47,7 @@
+ #define HASH_SIZE  16
+ #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
+-static struct rtnl_link_ops vti_link_ops __read_mostly;
++static struct rtnl_link_ops vti_link_ops;
+ static int vti_net_id __read_mostly;
+ struct vti_net {
+@@ -840,7 +840,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
+       [IFLA_VTI_REMOTE]       = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+ };
+-static struct rtnl_link_ops vti_link_ops __read_mostly = {
++static struct rtnl_link_ops vti_link_ops = {
+       .kind           = "vti",
+       .maxtype        = IFLA_VTI_MAX,
+       .policy         = vti_policy,
+diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
+index 59cb8c7..a72160c 100644
+--- a/net/ipv4/ipcomp.c
++++ b/net/ipv4/ipcomp.c
+@@ -48,7 +48,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
+               return;
+       if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
+-              atomic_inc(&flow_cache_genid);
++              atomic_inc_unchecked(&flow_cache_genid);
+               rt_genid_bump(net);
+               ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
+diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
+index efa1138..20dbba0 100644
+--- a/net/ipv4/ipconfig.c
++++ b/net/ipv4/ipconfig.c
+@@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
+       mm_segment_t oldfs = get_fs();
+       set_fs(get_ds());
+-      res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
++      res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
+       set_fs(oldfs);
+       return res;
+ }
+@@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
+       mm_segment_t oldfs = get_fs();
+       set_fs(get_ds());
+-      res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
++      res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
+       set_fs(oldfs);
+       return res;
+ }
+@@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
+       mm_segment_t oldfs = get_fs();
+       set_fs(get_ds());
+-      res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
++      res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
+       set_fs(oldfs);
+       return res;
+ }
+diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
+index 7cfc456..e726868 100644
+--- a/net/ipv4/ipip.c
++++ b/net/ipv4/ipip.c
+@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+ static int ipip_net_id __read_mostly;
+ static int ipip_tunnel_init(struct net_device *dev);
+-static struct rtnl_link_ops ipip_link_ops __read_mostly;
++static struct rtnl_link_ops ipip_link_ops;
+ static int ipip_err(struct sk_buff *skb, u32 info)
+ {
+@@ -406,7 +406,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
+       [IFLA_IPTUN_PMTUDISC]           = { .type = NLA_U8 },
+ };
+-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
++static struct rtnl_link_ops ipip_link_ops = {
+       .kind           = "ipip",
+       .maxtype        = IFLA_IPTUN_MAX,
+       .policy         = ipip_policy,
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index 85a4f21..1beb1f5 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -880,14 +880,14 @@ static int compat_table_info(const struct xt_table_info *info,
+ #endif
+ static int get_info(struct net *net, void __user *user,
+-                    const int *len, int compat)
++                    int len, int compat)
+ {
+       char name[XT_TABLE_MAXNAMELEN];
+       struct xt_table *t;
+       int ret;
+-      if (*len != sizeof(struct arpt_getinfo)) {
+-              duprintf("length %u != %Zu\n", *len,
++      if (len != sizeof(struct arpt_getinfo)) {
++              duprintf("length %u != %Zu\n", len,
+                        sizeof(struct arpt_getinfo));
+               return -EINVAL;
+       }
+@@ -924,7 +924,7 @@ static int get_info(struct net *net, void __user *user,
+               info.size = private->size;
+               strcpy(info.name, name);
+-              if (copy_to_user(user, &info, *len) != 0)
++              if (copy_to_user(user, &info, len) != 0)
+                       ret = -EFAULT;
+               else
+                       ret = 0;
+@@ -1683,7 +1683,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
+       switch (cmd) {
+       case ARPT_SO_GET_INFO:
+-              ret = get_info(sock_net(sk), user, len, 1);
++              ret = get_info(sock_net(sk), user, *len, 1);
+               break;
+       case ARPT_SO_GET_ENTRIES:
+               ret = compat_get_entries(sock_net(sk), user, len);
+@@ -1728,7 +1728,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
+       switch (cmd) {
+       case ARPT_SO_GET_INFO:
+-              ret = get_info(sock_net(sk), user, len, 0);
++              ret = get_info(sock_net(sk), user, *len, 0);
+               break;
+       case ARPT_SO_GET_ENTRIES:
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index d23118d..6ad7277 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
+ #endif
+ static int get_info(struct net *net, void __user *user,
+-                    const int *len, int compat)
++                    int len, int compat)
+ {
+       char name[XT_TABLE_MAXNAMELEN];
+       struct xt_table *t;
+       int ret;
+-      if (*len != sizeof(struct ipt_getinfo)) {
+-              duprintf("length %u != %zu\n", *len,
++      if (len != sizeof(struct ipt_getinfo)) {
++              duprintf("length %u != %zu\n", len,
+                        sizeof(struct ipt_getinfo));
+               return -EINVAL;
+       }
+@@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
+               info.size = private->size;
+               strcpy(info.name, name);
+-              if (copy_to_user(user, &info, *len) != 0)
++              if (copy_to_user(user, &info, len) != 0)
+                       ret = -EFAULT;
+               else
+                       ret = 0;
+@@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+       switch (cmd) {
+       case IPT_SO_GET_INFO:
+-              ret = get_info(sock_net(sk), user, len, 1);
++              ret = get_info(sock_net(sk), user, *len, 1);
+               break;
+       case IPT_SO_GET_ENTRIES:
+               ret = compat_get_entries(sock_net(sk), user, len);
+@@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+       switch (cmd) {
+       case IPT_SO_GET_INFO:
+-              ret = get_info(sock_net(sk), user, len, 0);
++              ret = get_info(sock_net(sk), user, *len, 0);
+               break;
+       case IPT_SO_GET_ENTRIES:
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 7d93d62..cbbf2a3 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -843,7 +843,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
+               from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
+               0, sock_i_ino(sp),
+               atomic_read(&sp->sk_refcnt), sp,
+-              atomic_read(&sp->sk_drops), len);
++              atomic_read_unchecked(&sp->sk_drops), len);
+ }
+ static int ping_seq_show(struct seq_file *seq, void *v)
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index dd44e0a..06dcca4 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -309,7 +309,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ int raw_rcv(struct sock *sk, struct sk_buff *skb)
+ {
+       if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
+-              atomic_inc(&sk->sk_drops);
++              atomic_inc_unchecked(&sk->sk_drops);
+               kfree_skb(skb);
+               return NET_RX_DROP;
+       }
+@@ -745,16 +745,20 @@ static int raw_init(struct sock *sk)
+ static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
+ {
++      struct icmp_filter filter;
++
+       if (optlen > sizeof(struct icmp_filter))
+               optlen = sizeof(struct icmp_filter);
+-      if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
++      if (copy_from_user(&filter, optval, optlen))
+               return -EFAULT;
++      raw_sk(sk)->filter = filter;
+       return 0;
+ }
+ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
+ {
+       int len, ret = -EFAULT;
++      struct icmp_filter filter;
+       if (get_user(len, optlen))
+               goto out;
+@@ -764,8 +768,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
+       if (len > sizeof(struct icmp_filter))
+               len = sizeof(struct icmp_filter);
+       ret = -EFAULT;
+-      if (put_user(len, optlen) ||
+-          copy_to_user(optval, &raw_sk(sk)->filter, len))
++      filter = raw_sk(sk)->filter;
++      if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
+               goto out;
+       ret = 0;
+ out:  return ret;
+@@ -994,7 +998,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
+               0, 0L, 0,
+               from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+               0, sock_i_ino(sp),
+-              atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
++              atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
+ }
+ static int raw_seq_show(struct seq_file *seq, void *v)
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index d35bbf0..faa3ab8 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2558,34 +2558,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
+               .maxlen         = sizeof(int),
+               .mode           = 0200,
+               .proc_handler   = ipv4_sysctl_rtcache_flush,
++              .extra1         = &init_net,
+       },
+       { },
+ };
+ static __net_init int sysctl_route_net_init(struct net *net)
+ {
+-      struct ctl_table *tbl;
++      ctl_table_no_const *tbl = NULL;
+-      tbl = ipv4_route_flush_table;
+       if (!net_eq(net, &init_net)) {
+-              tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
++              tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
+               if (tbl == NULL)
+                       goto err_dup;
+               /* Don't export sysctls to unprivileged users */
+               if (net->user_ns != &init_user_ns)
+                       tbl[0].procname = NULL;
+-      }
+-      tbl[0].extra1 = net;
++              tbl[0].extra1 = net;
++              net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
++      } else
++              net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
+-      net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
+       if (net->ipv4.route_hdr == NULL)
+               goto err_reg;
+       return 0;
+ err_reg:
+-      if (tbl != ipv4_route_flush_table)
+-              kfree(tbl);
++      kfree(tbl);
+ err_dup:
+       return -ENOMEM;
+ }
+@@ -2608,7 +2608,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
+ static __net_init int rt_genid_init(struct net *net)
+ {
+-      atomic_set(&net->rt_genid, 0);
++      atomic_set_unchecked(&net->rt_genid, 0);
+       get_random_bytes(&net->ipv4.dev_addr_genid,
+                        sizeof(net->ipv4.dev_addr_genid));
+       return 0;
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 3f25e75..3ae0f4d 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -57,7 +57,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
+ {
+       int ret;
+       int range[2];
+-      ctl_table tmp = {
++      ctl_table_no_const tmp = {
+               .data = &range,
+               .maxlen = sizeof(range),
+               .mode = table->mode,
+@@ -110,7 +110,7 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
+       int ret;
+       gid_t urange[2];
+       kgid_t low, high;
+-      ctl_table tmp = {
++      ctl_table_no_const tmp = {
+               .data = &urange,
+               .maxlen = sizeof(urange),
+               .mode = table->mode,
+@@ -141,7 +141,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write,
+                                      void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+       char val[TCP_CA_NAME_MAX];
+-      ctl_table tbl = {
++      ctl_table_no_const tbl = {
+               .data = val,
+               .maxlen = TCP_CA_NAME_MAX,
+       };
+@@ -160,7 +160,7 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
+                                                void __user *buffer, size_t *lenp,
+                                                loff_t *ppos)
+ {
+-      ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
++      ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
+       int ret;
+       tbl.data = kmalloc(tbl.maxlen, GFP_USER);
+@@ -177,7 +177,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
+                                          void __user *buffer, size_t *lenp,
+                                          loff_t *ppos)
+ {
+-      ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
++      ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
+       int ret;
+       tbl.data = kmalloc(tbl.maxlen, GFP_USER);
+@@ -203,15 +203,17 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
+       struct mem_cgroup *memcg;
+ #endif
+-      ctl_table tmp = {
++      ctl_table_no_const tmp = {
+               .data = &vec,
+               .maxlen = sizeof(vec),
+               .mode = ctl->mode,
+       };
+       if (!write) {
+-              ctl->data = &net->ipv4.sysctl_tcp_mem;
+-              return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
++              ctl_table_no_const tcp_mem = *ctl;
++
++              tcp_mem.data = &net->ipv4.sysctl_tcp_mem;
++              return proc_doulongvec_minmax(&tcp_mem, write, buffer, lenp, ppos);
+       }
+       ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
+@@ -238,7 +240,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
+ static int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
+                                size_t *lenp, loff_t *ppos)
+ {
+-      ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
++      ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
+       struct tcp_fastopen_context *ctxt;
+       int ret;
+       u32  user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
+@@ -481,7 +483,7 @@ static struct ctl_table ipv4_table[] = {
+       },
+       {
+               .procname       = "ip_local_reserved_ports",
+-              .data           = NULL, /* initialized in sysctl_ipv4_init */
++              .data           = sysctl_local_reserved_ports,
+               .maxlen         = 65536,
+               .mode           = 0644,
+               .proc_handler   = proc_do_large_bitmap,
+@@ -846,11 +848,10 @@ static struct ctl_table ipv4_net_table[] = {
+ static __net_init int ipv4_sysctl_init_net(struct net *net)
+ {
+-      struct ctl_table *table;
++      ctl_table_no_const *table = NULL;
+-      table = ipv4_net_table;
+       if (!net_eq(net, &init_net)) {
+-              table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
++              table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
+               if (table == NULL)
+                       goto err_alloc;
+@@ -885,15 +886,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
+       tcp_init_mem(net);
+-      net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
++      if (!net_eq(net, &init_net))
++              net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
++      else
++              net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
+       if (net->ipv4.ipv4_hdr == NULL)
+               goto err_reg;
+       return 0;
+ err_reg:
+-      if (!net_eq(net, &init_net))
+-              kfree(table);
++      kfree(table);
+ err_alloc:
+       return -ENOMEM;
+ }
+@@ -915,16 +918,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
+ static __init int sysctl_ipv4_init(void)
+ {
+       struct ctl_table_header *hdr;
+-      struct ctl_table *i;
+-
+-      for (i = ipv4_table; i->procname; i++) {
+-              if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
+-                      i->data = sysctl_local_reserved_ports;
+-                      break;
+-              }
+-      }
+-      if (!i->procname)
+-              return -EINVAL;
+       hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
+       if (hdr == NULL)
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 9c62257..651cc27 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -4436,7 +4436,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
+  * simplifies code)
+  */
+ static void
+-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
++__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
+            struct sk_buff *head, struct sk_buff *tail,
+            u32 start, u32 end)
+ {
+@@ -5522,6 +5522,7 @@ discard:
+           tcp_paws_reject(&tp->rx_opt, 0))
+               goto discard_and_undo;
++#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
+       if (th->syn) {
+               /* We see SYN without ACK. It is attempt of
+                * simultaneous connect with crossed SYNs.
+@@ -5572,6 +5573,7 @@ discard:
+               goto discard;
+ #endif
+       }
++#endif
+       /* "fifth, if neither of the SYN or RST bits is set then
+        * drop the segment and return."
+        */
+@@ -5616,7 +5618,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+                       goto discard;
+               if (th->syn) {
+-                      if (th->fin)
++                      if (th->fin || th->urg || th->psh)
+                               goto discard;
+                       if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
+                               return 1;
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 7999fc5..c812f42 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
+ EXPORT_SYMBOL(sysctl_tcp_low_latency);
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++extern int grsec_enable_blackhole;
++#endif
++
+ #ifdef CONFIG_TCP_MD5SIG
+ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
+                              __be32 daddr, __be32 saddr, const struct tcphdr *th);
+@@ -1855,6 +1859,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
+       return 0;
+ reset:
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++      if (!grsec_enable_blackhole)
++#endif
+       tcp_v4_send_reset(rsk, skb);
+ discard:
+       kfree_skb(skb);
+@@ -2000,12 +2007,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
+       TCP_SKB_CB(skb)->sacked  = 0;
+       sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
+-      if (!sk)
++      if (!sk) {
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++              ret = 1;
++#endif
+               goto no_tcp_socket;
+-
++      }
+ process:
+-      if (sk->sk_state == TCP_TIME_WAIT)
++      if (sk->sk_state == TCP_TIME_WAIT) {
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++              ret = 2;
++#endif
+               goto do_time_wait;
++      }
+       if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
+               NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+@@ -2058,6 +2072,10 @@ csum_error:
+ bad_packet:
+               TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
+       } else {
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++              if (!grsec_enable_blackhole || (ret == 1 &&
++                  (skb->dev->flags & IFF_LOOPBACK)))
++#endif
+               tcp_v4_send_reset(NULL, skb);
+       }
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index 0f01788..d52a859 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -27,6 +27,10 @@
+ #include <net/inet_common.h>
+ #include <net/xfrm.h>
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++extern int grsec_enable_blackhole;
++#endif
++
+ int sysctl_tcp_syncookies __read_mostly = 1;
+ EXPORT_SYMBOL(sysctl_tcp_syncookies);
+@@ -717,7 +721,10 @@ embryonic_reset:
+                * avoid becoming vulnerable to outside attack aiming at
+                * resetting legit local connections.
+                */
+-              req->rsk_ops->send_reset(sk, skb);
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++              if (!grsec_enable_blackhole)
++#endif
++                      req->rsk_ops->send_reset(sk, skb);
+       } else if (fastopen) { /* received a valid RST pkt */
+               reqsk_fastopen_remove(sk, req, true);
+               tcp_reset(sk);
+diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
+index d4943f6..e7a74a5 100644
+--- a/net/ipv4/tcp_probe.c
++++ b/net/ipv4/tcp_probe.c
+@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
+               if (cnt + width >= len)
+                       break;
+-              if (copy_to_user(buf + cnt, tbuf, width))
++              if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
+                       return -EFAULT;
+               cnt += width;
+       }
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 4b85e6f..22f9ac9 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -22,6 +22,10 @@
+ #include <linux/gfp.h>
+ #include <net/tcp.h>
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++extern int grsec_lastack_retries;
++#endif
++
+ int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
+ int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
+ int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
+@@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
+               }
+       }
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++      if ((sk->sk_state == TCP_LAST_ACK) &&
++          (grsec_lastack_retries > 0) &&
++          (grsec_lastack_retries < retry_until))
++              retry_until = grsec_lastack_retries;
++#endif
++
+       if (retransmits_timed_out(sk, retry_until,
+                                 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
+               /* Has it gone just too far? */
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 93b731d..5a2dd92 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -87,6 +87,7 @@
+ #include <linux/types.h>
+ #include <linux/fcntl.h>
+ #include <linux/module.h>
++#include <linux/security.h>
+ #include <linux/socket.h>
+ #include <linux/sockios.h>
+ #include <linux/igmp.h>
+@@ -111,6 +112,10 @@
+ #include <trace/events/skb.h>
+ #include "udp_impl.h"
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++extern int grsec_enable_blackhole;
++#endif
++
+ struct udp_table udp_table __read_mostly;
+ EXPORT_SYMBOL(udp_table);
+@@ -594,6 +599,9 @@ found:
+       return s;
+ }
++extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
++extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
++
+ /*
+  * This routine is called by the ICMP module when it gets some
+  * sort of error condition.  If err < 0 then the socket should
+@@ -890,9 +898,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+               dport = usin->sin_port;
+               if (dport == 0)
+                       return -EINVAL;
++
++              err = gr_search_udp_sendmsg(sk, usin);
++              if (err)
++                      return err;
+       } else {
+               if (sk->sk_state != TCP_ESTABLISHED)
+                       return -EDESTADDRREQ;
++
++              err = gr_search_udp_sendmsg(sk, NULL);
++              if (err)
++                      return err;
++
+               daddr = inet->inet_daddr;
+               dport = inet->inet_dport;
+               /* Open fast path for connected socket.
+@@ -1136,7 +1153,7 @@ static unsigned int first_packet_length(struct sock *sk)
+                                IS_UDPLITE(sk));
+               UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+                                IS_UDPLITE(sk));
+-              atomic_inc(&sk->sk_drops);
++              atomic_inc_unchecked(&sk->sk_drops);
+               __skb_unlink(skb, rcvq);
+               __skb_queue_tail(&list_kill, skb);
+       }
+@@ -1222,6 +1239,10 @@ try_again:
+       if (!skb)
+               goto out;
++      err = gr_search_udp_recvmsg(sk, skb);
++      if (err)
++              goto out_free;
++
+       ulen = skb->len - sizeof(struct udphdr);
+       copied = len;
+       if (copied > ulen)
+@@ -1255,7 +1276,7 @@ try_again:
+       if (unlikely(err)) {
+               trace_kfree_skb(skb, udp_recvmsg);
+               if (!peeked) {
+-                      atomic_inc(&sk->sk_drops);
++                      atomic_inc_unchecked(&sk->sk_drops);
+                       UDP_INC_STATS_USER(sock_net(sk),
+                                          UDP_MIB_INERRORS, is_udplite);
+               }
+@@ -1542,7 +1563,7 @@ csum_error:
+       UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
+ drop:
+       UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+-      atomic_inc(&sk->sk_drops);
++      atomic_inc_unchecked(&sk->sk_drops);
+       kfree_skb(skb);
+       return -1;
+ }
+@@ -1561,7 +1582,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
+                       skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
+               if (!skb1) {
+-                      atomic_inc(&sk->sk_drops);
++                      atomic_inc_unchecked(&sk->sk_drops);
+                       UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+                                        IS_UDPLITE(sk));
+                       UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+@@ -1730,6 +1751,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+               goto csum_error;
+       UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++      if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
++#endif
+       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+       /*
+@@ -2160,7 +2184,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
+               from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
+               0, sock_i_ino(sp),
+               atomic_read(&sp->sk_refcnt), sp,
+-              atomic_read(&sp->sk_drops), len);
++              atomic_read_unchecked(&sp->sk_drops), len);
+ }
+ int udp4_seq_show(struct seq_file *seq, void *v)
+diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
+index 9a459be..086b866 100644
+--- a/net/ipv4/xfrm4_policy.c
++++ b/net/ipv4/xfrm4_policy.c
+@@ -264,19 +264,18 @@ static struct ctl_table xfrm4_policy_table[] = {
+ static int __net_init xfrm4_net_init(struct net *net)
+ {
+-      struct ctl_table *table;
++      ctl_table_no_const *table = NULL;
+       struct ctl_table_header *hdr;
+-      table = xfrm4_policy_table;
+       if (!net_eq(net, &init_net)) {
+-              table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
++              table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
+               if (!table)
+                       goto err_alloc;
+               table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
+-      }
+-
+-      hdr = register_net_sysctl(net, "net/ipv4", table);
++              hdr = register_net_sysctl(net, "net/ipv4", table);
++      } else
++              hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
+       if (!hdr)
+               goto err_reg;
+@@ -284,8 +283,7 @@ static int __net_init xfrm4_net_init(struct net *net)
+       return 0;
+ err_reg:
+-      if (!net_eq(net, &init_net))
+-              kfree(table);
++      kfree(table);
+ err_alloc:
+       return -ENOMEM;
+ }
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index fb8c94c..fb18024 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -621,7 +621,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
+               idx = 0;
+               head = &net->dev_index_head[h];
+               rcu_read_lock();
+-              cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
++              cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
+                         net->dev_base_seq;
+               hlist_for_each_entry_rcu(dev, head, index_hlist) {
+                       if (idx < s_idx)
+@@ -2380,7 +2380,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
+               p.iph.ihl = 5;
+               p.iph.protocol = IPPROTO_IPV6;
+               p.iph.ttl = 64;
+-              ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
++              ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
+               if (ops->ndo_do_ioctl) {
+                       mm_segment_t oldfs = get_fs();
+@@ -4002,7 +4002,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
+       s_ip_idx = ip_idx = cb->args[2];
+       rcu_read_lock();
+-      cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
++      cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
+       for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+               idx = 0;
+               head = &net->dev_index_head[h];
+@@ -4587,7 +4587,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
+                       dst_free(&ifp->rt->dst);
+               break;
+       }
+-      atomic_inc(&net->ipv6.dev_addr_genid);
++      atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
+ }
+ static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
+@@ -4607,7 +4607,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
+       int *valp = ctl->data;
+       int val = *valp;
+       loff_t pos = *ppos;
+-      ctl_table lctl;
++      ctl_table_no_const lctl;
+       int ret;
+       /*
+@@ -4689,7 +4689,7 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
+       int *valp = ctl->data;
+       int val = *valp;
+       loff_t pos = *ppos;
+-      ctl_table lctl;
++      ctl_table_no_const lctl;
+       int ret;
+       /*
+diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
+index 40ffd72..aeac0dc 100644
+--- a/net/ipv6/esp6.c
++++ b/net/ipv6/esp6.c
+@@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
+               net_adj = 0;
+       return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
+-               net_adj) & ~(align - 1)) + (net_adj - 2);
++               net_adj) & ~(align - 1)) + net_adj - 2;
+ }
+ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index b4ff0a4..db9b764 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -980,7 +980,7 @@ ctl_table ipv6_icmp_table_template[] = {
+ struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
+ {
+-      struct ctl_table *table;
++      ctl_table_no_const *table;
+       table = kmemdup(ipv6_icmp_table_template,
+                       sizeof(ipv6_icmp_table_template),
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index ecd6073..58162ae 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -74,7 +74,7 @@ struct ip6gre_net {
+       struct net_device *fb_tunnel_dev;
+ };
+-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
++static struct rtnl_link_ops ip6gre_link_ops;
+ static int ip6gre_tunnel_init(struct net_device *dev);
+ static void ip6gre_tunnel_setup(struct net_device *dev);
+ static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
+@@ -1283,7 +1283,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
+ }
+-static struct inet6_protocol ip6gre_protocol __read_mostly = {
++static struct inet6_protocol ip6gre_protocol = {
+       .handler     = ip6gre_rcv,
+       .err_handler = ip6gre_err,
+       .flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
+@@ -1617,7 +1617,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
+       [IFLA_GRE_FLAGS]       = { .type = NLA_U32 },
+ };
+-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
++static struct rtnl_link_ops ip6gre_link_ops = {
+       .kind           = "ip6gre",
+       .maxtype        = IFLA_GRE_MAX,
+       .policy         = ip6gre_policy,
+@@ -1630,7 +1630,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
+       .fill_info      = ip6gre_fill_info,
+ };
+-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
++static struct rtnl_link_ops ip6gre_tap_ops = {
+       .kind           = "ip6gretap",
+       .maxtype        = IFLA_GRE_MAX,
+       .policy         = ip6gre_policy,
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 1e55866..b398dab 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -88,7 +88,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
+ static int ip6_tnl_dev_init(struct net_device *dev);
+ static void ip6_tnl_dev_setup(struct net_device *dev);
+-static struct rtnl_link_ops ip6_link_ops __read_mostly;
++static struct rtnl_link_ops ip6_link_ops;
+ static int ip6_tnl_net_id __read_mostly;
+ struct ip6_tnl_net {
+@@ -1672,7 +1672,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
+       [IFLA_IPTUN_PROTO]              = { .type = NLA_U8 },
+ };
+-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
++static struct rtnl_link_ops ip6_link_ops = {
+       .kind           = "ip6tnl",
+       .maxtype        = IFLA_IPTUN_MAX,
+       .policy         = ip6_tnl_policy,
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index d1e2e8e..51c19ae 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
+               if (sk->sk_type != SOCK_STREAM)
+                       return -ENOPROTOOPT;
+-              msg.msg_control = optval;
++              msg.msg_control = (void __force_kernel *)optval;
+               msg.msg_controllen = len;
+               msg.msg_flags = flags;
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 44400c2..8e11f52 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -1078,14 +1078,14 @@ static int compat_table_info(const struct xt_table_info *info,
+ #endif
+ static int get_info(struct net *net, void __user *user,
+-                    const int *len, int compat)
++                    int len, int compat)
+ {
+       char name[XT_TABLE_MAXNAMELEN];
+       struct xt_table *t;
+       int ret;
+-      if (*len != sizeof(struct ip6t_getinfo)) {
+-              duprintf("length %u != %zu\n", *len,
++      if (len != sizeof(struct ip6t_getinfo)) {
++              duprintf("length %u != %zu\n", len,
+                        sizeof(struct ip6t_getinfo));
+               return -EINVAL;
+       }
+@@ -1122,7 +1122,7 @@ static int get_info(struct net *net, void __user *user,
+               info.size = private->size;
+               strcpy(info.name, name);
+-              if (copy_to_user(user, &info, *len) != 0)
++              if (copy_to_user(user, &info, len) != 0)
+                       ret = -EFAULT;
+               else
+                       ret = 0;
+@@ -1976,7 +1976,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+       switch (cmd) {
+       case IP6T_SO_GET_INFO:
+-              ret = get_info(sock_net(sk), user, len, 1);
++              ret = get_info(sock_net(sk), user, *len, 1);
+               break;
+       case IP6T_SO_GET_ENTRIES:
+               ret = compat_get_entries(sock_net(sk), user, len);
+@@ -2023,7 +2023,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+       switch (cmd) {
+       case IP6T_SO_GET_INFO:
+-              ret = get_info(sock_net(sk), user, len, 0);
++              ret = get_info(sock_net(sk), user, *len, 0);
+               break;
+       case IP6T_SO_GET_ENTRIES:
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index dffdc1a..ccc6678 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
+ static int nf_ct_frag6_sysctl_register(struct net *net)
+ {
+-      struct ctl_table *table;
++      ctl_table_no_const *table = NULL;
+       struct ctl_table_header *hdr;
+-      table = nf_ct_frag6_sysctl_table;
+       if (!net_eq(net, &init_net)) {
+-              table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
++              table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
+                               GFP_KERNEL);
+               if (table == NULL)
+                       goto err_alloc;
+@@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
+               table[0].data = &net->nf_frag.frags.timeout;
+               table[1].data = &net->nf_frag.frags.low_thresh;
+               table[2].data = &net->nf_frag.frags.high_thresh;
+-      }
+-
+-      hdr = register_net_sysctl(net, "net/netfilter", table);
++              hdr = register_net_sysctl(net, "net/netfilter", table);
++      } else
++              hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
+       if (hdr == NULL)
+               goto err_reg;
+@@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
+       return 0;
+ err_reg:
+-      if (!net_eq(net, &init_net))
+-              kfree(table);
++      kfree(table);
+ err_alloc:
+       return -ENOMEM;
+ }
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index eedff8c..6e13a47 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -378,7 +378,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ {
+       if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
+           skb_checksum_complete(skb)) {
+-              atomic_inc(&sk->sk_drops);
++              atomic_inc_unchecked(&sk->sk_drops);
+               kfree_skb(skb);
+               return NET_RX_DROP;
+       }
+@@ -406,7 +406,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
+       struct raw6_sock *rp = raw6_sk(sk);
+       if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
+-              atomic_inc(&sk->sk_drops);
++              atomic_inc_unchecked(&sk->sk_drops);
+               kfree_skb(skb);
+               return NET_RX_DROP;
+       }
+@@ -430,7 +430,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
+       if (inet->hdrincl) {
+               if (skb_checksum_complete(skb)) {
+-                      atomic_inc(&sk->sk_drops);
++                      atomic_inc_unchecked(&sk->sk_drops);
+                       kfree_skb(skb);
+                       return NET_RX_DROP;
+               }
+@@ -602,7 +602,7 @@ out:
+       return err;
+ }
+-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
++static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
+                       struct flowi6 *fl6, struct dst_entry **dstp,
+                       unsigned int flags)
+ {
+@@ -914,12 +914,15 @@ do_confirm:
+ static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
+                              char __user *optval, int optlen)
+ {
++      struct icmp6_filter filter;
++
+       switch (optname) {
+       case ICMPV6_FILTER:
+               if (optlen > sizeof(struct icmp6_filter))
+                       optlen = sizeof(struct icmp6_filter);
+-              if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
++              if (copy_from_user(&filter, optval, optlen))
+                       return -EFAULT;
++              raw6_sk(sk)->filter = filter;
+               return 0;
+       default:
+               return -ENOPROTOOPT;
+@@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
+                              char __user *optval, int __user *optlen)
+ {
+       int len;
++      struct icmp6_filter filter;
+       switch (optname) {
+       case ICMPV6_FILTER:
+@@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
+                       len = sizeof(struct icmp6_filter);
+               if (put_user(len, optlen))
+                       return -EFAULT;
+-              if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
++              filter = raw6_sk(sk)->filter;
++              if (len > sizeof filter || copy_to_user(optval, &filter, len))
+                       return -EFAULT;
+               return 0;
+       default:
+@@ -1251,7 +1256,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
+                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+                  0,
+                  sock_i_ino(sp),
+-                 atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
++                 atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
+ }
+ static int raw6_seq_show(struct seq_file *seq, void *v)
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index 790d9f4..68ae078 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -621,12 +621,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
+ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
+ {
+-      struct ctl_table *table;
++      ctl_table_no_const *table = NULL;
+       struct ctl_table_header *hdr;
+-      table = ip6_frags_ns_ctl_table;
+       if (!net_eq(net, &init_net)) {
+-              table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
++              table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
+               if (table == NULL)
+                       goto err_alloc;
+@@ -637,9 +636,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
+               /* Don't export sysctls to unprivileged users */
+               if (net->user_ns != &init_user_ns)
+                       table[0].procname = NULL;
+-      }
++              hdr = register_net_sysctl(net, "net/ipv6", table);
++      } else
++              hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
+-      hdr = register_net_sysctl(net, "net/ipv6", table);
+       if (hdr == NULL)
+               goto err_reg;
+@@ -647,8 +647,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
+       return 0;
+ err_reg:
+-      if (!net_eq(net, &init_net))
+-              kfree(table);
++      kfree(table);
+ err_alloc:
+       return -ENOMEM;
+ }
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index bacce6c..9d1741a 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2903,7 +2903,7 @@ ctl_table ipv6_route_table_template[] = {
+ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
+ {
+-      struct ctl_table *table;
++      ctl_table_no_const *table;
+       table = kmemdup(ipv6_route_table_template,
+                       sizeof(ipv6_route_table_template),
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 60df36d..f3ab7c8 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
+ static void ipip6_dev_free(struct net_device *dev);
+ static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
+                     __be32 *v4dst);
+-static struct rtnl_link_ops sit_link_ops __read_mostly;
++static struct rtnl_link_ops sit_link_ops;
+ static int sit_net_id __read_mostly;
+ struct sit_net {
+@@ -1453,7 +1453,7 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
+ #endif
+ };
+-static struct rtnl_link_ops sit_link_ops __read_mostly = {
++static struct rtnl_link_ops sit_link_ops = {
+       .kind           = "sit",
+       .maxtype        = IFLA_IPTUN_MAX,
+       .policy         = ipip6_policy,
+diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
+index e85c48b..b8268d3 100644
+--- a/net/ipv6/sysctl_net_ipv6.c
++++ b/net/ipv6/sysctl_net_ipv6.c
+@@ -40,7 +40,7 @@ static ctl_table ipv6_rotable[] = {
+ static int __net_init ipv6_sysctl_net_init(struct net *net)
+ {
+-      struct ctl_table *ipv6_table;
++      ctl_table_no_const *ipv6_table;
+       struct ctl_table *ipv6_route_table;
+       struct ctl_table *ipv6_icmp_table;
+       int err;
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 0a17ed9..2526cc3 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -103,6 +103,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+               inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
+ }
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++extern int grsec_enable_blackhole;
++#endif
++
+ static void tcp_v6_hash(struct sock *sk)
+ {
+       if (sk->sk_state != TCP_CLOSE) {
+@@ -1398,6 +1402,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
+       return 0;
+ reset:
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++      if (!grsec_enable_blackhole)
++#endif
+       tcp_v6_send_reset(sk, skb);
+ discard:
+       if (opt_skb)
+@@ -1480,12 +1487,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
+       TCP_SKB_CB(skb)->sacked = 0;
+       sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
+-      if (!sk)
++      if (!sk) {
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++              ret = 1;
++#endif
+               goto no_tcp_socket;
++      }
+ process:
+-      if (sk->sk_state == TCP_TIME_WAIT)
++      if (sk->sk_state == TCP_TIME_WAIT) {
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++              ret = 2;
++#endif
+               goto do_time_wait;
++      }
+       if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
+               NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+@@ -1536,6 +1551,10 @@ csum_error:
+ bad_packet:
+               TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
+       } else {
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++              if (!grsec_enable_blackhole || (ret == 1 &&
++                  (skb->dev->flags & IFF_LOOPBACK)))
++#endif
+               tcp_v6_send_reset(NULL, skb);
+       }
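
The CONFIG_GRKERNSEC_BLACKHOLE hunks above all gate one behaviour: when the blackhole sysctl is enabled, the stack silently drops segments that would normally trigger a TCP RST (and, in the udp.c hunk further below, an ICMPv6 port-unreachable), except when no socket was found and the packet arrived on loopback. A minimal sketch of that decision; blackhole_enabled stands in for grsec_enable_blackhole and the function name is an invention of the sketch.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the grsec_enable_blackhole sysctl (sketch assumption). */
static int blackhole_enabled = 1;

/* Should an unexpected segment be answered with a RST / port-unreachable? */
static bool should_send_reject(bool no_socket_found, bool from_loopback)
{
        if (!blackhole_enabled)
                return true;                    /* normal RFC behaviour */
        if (no_socket_found && from_loopback)
                return true;                    /* local tools still get answers */
        return false;                           /* silent drop: port looks filtered */
}

int main(void)
{
        printf("external probe, no listener: %d\n", should_send_reject(true, false));
        printf("loopback probe, no listener: %d\n", should_send_reject(true, true));
        return 0;
}
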
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index e7b28f9..d09c290 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -52,6 +52,10 @@
+ #include <trace/events/skb.h>
+ #include "udp_impl.h"
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++extern int grsec_enable_blackhole;
++#endif
++
+ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
+ {
+       const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
+@@ -419,7 +423,7 @@ try_again:
+       if (unlikely(err)) {
+               trace_kfree_skb(skb, udpv6_recvmsg);
+               if (!peeked) {
+-                      atomic_inc(&sk->sk_drops);
++                      atomic_inc_unchecked(&sk->sk_drops);
+                       if (is_udp4)
+                               UDP_INC_STATS_USER(sock_net(sk),
+                                                  UDP_MIB_INERRORS,
+@@ -665,7 +669,7 @@ csum_error:
+       UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
+ drop:
+       UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+-      atomic_inc(&sk->sk_drops);
++      atomic_inc_unchecked(&sk->sk_drops);
+       kfree_skb(skb);
+       return -1;
+ }
+@@ -723,7 +727,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
+               if (likely(skb1 == NULL))
+                       skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
+               if (!skb1) {
+-                      atomic_inc(&sk->sk_drops);
++                      atomic_inc_unchecked(&sk->sk_drops);
+                       UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+                                         IS_UDPLITE(sk));
+                       UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+@@ -860,6 +864,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+               goto csum_error;
+       UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++      if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
++#endif
+       icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
+       kfree_skb(skb);
+@@ -1392,7 +1399,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
+                  0,
+                  sock_i_ino(sp),
+                  atomic_read(&sp->sk_refcnt), sp,
+-                 atomic_read(&sp->sk_drops));
++                 atomic_read_unchecked(&sp->sk_drops));
+ }
+ int udp6_seq_show(struct seq_file *seq, void *v)
+diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
+index 23ed03d..465a71d 100644
+--- a/net/ipv6/xfrm6_policy.c
++++ b/net/ipv6/xfrm6_policy.c
+@@ -324,19 +324,19 @@ static struct ctl_table xfrm6_policy_table[] = {
+ static int __net_init xfrm6_net_init(struct net *net)
+ {
+-      struct ctl_table *table;
++      ctl_table_no_const *table = NULL;
+       struct ctl_table_header *hdr;
+-      table = xfrm6_policy_table;
+       if (!net_eq(net, &init_net)) {
+-              table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
++              table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
+               if (!table)
+                       goto err_alloc;
+               table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
+-      }
++              hdr = register_net_sysctl(net, "net/ipv6", table);
++      } else
++              hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
+-      hdr = register_net_sysctl(net, "net/ipv6", table);
+       if (!hdr)
+               goto err_reg;
+@@ -344,8 +344,7 @@ static int __net_init xfrm6_net_init(struct net *net)
+       return 0;
+ err_reg:
+-      if (!net_eq(net, &init_net))
+-              kfree(table);
++      kfree(table);
+ err_alloc:
+       return -ENOMEM;
+ }
+diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
+index 41ac7938..75e3bb1 100644
+--- a/net/irda/ircomm/ircomm_tty.c
++++ b/net/irda/ircomm/ircomm_tty.c
+@@ -319,11 +319,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
+       add_wait_queue(&port->open_wait, &wait);
+       IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
+-            __FILE__, __LINE__, tty->driver->name, port->count);
++            __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
+       spin_lock_irqsave(&port->lock, flags);
+       if (!tty_hung_up_p(filp))
+-              port->count--;
++              atomic_dec(&port->count);
+       port->blocked_open++;
+       spin_unlock_irqrestore(&port->lock, flags);
+@@ -358,7 +358,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
+               }
+               IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
+-                    __FILE__, __LINE__, tty->driver->name, port->count);
++                    __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
+               schedule();
+       }
+@@ -368,12 +368,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
+       spin_lock_irqsave(&port->lock, flags);
+       if (!tty_hung_up_p(filp))
+-              port->count++;
++              atomic_inc(&port->count);
+       port->blocked_open--;
+       spin_unlock_irqrestore(&port->lock, flags);
+       IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
+-            __FILE__, __LINE__, tty->driver->name, port->count);
++            __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
+       if (!retval)
+               port->flags |= ASYNC_NORMAL_ACTIVE;
+@@ -447,12 +447,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
+       /* ++ is not atomic, so this should be protected - Jean II */
+       spin_lock_irqsave(&self->port.lock, flags);
+-      self->port.count++;
++      atomic_inc(&self->port.count);
+       spin_unlock_irqrestore(&self->port.lock, flags);
+       tty_port_tty_set(&self->port, tty);
+       IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
+-                 self->line, self->port.count);
++                 self->line, atomic_read(&self->port.count));
+       /* Not really used by us, but lets do it anyway */
+       self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+@@ -989,7 +989,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
+               tty_kref_put(port->tty);
+       }
+       port->tty = NULL;
+-      port->count = 0;
++      atomic_set(&port->count, 0);
+       spin_unlock_irqrestore(&port->lock, flags);
+       wake_up_interruptible(&port->open_wait);
+@@ -1346,7 +1346,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
+       seq_putc(m, '\n');
+       seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
+-      seq_printf(m, "Open count: %d\n", self->port.count);
++      seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
+       seq_printf(m, "Max data size: %d\n", self->max_data_size);
+       seq_printf(m, "Max header size: %d\n", self->max_header_size);
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index ae69165..c8b82d8 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk)
+       write_lock_bh(&iucv_sk_list.lock);
+-      sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
++      sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
+       while (__iucv_get_sock_by_name(name)) {
+               sprintf(name, "%08x",
+-                      atomic_inc_return(&iucv_sk_list.autobind_name));
++                      atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
+       }
+       write_unlock_bh(&iucv_sk_list.lock);
+diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
+index 4fe76ff..426a904 100644
+--- a/net/iucv/iucv.c
++++ b/net/iucv/iucv.c
+@@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __refdata iucv_cpu_notifier = {
++static struct notifier_block iucv_cpu_notifier = {
+       .notifier_call = iucv_cpu_notify,
+ };
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index ab8bd2c..cd2d641 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -3048,10 +3048,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
+ static u32 get_acqseq(void)
+ {
+       u32 res;
+-      static atomic_t acqseq;
++      static atomic_unchecked_t acqseq;
+       do {
+-              res = atomic_inc_return(&acqseq);
++              res = atomic_inc_return_unchecked(&acqseq);
+       } while (!res);
+       return res;
+ }
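
The get_acqseq() hunk above, like the sk_drops and in_pkts conversions elsewhere in this patch, swaps atomic_t for atomic_unchecked_t. Under the PaX REFCOUNT hardening this patch set carries, plain atomic_t operations are overflow-checked, so counters that are genuinely allowed to wrap around must use the unchecked variants. A userspace analogue of the wrapping generator using C11 atomics (names are local to the sketch):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint acqseq = 0;                  /* allowed to wrap, never returned as 0 */

static uint32_t get_acqseq(void)
{
        uint32_t res;

        do {
                res = atomic_fetch_add(&acqseq, 1) + 1; /* like atomic_inc_return() */
        } while (res == 0);                             /* skip 0 after wrap-around */
        return res;
}

int main(void)
{
        printf("%u %u %u\n", get_acqseq(), get_acqseq(), get_acqseq());
        return 0;
}
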
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index ae36f8e..09d42ac 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -806,7 +806,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
+                       ret = ieee80211_vif_use_channel(sdata, chandef,
+                                       IEEE80211_CHANCTX_EXCLUSIVE);
+               }
+-      } else if (local->open_count == local->monitors) {
++      } else if (local_read(&local->open_count) == local->monitors) {
+               local->_oper_chandef = *chandef;
+               ieee80211_hw_config(local, 0);
+       }
+@@ -2922,7 +2922,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
+               else
+                       local->probe_req_reg--;
+-              if (!local->open_count)
++              if (!local_read(&local->open_count))
+                       break;
+               ieee80211_queue_work(&local->hw, &local->reconfig_filter);
+@@ -3385,8 +3385,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
+       if (chanctx_conf) {
+               *chandef = chanctx_conf->def;
+               ret = 0;
+-      } else if (local->open_count > 0 &&
+-                 local->open_count == local->monitors &&
++      } else if (local_read(&local->open_count) > 0 &&
++                 local_read(&local->open_count) == local->monitors &&
+                  sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+               if (local->use_chanctx)
+                       *chandef = local->monitor_chandef;
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 9ca8e32..48e4a9b 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -28,6 +28,7 @@
+ #include <net/ieee80211_radiotap.h>
+ #include <net/cfg80211.h>
+ #include <net/mac80211.h>
++#include <asm/local.h>
+ #include "key.h"
+ #include "sta_info.h"
+ #include "debug.h"
+@@ -891,7 +892,7 @@ struct ieee80211_local {
+       /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
+       spinlock_t queue_stop_reason_lock;
+-      int open_count;
++      local_t open_count;
+       int monitors, cooked_mntrs;
+       /* number of interfaces with corresponding FIF_ flags */
+       int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 514e90f..56f22bf 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -502,7 +502,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
+               break;
+       }
+-      if (local->open_count == 0) {
++      if (local_read(&local->open_count) == 0) {
+               res = drv_start(local);
+               if (res)
+                       goto err_del_bss;
+@@ -545,7 +545,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
+                       break;
+               }
+-              if (local->monitors == 0 && local->open_count == 0) {
++              if (local->monitors == 0 && local_read(&local->open_count) == 0) {
+                       res = ieee80211_add_virtual_monitor(local);
+                       if (res)
+                               goto err_stop;
+@@ -653,7 +653,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
+               atomic_inc(&local->iff_promiscs);
+       if (coming_up)
+-              local->open_count++;
++              local_inc(&local->open_count);
+       if (hw_reconf_flags)
+               ieee80211_hw_config(local, hw_reconf_flags);
+@@ -691,7 +691,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
+  err_del_interface:
+       drv_remove_interface(local, sdata);
+  err_stop:
+-      if (!local->open_count)
++      if (!local_read(&local->open_count))
+               drv_stop(local);
+  err_del_bss:
+       sdata->bss = NULL;
+@@ -828,7 +828,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+       }
+       if (going_down)
+-              local->open_count--;
++              local_dec(&local->open_count);
+       switch (sdata->vif.type) {
+       case NL80211_IFTYPE_AP_VLAN:
+@@ -895,7 +895,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+       }
+       spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+-      if (local->open_count == 0)
++      if (local_read(&local->open_count) == 0)
+               ieee80211_clear_tx_pending(local);
+       /*
+@@ -931,7 +931,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+       ieee80211_recalc_ps(local, -1);
+-      if (local->open_count == 0) {
++      if (local_read(&local->open_count) == 0) {
+               ieee80211_stop_device(local);
+               /* no reconfiguring after stop! */
+@@ -942,7 +942,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+       ieee80211_configure_filter(local);
+       ieee80211_hw_config(local, hw_reconf_flags);
+-      if (local->monitors == local->open_count)
++      if (local->monitors == local_read(&local->open_count))
+               ieee80211_add_virtual_monitor(local);
+ }
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 8a7bfc4..4407cd0 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -181,7 +181,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
+               changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
+                            IEEE80211_CONF_CHANGE_POWER);
+-      if (changed && local->open_count) {
++      if (changed && local_read(&local->open_count)) {
+               ret = drv_config(local, changed);
+               /*
+                * Goal:
+diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
+index 3401262..d5cd68d 100644
+--- a/net/mac80211/pm.c
++++ b/net/mac80211/pm.c
+@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+       struct ieee80211_sub_if_data *sdata;
+       struct sta_info *sta;
+-      if (!local->open_count)
++      if (!local_read(&local->open_count))
+               goto suspend;
+       ieee80211_scan_cancel(local);
+@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+       cancel_work_sync(&local->dynamic_ps_enable_work);
+       del_timer_sync(&local->dynamic_ps_timer);
+-      local->wowlan = wowlan && local->open_count;
++      local->wowlan = wowlan && local_read(&local->open_count);
+       if (local->wowlan) {
+               int err = drv_suspend(local, wowlan);
+               if (err < 0) {
+@@ -116,7 +116,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+       WARN_ON(!list_empty(&local->chanctx_list));
+       /* stop hardware - this must stop RX */
+-      if (local->open_count)
++      if (local_read(&local->open_count))
+               ieee80211_stop_device(local);
+  suspend:
+diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
+index a02bef3..f2f38dd 100644
+--- a/net/mac80211/rate.c
++++ b/net/mac80211/rate.c
+@@ -712,7 +712,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
+       ASSERT_RTNL();
+-      if (local->open_count)
++      if (local_read(&local->open_count))
+               return -EBUSY;
+       if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
+diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
+index c97a065..ff61928 100644
+--- a/net/mac80211/rc80211_pid_debugfs.c
++++ b/net/mac80211/rc80211_pid_debugfs.c
+@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
+       spin_unlock_irqrestore(&events->lock, status);
+-      if (copy_to_user(buf, pb, p))
++      if (p > sizeof(pb) || copy_to_user(buf, pb, p))
+               return -EFAULT;
+       return p;
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 72e6292..e6319eb 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -1472,7 +1472,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
+       }
+ #endif
+       /* everything else happens only if HW was up & running */
+-      if (!local->open_count)
++      if (!local_read(&local->open_count))
+               goto wake_up;
+       /*
+@@ -1696,7 +1696,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
+       local->in_reconfig = false;
+       barrier();
+-      if (local->monitors == local->open_count && local->monitors > 0)
++      if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
+               ieee80211_add_virtual_monitor(local);
+       /*
+diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
+index 56d22ca..87c778f 100644
+--- a/net/netfilter/Kconfig
++++ b/net/netfilter/Kconfig
+@@ -958,6 +958,16 @@ config NETFILTER_XT_MATCH_ESP
+         To compile it as a module, choose M here.  If unsure, say N.
++config NETFILTER_XT_MATCH_GRADM
++      tristate '"gradm" match support'
++      depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
++      depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
++      ---help---
++        The gradm match allows matching on grsecurity RBAC being enabled.
++        It is useful when iptables rules are applied early on bootup to
++        prevent connections to the machine (except from a trusted host)
++        while the RBAC system is disabled.
++
+ config NETFILTER_XT_MATCH_HASHLIMIT
+       tristate '"hashlimit" match support'
+       depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
+diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
+index a1abf87..dbcb7ee 100644
+--- a/net/netfilter/Makefile
++++ b/net/netfilter/Makefile
+@@ -112,6 +112,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
++obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index f771390..145b765 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -1820,7 +1820,7 @@ done:
+       return ret;
+ }
+-static struct nf_sockopt_ops so_set __read_mostly = {
++static struct nf_sockopt_ops so_set = {
+       .pf             = PF_INET,
+       .get_optmin     = SO_IP_SET,
+       .get_optmax     = SO_IP_SET + 1,
+diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
+index a083bda..da661c3 100644
+--- a/net/netfilter/ipvs/ip_vs_conn.c
++++ b/net/netfilter/ipvs/ip_vs_conn.c
+@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
+       /* Increase the refcnt counter of the dest */
+       ip_vs_dest_hold(dest);
+-      conn_flags = atomic_read(&dest->conn_flags);
++      conn_flags = atomic_read_unchecked(&dest->conn_flags);
+       if (cp->protocol != IPPROTO_UDP)
+               conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
+       flags = cp->flags;
+@@ -900,7 +900,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
+       cp->control = NULL;
+       atomic_set(&cp->n_control, 0);
+-      atomic_set(&cp->in_pkts, 0);
++      atomic_set_unchecked(&cp->in_pkts, 0);
+       cp->packet_xmit = NULL;
+       cp->app = NULL;
+@@ -1190,7 +1190,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
+       /* Don't drop the entry if its number of incoming packets is not
+          located in [0, 8] */
+-      i = atomic_read(&cp->in_pkts);
++      i = atomic_read_unchecked(&cp->in_pkts);
+       if (i > 8 || i < 0) return 0;
+       if (!todrop_rate[i]) return 0;
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index 23b8eb5..48a8959 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -559,7 +559,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
+               ret = cp->packet_xmit(skb, cp, pd->pp, iph);
+               /* do not touch skb anymore */
+-              atomic_inc(&cp->in_pkts);
++              atomic_inc_unchecked(&cp->in_pkts);
+               ip_vs_conn_put(cp);
+               return ret;
+       }
+@@ -1711,7 +1711,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
+       if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+               pkts = sysctl_sync_threshold(ipvs);
+       else
+-              pkts = atomic_add_return(1, &cp->in_pkts);
++              pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
+       if (ipvs->sync_state & IP_VS_STATE_MASTER)
+               ip_vs_sync_conn(net, cp, pkts);
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 9e6c2a0..28552e2 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -789,7 +789,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
+                */
+               ip_vs_rs_hash(ipvs, dest);
+       }
+-      atomic_set(&dest->conn_flags, conn_flags);
++      atomic_set_unchecked(&dest->conn_flags, conn_flags);
+       /* bind the service */
+       if (!dest->svc) {
+@@ -1657,7 +1657,7 @@ proc_do_sync_ports(ctl_table *table, int write,
+  *    align with netns init in ip_vs_control_net_init()
+  */
+-static struct ctl_table vs_vars[] = {
++static ctl_table_no_const vs_vars[] __read_only = {
+       {
+               .procname       = "amemthresh",
+               .maxlen         = sizeof(int),
+@@ -2060,7 +2060,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
+                                          "      %-7s %-6d %-10d %-10d\n",
+                                          &dest->addr.in6,
+                                          ntohs(dest->port),
+-                                         ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
++                                         ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
+                                          atomic_read(&dest->weight),
+                                          atomic_read(&dest->activeconns),
+                                          atomic_read(&dest->inactconns));
+@@ -2071,7 +2071,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
+                                          "%-7s %-6d %-10d %-10d\n",
+                                          ntohl(dest->addr.ip),
+                                          ntohs(dest->port),
+-                                         ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
++                                         ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
+                                          atomic_read(&dest->weight),
+                                          atomic_read(&dest->activeconns),
+                                          atomic_read(&dest->inactconns));
+@@ -2549,7 +2549,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
+                       entry.addr = dest->addr.ip;
+                       entry.port = dest->port;
+-                      entry.conn_flags = atomic_read(&dest->conn_flags);
++                      entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
+                       entry.weight = atomic_read(&dest->weight);
+                       entry.u_threshold = dest->u_threshold;
+                       entry.l_threshold = dest->l_threshold;
+@@ -3092,7 +3092,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
+       if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
+           nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
+           nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
+-                      (atomic_read(&dest->conn_flags) &
++                      (atomic_read_unchecked(&dest->conn_flags) &
+                        IP_VS_CONN_F_FWD_MASK)) ||
+           nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
+                       atomic_read(&dest->weight)) ||
+@@ -3682,7 +3682,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
+ {
+       int idx;
+       struct netns_ipvs *ipvs = net_ipvs(net);
+-      struct ctl_table *tbl;
++      ctl_table_no_const *tbl;
+       atomic_set(&ipvs->dropentry, 0);
+       spin_lock_init(&ipvs->dropentry_lock);
+diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
+index 5ea26bd..c9bc65f 100644
+--- a/net/netfilter/ipvs/ip_vs_lblc.c
++++ b/net/netfilter/ipvs/ip_vs_lblc.c
+@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
+  *      IPVS LBLC sysctl table
+  */
+ #ifdef CONFIG_SYSCTL
+-static ctl_table vs_vars_table[] = {
++static ctl_table_no_const vs_vars_table[] __read_only = {
+       {
+               .procname       = "lblc_expiration",
+               .data           = NULL,
+diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
+index 50123c2..067c773 100644
+--- a/net/netfilter/ipvs/ip_vs_lblcr.c
++++ b/net/netfilter/ipvs/ip_vs_lblcr.c
+@@ -299,7 +299,7 @@ struct ip_vs_lblcr_table {
+  *      IPVS LBLCR sysctl table
+  */
+-static ctl_table vs_vars_table[] = {
++static ctl_table_no_const vs_vars_table[] __read_only = {
+       {
+               .procname       = "lblcr_expiration",
+               .data           = NULL,
+diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
+index f6046d9..4f10cfd 100644
+--- a/net/netfilter/ipvs/ip_vs_sync.c
++++ b/net/netfilter/ipvs/ip_vs_sync.c
+@@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
+       cp = cp->control;
+       if (cp) {
+               if (cp->flags & IP_VS_CONN_F_TEMPLATE)
+-                      pkts = atomic_add_return(1, &cp->in_pkts);
++                      pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
+               else
+                       pkts = sysctl_sync_threshold(ipvs);
+               ip_vs_sync_conn(net, cp->control, pkts);
+@@ -758,7 +758,7 @@ control:
+       if (!cp)
+               return;
+       if (cp->flags & IP_VS_CONN_F_TEMPLATE)
+-              pkts = atomic_add_return(1, &cp->in_pkts);
++              pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
+       else
+               pkts = sysctl_sync_threshold(ipvs);
+       goto sloop;
+@@ -882,7 +882,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
+       if (opt)
+               memcpy(&cp->in_seq, opt, sizeof(*opt));
+-      atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
++      atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
+       cp->state = state;
+       cp->old_state = cp->state;
+       /*
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index b75ff64..0c51bbe 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+               else
+                       rc = NF_ACCEPT;
+               /* do not touch skb anymore */
+-              atomic_inc(&cp->in_pkts);
++              atomic_inc_unchecked(&cp->in_pkts);
+               goto out;
+       }
+@@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+               else
+                       rc = NF_ACCEPT;
+               /* do not touch skb anymore */
+-              atomic_inc(&cp->in_pkts);
++              atomic_inc_unchecked(&cp->in_pkts);
+               goto out;
+       }
+diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
+index 2d3030a..7ba1c0a 100644
+--- a/net/netfilter/nf_conntrack_acct.c
++++ b/net/netfilter/nf_conntrack_acct.c
+@@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
+ #ifdef CONFIG_SYSCTL
+ static int nf_conntrack_acct_init_sysctl(struct net *net)
+ {
+-      struct ctl_table *table;
++      ctl_table_no_const *table;
+       table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
+                       GFP_KERNEL);
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 0283bae..5febcb0 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -1614,6 +1614,10 @@ void nf_conntrack_init_end(void)
+ #define DYING_NULLS_VAL               ((1<<30)+1)
+ #define TEMPLATE_NULLS_VAL    ((1<<30)+2)
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
++#endif
++
+ int nf_conntrack_init_net(struct net *net)
+ {
+       int ret;
+@@ -1628,7 +1632,11 @@ int nf_conntrack_init_net(struct net *net)
+               goto err_stat;
+       }
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++      net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
++#else
+       net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
++#endif
+       if (!net->ct.slabname) {
+               ret = -ENOMEM;
+               goto err_slabname;
+diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
+index 1df1761..ce8b88a 100644
+--- a/net/netfilter/nf_conntrack_ecache.c
++++ b/net/netfilter/nf_conntrack_ecache.c
+@@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
+ #ifdef CONFIG_SYSCTL
+ static int nf_conntrack_event_init_sysctl(struct net *net)
+ {
+-      struct ctl_table *table;
++      ctl_table_no_const *table;
+       table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
+                       GFP_KERNEL);
+diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
+index 974a2a4..52cc6ff 100644
+--- a/net/netfilter/nf_conntrack_helper.c
++++ b/net/netfilter/nf_conntrack_helper.c
+@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
+ static int nf_conntrack_helper_init_sysctl(struct net *net)
+ {
+-      struct ctl_table *table;
++      ctl_table_no_const *table;
+       table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
+                       GFP_KERNEL);
+diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
+index 0ab9636..cea3c6a 100644
+--- a/net/netfilter/nf_conntrack_proto.c
++++ b/net/netfilter/nf_conntrack_proto.c
+@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
+ static void
+ nf_ct_unregister_sysctl(struct ctl_table_header **header,
+-                      struct ctl_table **table,
++                      ctl_table_no_const **table,
+                       unsigned int users)
+ {
+       if (users > 0)
+diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
+index a99b6c3..3841268 100644
+--- a/net/netfilter/nf_conntrack_proto_dccp.c
++++ b/net/netfilter/nf_conntrack_proto_dccp.c
+@@ -457,7 +457,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
+ out_invalid:
+       if (LOG_INVALID(net, IPPROTO_DCCP))
+               nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
+-                            NULL, msg);
++                            NULL, "%s", msg);
+       return false;
+ }
+@@ -614,7 +614,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
+ out_invalid:
+       if (LOG_INVALID(net, IPPROTO_DCCP))
+-              nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, msg);
++              nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
+       return -NF_ACCEPT;
+ }
+diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
+index 4d4d8f1..e0f9a32 100644
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -526,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
+       const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
+       __u32 seq, ack, sack, end, win, swin;
+       s16 receiver_offset;
+-      bool res;
++      bool res, in_recv_win;
+       /*
+        * Get the required data from the packet.
+@@ -649,14 +649,18 @@ static bool tcp_in_window(const struct nf_conn *ct,
+                receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
+                receiver->td_scale);
++      /* Is the ending sequence in the receive window (if available)? */
++      in_recv_win = !receiver->td_maxwin ||
++                    after(end, sender->td_end - receiver->td_maxwin - 1);
++
+       pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
+                before(seq, sender->td_maxend + 1),
+-               after(end, sender->td_end - receiver->td_maxwin - 1),
++               (in_recv_win ? 1 : 0),
+                before(sack, receiver->td_end + 1),
+                after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
+       if (before(seq, sender->td_maxend + 1) &&
+-          after(end, sender->td_end - receiver->td_maxwin - 1) &&
++          in_recv_win &&
+           before(sack, receiver->td_end + 1) &&
+           after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
+               /*
+@@ -725,7 +729,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
+                       nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
+                       "nf_ct_tcp: %s ",
+                       before(seq, sender->td_maxend + 1) ?
+-                      after(end, sender->td_end - receiver->td_maxwin - 1) ?
++                      in_recv_win ?
+                       before(sack, receiver->td_end + 1) ?
+                       after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
+                       : "ACK is under the lower bound (possible overly delayed ACK)"
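
The tcp_in_window() hunk above factors the receive-window test into a named flag, in_recv_win, and treats a zero (not yet learned) td_maxwin as "window unknown, accept". The sketch below isolates that test; seq_after() reproduces the wrap-safe after() comparison and the parameter names are local to the sketch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "a is after b" for 32-bit TCP sequence numbers (same idea as after()). */
static bool seq_after(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) > 0;
}

/* Is the segment's ending sequence inside the receiver's window, if one is known? */
static bool in_recv_win(uint32_t end, uint32_t sender_td_end, uint32_t recv_td_maxwin)
{
        return recv_td_maxwin == 0 ||           /* window not learned yet: accept */
               seq_after(end, sender_td_end - recv_td_maxwin - 1);
}

int main(void)
{
        printf("unknown window: %d\n", in_recv_win(1000, 5000, 0));
        printf("inside window:  %d\n", in_recv_win(4990, 5000, 100));
        printf("far behind:     %d\n", in_recv_win(1000, 5000, 100));
        return 0;
}
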
+diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
+index bd700b4..4a3dc61 100644
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -471,7 +471,7 @@ static ctl_table nf_ct_netfilter_table[] = {
+ static int nf_conntrack_standalone_init_sysctl(struct net *net)
+ {
+-      struct ctl_table *table;
++      ctl_table_no_const *table;
+       table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
+                       GFP_KERNEL);
+diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
+index 902fb0a..87f7fdb 100644
+--- a/net/netfilter/nf_conntrack_timestamp.c
++++ b/net/netfilter/nf_conntrack_timestamp.c
+@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
+ #ifdef CONFIG_SYSCTL
+ static int nf_conntrack_tstamp_init_sysctl(struct net *net)
+ {
+-      struct ctl_table *table;
++      ctl_table_no_const *table;
+       table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
+                       GFP_KERNEL);
+diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
+index 3b18dd1..f79e0ca 100644
+--- a/net/netfilter/nf_log.c
++++ b/net/netfilter/nf_log.c
+@@ -243,7 +243,7 @@ static const struct file_operations nflog_file_ops = {
+ #ifdef CONFIG_SYSCTL
+ static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
+-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
++static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
+ static int nf_log_proc_dostring(ctl_table *table, int write,
+                        void __user *buffer, size_t *lenp, loff_t *ppos)
+@@ -274,14 +274,16 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
+               rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
+               mutex_unlock(&nf_log_mutex);
+       } else {
++              ctl_table_no_const nf_log_table = *table;
++
+               mutex_lock(&nf_log_mutex);
+               logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
+                                                  lockdep_is_held(&nf_log_mutex));
+               if (!logger)
+-                      table->data = "NONE";
++                      nf_log_table.data = "NONE";
+               else
+-                      table->data = logger->name;
+-              r = proc_dostring(table, write, buffer, lenp, ppos);
++                      nf_log_table.data = logger->name;
++              r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
+               mutex_unlock(&nf_log_mutex);
+       }
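
The nf_log_proc_dostring() hunk above stops writing the logger name through the shared sysctl table (which this patch marks read-only) and instead fills a stack copy of the ctl_table for the read path. The same "copy the descriptor, point the copy at the data you want, use the copy" shape in plain userspace C, with invented type and field names:

#include <stdio.h>

struct table_desc {
        const char *procname;
        const char *data;                       /* what a read would return */
};

static const struct table_desc shared_desc = { "nf_log", NULL };

/* Read through a private copy so the shared, read-only descriptor is never modified. */
static void show_logger(const char *logger_name)
{
        struct table_desc local = shared_desc;  /* stack copy of the descriptor */

        local.data = logger_name ? logger_name : "NONE";
        printf("%s = %s\n", local.procname, local.data);
}

int main(void)
{
        show_logger(NULL);
        show_logger("nfnetlink_log");
        return 0;
}
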
+diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
+index f042ae5..30ea486 100644
+--- a/net/netfilter/nf_sockopt.c
++++ b/net/netfilter/nf_sockopt.c
+@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
+               }
+       }
+-      list_add(&reg->list, &nf_sockopts);
++      pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
+ out:
+       mutex_unlock(&nf_sockopt_mutex);
+       return ret;
+@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
+ void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
+ {
+       mutex_lock(&nf_sockopt_mutex);
+-      list_del(&reg->list);
++      pax_list_del((struct list_head *)&reg->list);
+       mutex_unlock(&nf_sockopt_mutex);
+ }
+ EXPORT_SYMBOL(nf_unregister_sockopt);
+diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
+index 962e979..e46f350 100644
+--- a/net/netfilter/nfnetlink_log.c
++++ b/net/netfilter/nfnetlink_log.c
+@@ -82,7 +82,7 @@ static int nfnl_log_net_id __read_mostly;
+ struct nfnl_log_net {
+       spinlock_t instances_lock;
+       struct hlist_head instance_table[INSTANCE_BUCKETS];
+-      atomic_t global_seq;
++      atomic_unchecked_t global_seq;
+ };
+ static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
+@@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log,
+       nfmsg->version = NFNETLINK_V0;
+       nfmsg->res_id = htons(inst->group_num);
++      memset(&pmsg, 0, sizeof(pmsg));
+       pmsg.hw_protocol        = skb->protocol;
+       pmsg.hook               = hooknum;
+@@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log,
+       if (indev && skb->dev &&
+           skb->mac_header != skb->network_header) {
+               struct nfulnl_msg_packet_hw phw;
+-              int len = dev_parse_header(skb, phw.hw_addr);
++              int len;
++
++              memset(&phw, 0, sizeof(phw));
++              len = dev_parse_header(skb, phw.hw_addr);
+               if (len > 0) {
+                       phw.hw_addrlen = htons(len);
+                       if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
+@@ -559,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
+       /* global sequence number */
+       if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
+           nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
+-                       htonl(atomic_inc_return(&log->global_seq))))
++                       htonl(atomic_inc_return_unchecked(&log->global_seq))))
+               goto nla_put_failure;
+       if (data_len) {
+diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
+index 5352b2d..e0083ce 100644
+--- a/net/netfilter/nfnetlink_queue_core.c
++++ b/net/netfilter/nfnetlink_queue_core.c
+@@ -444,7 +444,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
+       if (indev && entskb->dev &&
+           entskb->mac_header != entskb->network_header) {
+               struct nfqnl_msg_packet_hw phw;
+-              int len = dev_parse_header(entskb, phw.hw_addr);
++              int len;
++
++              memset(&phw, 0, sizeof(phw));
++              len = dev_parse_header(entskb, phw.hw_addr);
+               if (len) {
+                       phw.hw_addrlen = htons(len);
+                       if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
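
The two nfnetlink hunks above (and the earlier memset(&pmsg, ...) one) share one purpose: a stack structure that is later copied into a netlink message is zeroed before it is only partially filled by dev_parse_header(), so padding and unfilled bytes can never leak old stack contents. A userspace sketch of the idea, with invented type and function names:

#include <stdio.h>
#include <string.h>

struct hw_record {
        unsigned short hw_addrlen;
        unsigned char  hw_addr[8];              /* may be filled only partially */
};

/* Pretend header parser: fills the first n bytes and reports how many (sketch only). */
static int parse_hw_addr(unsigned char *dst, int n)
{
        memset(dst, 0xab, n);
        return n;
}

int main(void)
{
        struct hw_record rec;
        int len;

        memset(&rec, 0, sizeof(rec));           /* zero before the partial fill */
        len = parse_hw_addr(rec.hw_addr, 6);    /* bytes 6..7 stay zero, not stack junk */
        rec.hw_addrlen = (unsigned short)len;
        printf("addrlen=%d last byte=%d\n", rec.hw_addrlen, rec.hw_addr[7]);
        return 0;
}
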
+diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
+index 7011c71..6113cc7 100644
+--- a/net/netfilter/xt_TCPMSS.c
++++ b/net/netfilter/xt_TCPMSS.c
+@@ -52,7 +52,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
+ {
+       const struct xt_tcpmss_info *info = par->targinfo;
+       struct tcphdr *tcph;
+-      unsigned int tcplen, i;
++      int len, tcp_hdrlen;
++      unsigned int i;
+       __be16 oldval;
+       u16 newmss;
+       u8 *opt;
+@@ -64,11 +65,14 @@ tcpmss_mangle_packet(struct sk_buff *skb,
+       if (!skb_make_writable(skb, skb->len))
+               return -1;
+-      tcplen = skb->len - tcphoff;
++      len = skb->len - tcphoff;
++      if (len < (int)sizeof(struct tcphdr))
++              return -1;
++
+       tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
++      tcp_hdrlen = tcph->doff * 4;
+-      /* Header cannot be larger than the packet */
+-      if (tcplen < tcph->doff*4)
++      if (len < tcp_hdrlen)
+               return -1;
+       if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
+@@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
+               newmss = info->mss;
+       opt = (u_int8_t *)tcph;
+-      for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
+-              if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
+-                  opt[i+1] == TCPOLEN_MSS) {
++      for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
++              if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
+                       u_int16_t oldmss;
+                       oldmss = (opt[i+2] << 8) | opt[i+3];
+@@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
+       }
+       /* There is data after the header so the option can't be added
+-         without moving it, and doing so may make the SYN packet
+-         itself too large. Accept the packet unmodified instead. */
+-      if (tcplen > tcph->doff*4)
++       * without moving it, and doing so may make the SYN packet
++       * itself too large. Accept the packet unmodified instead.
++       */
++      if (len > tcp_hdrlen)
+               return 0;
+       /*
+@@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
+               newmss = min(newmss, (u16)1220);
+       opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
+-      memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
++      memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
+       inet_proto_csum_replace2(&tcph->check, skb,
+-                               htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
++                               htons(len), htons(len + TCPOLEN_MSS), 1);
+       opt[0] = TCPOPT_MSS;
+       opt[1] = TCPOLEN_MSS;
+       opt[2] = (newmss & 0xff00) >> 8;
+diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
+index b68fa19..625fa1d 100644
+--- a/net/netfilter/xt_TCPOPTSTRIP.c
++++ b/net/netfilter/xt_TCPOPTSTRIP.c
+@@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
+       struct tcphdr *tcph;
+       u_int16_t n, o;
+       u_int8_t *opt;
+-      int len;
++      int len, tcp_hdrlen;
+       /* This is a fragment, no TCP header is available */
+       if (par->fragoff != 0)
+@@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
+               return NF_DROP;
+       tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+-      if (tcph->doff * 4 > len)
++      tcp_hdrlen = tcph->doff * 4;
++
++      if (len < tcp_hdrlen)
+               return NF_DROP;
+       opt  = (u_int8_t *)tcph;
+@@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
+        * Walk through all TCP options - if we find some option to remove,
+        * set all octets to %TCPOPT_NOP and adjust checksum.
+        */
+-      for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) {
++      for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) {
+               optl = optlen(opt, i);
+-              if (i + optl > tcp_hdrlen(skb))
++              if (i + optl > tcp_hdrlen)
+                       break;
+               if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i]))
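
The xt_TCPMSS and xt_TCPOPTSTRIP hunks above tighten the same invariant: the header length claimed by doff must fit inside the data that is actually present before the option bytes are walked, and the MSS scan stops while a full TCPOLEN_MSS-sized option can still fit. A self-contained sketch of that bounded scan over a raw header buffer; the helper names and the sample buffer are inventions of the sketch, though opt_len() mirrors the optlen() helper used in the hunks.

#include <stdint.h>
#include <stdio.h>

#define TCPOPT_NOP   1
#define TCPOPT_MSS   2
#define TCPOLEN_MSS  4

/* Length of the option starting at offset off; zero-length options advance by 1. */
static unsigned int opt_len(const uint8_t *opt, unsigned int off)
{
        if (opt[off] <= TCPOPT_NOP || opt[off + 1] == 0)
                return 1;
        return opt[off + 1];
}

/* Return the MSS value found in a TCP header of hdr_len bytes, or -1. */
static int find_mss(const uint8_t *hdr, int hdr_len)
{
        unsigned int i;

        if (hdr_len < 20)                       /* shorter than a bare TCP header */
                return -1;
        /* Stop while a complete 4-byte MSS option still fits inside the header. */
        for (i = 20; i + TCPOLEN_MSS <= (unsigned int)hdr_len; i += opt_len(hdr, i)) {
                if (hdr[i] == TCPOPT_MSS && hdr[i + 1] == TCPOLEN_MSS)
                        return (hdr[i + 2] << 8) | hdr[i + 3];
        }
        return -1;
}

int main(void)
{
        uint8_t hdr[24] = { 0 };
        int hdr_len;

        hdr[12] = 6 << 4;                       /* doff = 6 words = 24 bytes */
        hdr[20] = TCPOPT_MSS;
        hdr[21] = TCPOLEN_MSS;
        hdr[22] = 0x05; hdr[23] = 0xb4;         /* MSS 1460 */
        hdr_len = (hdr[12] >> 4) * 4;
        printf("MSS: %d\n", find_mss(hdr, hdr_len));
        return 0;
}
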
+diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
+new file mode 100644
+index 0000000..c566332
+--- /dev/null
++++ b/net/netfilter/xt_gradm.c
+@@ -0,0 +1,51 @@
++/*
++ *    gradm match for netfilter
++ *    Copyright © Zbigniew Krzystolik, 2010
++ *
++ *    This program is free software; you can redistribute it and/or modify
++ *    it under the terms of the GNU General Public License; either version
++ *    2 or 3 as published by the Free Software Foundation.
++ */
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter/x_tables.h>
++#include <linux/grsecurity.h>
++#include <linux/netfilter/xt_gradm.h>
++
++static bool
++gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
++{
++      const struct xt_gradm_mtinfo *info = par->matchinfo;
++      bool retval = false;
++      if (gr_acl_is_enabled())
++              retval = true;
++      return retval ^ info->invflags;
++}
++
++static struct xt_match gradm_mt_reg __read_mostly = {
++              .name       = "gradm",
++              .revision   = 0,
++              .family     = NFPROTO_UNSPEC,
++              .match      = gradm_mt,
++              .matchsize  = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
++              .me         = THIS_MODULE,
++};
++
++static int __init gradm_mt_init(void)
++{
++      return xt_register_match(&gradm_mt_reg);
++}
++
++static void __exit gradm_mt_exit(void)
++{
++      xt_unregister_match(&gradm_mt_reg);
++}
++
++module_init(gradm_mt_init);
++module_exit(gradm_mt_exit);
++MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
++MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("ipt_gradm");
++MODULE_ALIAS("ip6t_gradm");
+diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
+index 4fe4fb4..87a89e5 100644
+--- a/net/netfilter/xt_statistic.c
++++ b/net/netfilter/xt_statistic.c
+@@ -19,7 +19,7 @@
+ #include <linux/module.h>
+ struct xt_statistic_priv {
+-      atomic_t count;
++      atomic_unchecked_t count;
+ } ____cacheline_aligned_in_smp;
+ MODULE_LICENSE("GPL");
+@@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
+               break;
+       case XT_STATISTIC_MODE_NTH:
+               do {
+-                      oval = atomic_read(&info->master->count);
++                      oval = atomic_read_unchecked(&info->master->count);
+                       nval = (oval == info->u.nth.every) ? 0 : oval + 1;
+-              } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
++              } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
+               if (nval == 0)
+                       ret = !ret;
+               break;
+@@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
+       info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
+       if (info->master == NULL)
+               return -ENOMEM;
+-      atomic_set(&info->master->count, info->u.nth.count);
++      atomic_set_unchecked(&info->master->count, info->u.nth.count);
+       return 0;
+ }
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 57ee84d..8b99cf5 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -121,7 +121,7 @@ static void netlink_overrun(struct sock *sk)
+                       sk->sk_error_report(sk);
+               }
+       }
+-      atomic_inc(&sk->sk_drops);
++      atomic_inc_unchecked(&sk->sk_drops);
+ }
+ static void netlink_rcv_wake(struct sock *sk)
+@@ -2771,7 +2771,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
+                          sk_wmem_alloc_get(s),
+                          nlk->cb,
+                          atomic_read(&s->sk_refcnt),
+-                         atomic_read(&s->sk_drops),
++                         atomic_read_unchecked(&s->sk_drops),
+                          sock_i_ino(s)
+                       );
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 1076fe1..f190285 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -310,18 +310,20 @@ int genl_register_ops(struct genl_family *family, struct genl_ops *ops)
+               goto errout;
+       }
++      pax_open_kernel();
+       if (ops->dumpit)
+-              ops->flags |= GENL_CMD_CAP_DUMP;
++              *(unsigned int *)&ops->flags |= GENL_CMD_CAP_DUMP;
+       if (ops->doit)
+-              ops->flags |= GENL_CMD_CAP_DO;
++              *(unsigned int *)&ops->flags |= GENL_CMD_CAP_DO;
+       if (ops->policy)
+-              ops->flags |= GENL_CMD_CAP_HASPOL;
++              *(unsigned int *)&ops->flags |= GENL_CMD_CAP_HASPOL;
++      pax_close_kernel();
+       genl_lock_all();
+-      list_add_tail(&ops->ops_list, &family->ops_list);
++      pax_list_add_tail((struct list_head *)&ops->ops_list, &family->ops_list);
+       genl_unlock_all();
+-      genl_ctrl_event(CTRL_CMD_NEWOPS, ops);
++      genl_ctrl_event(CTRL_CMD_NEWOPS, (void *)ops);
+       err = 0;
+ errout:
+       return err;
+@@ -351,9 +353,9 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
+       genl_lock_all();
+       list_for_each_entry(rc, &family->ops_list, ops_list) {
+               if (rc == ops) {
+-                      list_del(&ops->ops_list);
++                      pax_list_del((struct list_head *)&ops->ops_list);
+                       genl_unlock_all();
+-                      genl_ctrl_event(CTRL_CMD_DELOPS, ops);
++                      genl_ctrl_event(CTRL_CMD_DELOPS, (void *)ops);
+                       return 0;
+               }
+       }
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index ec0c80f..41e1830 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -850,7 +850,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
+               *uaddr_len = sizeof(struct full_sockaddr_ax25);
+       } else {
+               sax->fsa_ax25.sax25_family = AF_NETROM;
+-              sax->fsa_ax25.sax25_ndigis = 0;
+               sax->fsa_ax25.sax25_call   = nr->source_addr;
+               *uaddr_len = sizeof(struct sockaddr_ax25);
+       }
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 20a1bd0..bb8f1c1 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1681,7 +1681,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
+       spin_lock(&sk->sk_receive_queue.lock);
+       po->stats.stats1.tp_packets++;
+-      skb->dropcount = atomic_read(&sk->sk_drops);
++      skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
+       __skb_queue_tail(&sk->sk_receive_queue, skb);
+       spin_unlock(&sk->sk_receive_queue.lock);
+       sk->sk_data_ready(sk, skb->len);
+@@ -1690,7 +1690,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
+ drop_n_acct:
+       spin_lock(&sk->sk_receive_queue.lock);
+       po->stats.stats1.tp_drops++;
+-      atomic_inc(&sk->sk_drops);
++      atomic_inc_unchecked(&sk->sk_drops);
+       spin_unlock(&sk->sk_receive_queue.lock);
+ drop_n_restore:
+@@ -2640,6 +2640,7 @@ out:
+ static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
+ {
++      struct sock_extended_err ee;
+       struct sock_exterr_skb *serr;
+       struct sk_buff *skb, *skb2;
+       int copied, err;
+@@ -2661,8 +2662,9 @@ static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
+       sock_recv_timestamp(msg, sk, skb);
+       serr = SKB_EXT_ERR(skb);
++      ee = serr->ee;
+       put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
+-               sizeof(serr->ee), &serr->ee);
++               sizeof ee, &ee);
+       msg->msg_flags |= MSG_ERRQUEUE;
+       err = copied;
+@@ -3281,7 +3283,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+       case PACKET_HDRLEN:
+               if (len > sizeof(int))
+                       len = sizeof(int);
+-              if (copy_from_user(&val, optval, len))
++              if (len > sizeof(val) || copy_from_user(&val, optval, len))
+                       return -EFAULT;
+               switch (val) {
+               case TPACKET_V1:
+@@ -3324,7 +3326,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+               len = lv;
+       if (put_user(len, optlen))
+               return -EFAULT;
+-      if (copy_to_user(optval, data, len))
++      if (len > sizeof(st) || copy_to_user(optval, data, len))
+               return -EFAULT;
+       return 0;
+ }
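
The packet_getsockopt() hunks above show one of this patch's recurring hardening idioms: before copy_from_user()/copy_to_user() runs, the caller-supplied length is checked against the size of the kernel-side object, so a bad length can never copy past it (the packet_recv_error() change, which copies serr->ee into a local before put_cmsg(), follows the related bounce pattern discussed after the sctp hunks below). A minimal userspace sketch of the same clamp-then-copy discipline; memcpy stands in for the user-copy helpers, and all names here are illustrative, not from the kernel:

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for copy_from_user(): returns 0 on success. */
    static int copy_in(void *dst, const void *src, size_t len)
    {
        memcpy(dst, src, len);
        return 0;
    }

    /* Reject any request longer than the kernel-side object before copying. */
    static int get_option(const void *user_buf, size_t user_len)
    {
        int val = 0;

        if (user_len > sizeof(val))         /* the added bounds check */
            return -1;                      /* -EFAULT in the kernel code */
        if (copy_in(&val, user_buf, user_len))
            return -1;
        printf("val = %d\n", val);
        return 0;
    }

    int main(void)
    {
        int request = 42;

        get_option(&request, sizeof(request));   /* accepted, prints 42 */
        return get_option(&request, 64) ? 0 : 1; /* rejected: too long */
    }
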
+diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
+index 5a940db..d6a502d 100644
+--- a/net/phonet/af_phonet.c
++++ b/net/phonet/af_phonet.c
+@@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
+ {
+       int err = 0;
+-      if (protocol >= PHONET_NPROTO)
++      if (protocol < 0 || protocol >= PHONET_NPROTO)
+               return -EINVAL;
+       err = proto_register(pp->prot, 1);
+diff --git a/net/phonet/pep.c b/net/phonet/pep.c
+index e774117..900b8b7 100644
+--- a/net/phonet/pep.c
++++ b/net/phonet/pep.c
+@@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
+       case PNS_PEP_CTRL_REQ:
+               if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
+-                      atomic_inc(&sk->sk_drops);
++                      atomic_inc_unchecked(&sk->sk_drops);
+                       break;
+               }
+               __skb_pull(skb, 4);
+@@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
+               }
+               if (pn->rx_credits == 0) {
+-                      atomic_inc(&sk->sk_drops);
++                      atomic_inc_unchecked(&sk->sk_drops);
+                       err = -ENOBUFS;
+                       break;
+               }
+@@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
+               }
+               if (pn->rx_credits == 0) {
+-                      atomic_inc(&sk->sk_drops);
++                      atomic_inc_unchecked(&sk->sk_drops);
+                       err = NET_RX_DROP;
+                       break;
+               }
+diff --git a/net/phonet/socket.c b/net/phonet/socket.c
+index 1afd138..0b42453 100644
+--- a/net/phonet/socket.c
++++ b/net/phonet/socket.c
+@@ -612,7 +612,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
+                       from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+                       sock_i_ino(sk),
+                       atomic_read(&sk->sk_refcnt), sk,
+-                      atomic_read(&sk->sk_drops), &len);
++                      atomic_read_unchecked(&sk->sk_drops), &len);
+       }
+       seq_printf(seq, "%*s\n", 127 - len, "");
+       return 0;
+diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
+index d6bbbbd..61561e4 100644
+--- a/net/phonet/sysctl.c
++++ b/net/phonet/sysctl.c
+@@ -67,7 +67,7 @@ static int proc_local_port_range(ctl_table *table, int write,
+ {
+       int ret;
+       int range[2] = {local_port_range[0], local_port_range[1]};
+-      ctl_table tmp = {
++      ctl_table_no_const tmp = {
+               .data = &range,
+               .maxlen = sizeof(range),
+               .mode = table->mode,
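
ctl_table_no_const, used here and in several later sysctl hunks, comes from the patch's constification work: once the registered ctl_table arrays are treated as read-only, code that assembles a temporary table at runtime (on the stack, or via kmemdup() in the *_sysctl_register() paths further down) needs an explicitly writable variant of the type. A rough userspace analogue of keeping a read-only template and editing only a writable copy; the option table here is hypothetical:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct option_entry {
        const char *name;
        int         value;
    };

    /* The template lives in read-only data, like a constified ctl_table. */
    static const struct option_entry option_template[] = {
        { "local_port_range_low",  1024 },
        { "local_port_range_high", 65535 },
    };

    int main(void)
    {
        /* Runtime copies must use a writable (non-const) object. */
        struct option_entry *table = malloc(sizeof(option_template));

        if (!table)
            return 1;
        memcpy(table, option_template, sizeof(option_template));
        table[0].value = 32768;            /* per-instance adjustment */
        printf("%s = %d\n", table[0].name, table[0].value);
        free(table);
        return 0;
    }
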
+diff --git a/net/rds/cong.c b/net/rds/cong.c
+index e5b65ac..f3b6fb7 100644
+--- a/net/rds/cong.c
++++ b/net/rds/cong.c
+@@ -78,7 +78,7 @@
+  * finds that the saved generation number is smaller than the global generation
+  * number, it wakes up the process.
+  */
+-static atomic_t               rds_cong_generation = ATOMIC_INIT(0);
++static atomic_unchecked_t             rds_cong_generation = ATOMIC_INIT(0);
+ /*
+  * Congestion monitoring
+@@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
+       rdsdebug("waking map %p for %pI4\n",
+         map, &map->m_addr);
+       rds_stats_inc(s_cong_update_received);
+-      atomic_inc(&rds_cong_generation);
++      atomic_inc_unchecked(&rds_cong_generation);
+       if (waitqueue_active(&map->m_waitq))
+               wake_up(&map->m_waitq);
+       if (waitqueue_active(&rds_poll_waitq))
+@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
+ int rds_cong_updated_since(unsigned long *recent)
+ {
+-      unsigned long gen = atomic_read(&rds_cong_generation);
++      unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
+       if (likely(*recent == gen))
+               return 0;
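
atomic_unchecked_t and the *_unchecked() helpers used for rds_cong_generation (and for sk_drops, debug IDs and packet serials elsewhere in this patch) are the escape hatch for the PaX REFCOUNT hardening: with that feature enabled, plain atomic_t arithmetic is instrumented to catch reference-count overflows, so purely statistical counters that may legitimately wrap are switched to the unchecked variants instead. Conceptually the unchecked type behaves like an ordinary atomic counter; a small C11 sketch of a wrap-tolerant generation counter, illustrative only and not the kernel implementation:

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* A statistics counter where wrap-around is harmless: readers only
     * compare successive samples, so overflow never corrupts state. */
    static atomic_uint generation;

    static unsigned int bump_generation(void)
    {
        /* Similar in spirit to atomic_inc_return_unchecked(). */
        return atomic_fetch_add(&generation, 1u) + 1u;
    }

    int main(void)
    {
        atomic_store(&generation, UINT_MAX);   /* force a wrap */
        printf("%u\n", bump_generation());     /* prints 0: benign */
        return 0;
    }
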
+diff --git a/net/rds/ib.h b/net/rds/ib.h
+index 7280ab8..e04f4ea 100644
+--- a/net/rds/ib.h
++++ b/net/rds/ib.h
+@@ -128,7 +128,7 @@ struct rds_ib_connection {
+       /* sending acks */
+       unsigned long           i_ack_flags;
+ #ifdef KERNEL_HAS_ATOMIC64
+-      atomic64_t              i_ack_next;     /* next ACK to send */
++      atomic64_unchecked_t    i_ack_next;     /* next ACK to send */
+ #else
+       spinlock_t              i_ack_lock;     /* protect i_ack_next */
+       u64                     i_ack_next;     /* next ACK to send */
+diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
+index 31b74f5..dc1fbfa 100644
+--- a/net/rds/ib_cm.c
++++ b/net/rds/ib_cm.c
+@@ -717,7 +717,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
+       /* Clear the ACK state */
+       clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
+ #ifdef KERNEL_HAS_ATOMIC64
+-      atomic64_set(&ic->i_ack_next, 0);
++      atomic64_set_unchecked(&ic->i_ack_next, 0);
+ #else
+       ic->i_ack_next = 0;
+ #endif
+diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
+index 8eb9501..0c386ff 100644
+--- a/net/rds/ib_recv.c
++++ b/net/rds/ib_recv.c
+@@ -597,7 +597,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
+ static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
+                               int ack_required)
+ {
+-      atomic64_set(&ic->i_ack_next, seq);
++      atomic64_set_unchecked(&ic->i_ack_next, seq);
+       if (ack_required) {
+               smp_mb__before_clear_bit();
+               set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+@@ -609,7 +609,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
+       clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+       smp_mb__after_clear_bit();
+-      return atomic64_read(&ic->i_ack_next);
++      return atomic64_read_unchecked(&ic->i_ack_next);
+ }
+ #endif
+diff --git a/net/rds/iw.h b/net/rds/iw.h
+index 04ce3b1..48119a6 100644
+--- a/net/rds/iw.h
++++ b/net/rds/iw.h
+@@ -134,7 +134,7 @@ struct rds_iw_connection {
+       /* sending acks */
+       unsigned long           i_ack_flags;
+ #ifdef KERNEL_HAS_ATOMIC64
+-      atomic64_t              i_ack_next;     /* next ACK to send */
++      atomic64_unchecked_t    i_ack_next;     /* next ACK to send */
+ #else
+       spinlock_t              i_ack_lock;     /* protect i_ack_next */
+       u64                     i_ack_next;     /* next ACK to send */
+diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
+index a91e1db..cf3053f 100644
+--- a/net/rds/iw_cm.c
++++ b/net/rds/iw_cm.c
+@@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
+       /* Clear the ACK state */
+       clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
+ #ifdef KERNEL_HAS_ATOMIC64
+-      atomic64_set(&ic->i_ack_next, 0);
++      atomic64_set_unchecked(&ic->i_ack_next, 0);
+ #else
+       ic->i_ack_next = 0;
+ #endif
+diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
+index 4503335..db566b4 100644
+--- a/net/rds/iw_recv.c
++++ b/net/rds/iw_recv.c
+@@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
+ static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
+                               int ack_required)
+ {
+-      atomic64_set(&ic->i_ack_next, seq);
++      atomic64_set_unchecked(&ic->i_ack_next, seq);
+       if (ack_required) {
+               smp_mb__before_clear_bit();
+               set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+@@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
+       clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+       smp_mb__after_clear_bit();
+-      return atomic64_read(&ic->i_ack_next);
++      return atomic64_read_unchecked(&ic->i_ack_next);
+ }
+ #endif
+diff --git a/net/rds/rds.h b/net/rds/rds.h
+index ec1d731..90a3a8d 100644
+--- a/net/rds/rds.h
++++ b/net/rds/rds.h
+@@ -449,7 +449,7 @@ struct rds_transport {
+       void (*sync_mr)(void *trans_private, int direction);
+       void (*free_mr)(void *trans_private, int invalidate);
+       void (*flush_mrs)(void);
+-};
++} __do_const;
+ struct rds_sock {
+       struct sock             rs_sk;
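
The __do_const annotation on struct rds_transport, like the pax_list_* helpers used for the genetlink ops list earlier and the sctp address-family lists below, belongs to the constification side of the patch: tables of function pointers are moved into read-only memory so that a stray kernel write cannot redirect them, and any legitimate late update has to go through an explicit unlock/lock window. The underlying idiom is simply a const ops table; a short sketch with made-up names:

    #include <stdio.h>

    struct transport_ops {
        int  (*xmit)(const char *msg);
        void (*flush)(void);
    };

    static int  my_xmit(const char *msg) { return printf("tx: %s\n", msg); }
    static void my_flush(void)           { puts("flush"); }

    /* const lets the compiler keep the pointer table in read-only data,
     * so it cannot be silently overwritten at runtime. */
    static const struct transport_ops ops = {
        .xmit  = my_xmit,
        .flush = my_flush,
    };

    int main(void)
    {
        ops.xmit("hello");
        ops.flush();
        /* ops.xmit = NULL;  would not compile: the table is const. */
        return 0;
    }
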
+diff --git a/net/rds/tcp.c b/net/rds/tcp.c
+index edac9ef..16bcb98 100644
+--- a/net/rds/tcp.c
++++ b/net/rds/tcp.c
+@@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
+       int val = 1;
+       set_fs(KERNEL_DS);
+-      sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
++      sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
+                             sizeof(val));
+       set_fs(oldfs);
+ }
+diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
+index 81cf5a4..b5826ff 100644
+--- a/net/rds/tcp_send.c
++++ b/net/rds/tcp_send.c
+@@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+-      sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
++      sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
+                             sizeof(val));
+       set_fs(oldfs);
+ }
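
The __force_user casts in rds_tcp_nonagle() and rds_tcp_cork() cover calls that temporarily widen the address-space limit with set_fs(KERNEL_DS) and then pass a kernel buffer to an interface declared __user. The cast records that the kernel/user address-space mismatch is intentional, so the sparse checker and the patch's stricter user/kernel separation do not flag it; __force_user appears to be this patch's shorthand for the usual __force __user pair. A sketch of how such annotations are typically wired up for sparse; this mirrors the common kernel pattern, not necessarily the patch's exact definitions:

    /* The attributes only mean something to sparse; a normal compiler
     * sees empty macros. */
    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif

    static int fake_setsockopt(const char __user *optval)
    {
        (void)optval;
        return 0;
    }

    int main(void)
    {
        int val = 1;

        /* Without __force, sparse warns about crossing address spaces. */
        return fake_setsockopt((const char __force __user *)&val);
    }
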
+diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
+index e61aa60..f07cc89 100644
+--- a/net/rxrpc/af_rxrpc.c
++++ b/net/rxrpc/af_rxrpc.c
+@@ -40,7 +40,7 @@ static const struct proto_ops rxrpc_rpc_ops;
+ __be32 rxrpc_epoch;
+ /* current debugging ID */
+-atomic_t rxrpc_debug_id;
++atomic_unchecked_t rxrpc_debug_id;
+ /* count of skbs currently in use */
+ atomic_t rxrpc_n_skbs;
+diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
+index e4d9cbc..b229649 100644
+--- a/net/rxrpc/ar-ack.c
++++ b/net/rxrpc/ar-ack.c
+@@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
+       _enter("{%d,%d,%d,%d},",
+              call->acks_hard, call->acks_unacked,
+-             atomic_read(&call->sequence),
++             atomic_read_unchecked(&call->sequence),
+              CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
+       stop = 0;
+@@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
+                       /* each Tx packet has a new serial number */
+                       sp->hdr.serial =
+-                              htonl(atomic_inc_return(&call->conn->serial));
++                              htonl(atomic_inc_return_unchecked(&call->conn->serial));
+                       hdr = (struct rxrpc_header *) txb->head;
+                       hdr->serial = sp->hdr.serial;
+@@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
+  */
+ static void rxrpc_clear_tx_window(struct rxrpc_call *call)
+ {
+-      rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
++      rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
+ }
+ /*
+@@ -629,7 +629,7 @@ process_further:
+               latest = ntohl(sp->hdr.serial);
+               hard = ntohl(ack.firstPacket);
+-              tx = atomic_read(&call->sequence);
++              tx = atomic_read_unchecked(&call->sequence);
+               _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
+                      latest,
+@@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
+       goto maybe_reschedule;
+ send_ACK_with_skew:
+-      ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
++      ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
+                           ntohl(ack.serial));
+ send_ACK:
+       mtu = call->conn->trans->peer->if_mtu;
+@@ -1173,7 +1173,7 @@ send_ACK:
+       ackinfo.rxMTU   = htonl(5692);
+       ackinfo.jumbo_max = htonl(4);
+-      hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
++      hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
+       _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
+              ntohl(hdr.serial),
+              ntohs(ack.maxSkew),
+@@ -1191,7 +1191,7 @@ send_ACK:
+ send_message:
+       _debug("send message");
+-      hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
++      hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
+       _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
+ send_message_2:
+diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
+index a3bbb36..3341fb9 100644
+--- a/net/rxrpc/ar-call.c
++++ b/net/rxrpc/ar-call.c
+@@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
+       spin_lock_init(&call->lock);
+       rwlock_init(&call->state_lock);
+       atomic_set(&call->usage, 1);
+-      call->debug_id = atomic_inc_return(&rxrpc_debug_id);
++      call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
+       call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
+       memset(&call->sock_node, 0xed, sizeof(call->sock_node));
+diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
+index 4106ca9..a338d7a 100644
+--- a/net/rxrpc/ar-connection.c
++++ b/net/rxrpc/ar-connection.c
+@@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
+               rwlock_init(&conn->lock);
+               spin_lock_init(&conn->state_lock);
+               atomic_set(&conn->usage, 1);
+-              conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
++              conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
+               conn->avail_calls = RXRPC_MAXCALLS;
+               conn->size_align = 4;
+               conn->header_size = sizeof(struct rxrpc_header);
+diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
+index e7ed43a..6afa140 100644
+--- a/net/rxrpc/ar-connevent.c
++++ b/net/rxrpc/ar-connevent.c
+@@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
+       len = iov[0].iov_len + iov[1].iov_len;
+-      hdr.serial = htonl(atomic_inc_return(&conn->serial));
++      hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
+       _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
+       ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
+diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
+index 529572f..c758ca7 100644
+--- a/net/rxrpc/ar-input.c
++++ b/net/rxrpc/ar-input.c
+@@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
+       /* track the latest serial number on this connection for ACK packet
+        * information */
+       serial = ntohl(sp->hdr.serial);
+-      hi_serial = atomic_read(&call->conn->hi_serial);
++      hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
+       while (serial > hi_serial)
+-              hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
++              hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
+                                          serial);
+       /* request ACK generation for any ACK or DATA packet that requests
+diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
+index a693aca..81e7293 100644
+--- a/net/rxrpc/ar-internal.h
++++ b/net/rxrpc/ar-internal.h
+@@ -272,8 +272,8 @@ struct rxrpc_connection {
+       int                     error;          /* error code for local abort */
+       int                     debug_id;       /* debug ID for printks */
+       unsigned int            call_counter;   /* call ID counter */
+-      atomic_t                serial;         /* packet serial number counter */
+-      atomic_t                hi_serial;      /* highest serial number received */
++      atomic_unchecked_t      serial;         /* packet serial number counter */
++      atomic_unchecked_t      hi_serial;      /* highest serial number received */
+       u8                      avail_calls;    /* number of calls available */
+       u8                      size_align;     /* data size alignment (for security) */
+       u8                      header_size;    /* rxrpc + security header size */
+@@ -346,7 +346,7 @@ struct rxrpc_call {
+       spinlock_t              lock;
+       rwlock_t                state_lock;     /* lock for state transition */
+       atomic_t                usage;
+-      atomic_t                sequence;       /* Tx data packet sequence counter */
++      atomic_unchecked_t      sequence;       /* Tx data packet sequence counter */
+       u32                     abort_code;     /* local/remote abort code */
+       enum {                                  /* current state of call */
+               RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
+@@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
+  */
+ extern atomic_t rxrpc_n_skbs;
+ extern __be32 rxrpc_epoch;
+-extern atomic_t rxrpc_debug_id;
++extern atomic_unchecked_t rxrpc_debug_id;
+ extern struct workqueue_struct *rxrpc_workqueue;
+ /*
+diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
+index 87f7135..74d3703 100644
+--- a/net/rxrpc/ar-local.c
++++ b/net/rxrpc/ar-local.c
+@@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
+               spin_lock_init(&local->lock);
+               rwlock_init(&local->services_lock);
+               atomic_set(&local->usage, 1);
+-              local->debug_id = atomic_inc_return(&rxrpc_debug_id);
++              local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
+               memcpy(&local->srx, srx, sizeof(*srx));
+       }
+diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
+index e1ac183..b43e10e 100644
+--- a/net/rxrpc/ar-output.c
++++ b/net/rxrpc/ar-output.c
+@@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
+                       sp->hdr.cid = call->cid;
+                       sp->hdr.callNumber = call->call_id;
+                       sp->hdr.seq =
+-                              htonl(atomic_inc_return(&call->sequence));
++                              htonl(atomic_inc_return_unchecked(&call->sequence));
+                       sp->hdr.serial =
+-                              htonl(atomic_inc_return(&conn->serial));
++                              htonl(atomic_inc_return_unchecked(&conn->serial));
+                       sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
+                       sp->hdr.userStatus = 0;
+                       sp->hdr.securityIndex = conn->security_ix;
+diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
+index bebaa43..2644591 100644
+--- a/net/rxrpc/ar-peer.c
++++ b/net/rxrpc/ar-peer.c
+@@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
+               INIT_LIST_HEAD(&peer->error_targets);
+               spin_lock_init(&peer->lock);
+               atomic_set(&peer->usage, 1);
+-              peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
++              peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
+               memcpy(&peer->srx, srx, sizeof(*srx));
+               rxrpc_assess_MTU_size(peer);
+diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
+index 38047f7..9f48511 100644
+--- a/net/rxrpc/ar-proc.c
++++ b/net/rxrpc/ar-proc.c
+@@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
+                  atomic_read(&conn->usage),
+                  rxrpc_conn_states[conn->state],
+                  key_serial(conn->key),
+-                 atomic_read(&conn->serial),
+-                 atomic_read(&conn->hi_serial));
++                 atomic_read_unchecked(&conn->serial),
++                 atomic_read_unchecked(&conn->hi_serial));
+       return 0;
+ }
+diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
+index 92df566..87ec1bf 100644
+--- a/net/rxrpc/ar-transport.c
++++ b/net/rxrpc/ar-transport.c
+@@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
+               spin_lock_init(&trans->client_lock);
+               rwlock_init(&trans->conn_lock);
+               atomic_set(&trans->usage, 1);
+-              trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
++              trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
+               if (peer->srx.transport.family == AF_INET) {
+                       switch (peer->srx.transport_type) {
+diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
+index f226709..0e735a8 100644
+--- a/net/rxrpc/rxkad.c
++++ b/net/rxrpc/rxkad.c
+@@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
+       len = iov[0].iov_len + iov[1].iov_len;
+-      hdr.serial = htonl(atomic_inc_return(&conn->serial));
++      hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
+       _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
+       ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
+@@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
+       len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
+-      hdr->serial = htonl(atomic_inc_return(&conn->serial));
++      hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
+       _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
+       ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 391a245..296b3d7 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -981,7 +981,7 @@ static const struct inet6_protocol sctpv6_protocol = {
+       .flags        = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
+ };
+-static struct sctp_af sctp_af_inet6 = {
++static struct sctp_af sctp_af_inet6 __read_only = {
+       .sa_family         = AF_INET6,
+       .sctp_xmit         = sctp_v6_xmit,
+       .setsockopt        = ipv6_setsockopt,
+@@ -1013,7 +1013,7 @@ static struct sctp_af sctp_af_inet6 = {
+ #endif
+ };
+-static struct sctp_pf sctp_pf_inet6 = {
++static struct sctp_pf sctp_pf_inet6 __read_only = {
+       .event_msgname = sctp_inet6_event_msgname,
+       .skb_msgname   = sctp_inet6_skb_msgname,
+       .af_supported  = sctp_inet6_af_supported,
+@@ -1038,7 +1038,7 @@ void sctp_v6_pf_init(void)
+ void sctp_v6_pf_exit(void)
+ {
+-      list_del(&sctp_af_inet6.list);
++      pax_list_del(&sctp_af_inet6.list);
+ }
+ /* Initialize IPv6 support and register with socket layer.  */
+diff --git a/net/sctp/proc.c b/net/sctp/proc.c
+index 4e45ee3..e66a031 100644
+--- a/net/sctp/proc.c
++++ b/net/sctp/proc.c
+@@ -337,7 +337,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
+               seq_printf(seq,
+                          "%8pK %8pK %-3d %-3d %-2d %-4d "
+                          "%4d %8d %8d %7d %5lu %-5d %5d ",
+-                         assoc, sk, sctp_sk(sk)->type, sk->sk_state,
++                         assoc, sk,
++                         sctp_sk(sk)->type, sk->sk_state,
+                          assoc->state, hash,
+                          assoc->assoc_id,
+                          assoc->sndbuf_used,
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index eaee00c..97c0afd 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -834,8 +834,10 @@ int sctp_register_af(struct sctp_af *af)
+               return 0;
+       }
++      pax_open_kernel();
+       INIT_LIST_HEAD(&af->list);
+-      list_add_tail(&af->list, &sctp_address_families);
++      pax_close_kernel();
++      pax_list_add_tail(&af->list, &sctp_address_families);
+       return 1;
+ }
+@@ -966,7 +968,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
+ static struct sctp_af sctp_af_inet;
+-static struct sctp_pf sctp_pf_inet = {
++static struct sctp_pf sctp_pf_inet __read_only = {
+       .event_msgname = sctp_inet_event_msgname,
+       .skb_msgname   = sctp_inet_skb_msgname,
+       .af_supported  = sctp_inet_af_supported,
+@@ -1037,7 +1039,7 @@ static const struct net_protocol sctp_protocol = {
+ };
+ /* IPv4 address related functions.  */
+-static struct sctp_af sctp_af_inet = {
++static struct sctp_af sctp_af_inet __read_only = {
+       .sa_family         = AF_INET,
+       .sctp_xmit         = sctp_v4_xmit,
+       .setsockopt        = ip_setsockopt,
+@@ -1122,7 +1124,7 @@ static void sctp_v4_pf_init(void)
+ static void sctp_v4_pf_exit(void)
+ {
+-      list_del(&sctp_af_inet.list);
++      pax_list_del(&sctp_af_inet.list);
+ }
+ static int sctp_v4_protosw_init(void)
+diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
+index 8aab894..f6b7e7d 100644
+--- a/net/sctp/sm_sideeffect.c
++++ b/net/sctp/sm_sideeffect.c
+@@ -447,7 +447,7 @@ static void sctp_generate_sack_event(unsigned long data)
+       sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
+ }
+-sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
++sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
+       NULL,
+       sctp_generate_t1_cookie_event,
+       sctp_generate_t1_init_event,
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 6abb1ca..1678f8b 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -2167,11 +2167,13 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
+ {
+       struct sctp_association *asoc;
+       struct sctp_ulpevent *event;
++      struct sctp_event_subscribe subscribe;
+       if (optlen > sizeof(struct sctp_event_subscribe))
+               return -EINVAL;
+-      if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
++      if (copy_from_user(&subscribe, optval, optlen))
+               return -EFAULT;
++      sctp_sk(sk)->subscribe = subscribe;
+       /*
+        * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
+@@ -4222,13 +4224,16 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
+ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
+                                 int __user *optlen)
+ {
++      struct sctp_event_subscribe subscribe;
++
+       if (len <= 0)
+               return -EINVAL;
+       if (len > sizeof(struct sctp_event_subscribe))
+               len = sizeof(struct sctp_event_subscribe);
+       if (put_user(len, optlen))
+               return -EFAULT;
+-      if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
++      subscribe = sctp_sk(sk)->subscribe;
++      if (copy_to_user(optval, &subscribe, len))
+               return -EFAULT;
+       return 0;
+ }
+@@ -4246,6 +4251,8 @@ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
+  */
+ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
+ {
++      __u32 autoclose;
++
+       /* Applicable to UDP-style socket only */
+       if (sctp_style(sk, TCP))
+               return -EOPNOTSUPP;
+@@ -4254,7 +4261,8 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
+       len = sizeof(int);
+       if (put_user(len, optlen))
+               return -EFAULT;
+-      if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
++      autoclose = sctp_sk(sk)->autoclose;
++      if (copy_to_user(optval, &autoclose, sizeof(int)))
+               return -EFAULT;
+       return 0;
+ }
+@@ -4626,12 +4634,15 @@ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
+  */
+ static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
+ {
++      struct sctp_initmsg initmsg;
++
+       if (len < sizeof(struct sctp_initmsg))
+               return -EINVAL;
+       len = sizeof(struct sctp_initmsg);
+       if (put_user(len, optlen))
+               return -EFAULT;
+-      if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
++      initmsg = sctp_sk(sk)->initmsg;
++      if (copy_to_user(optval, &initmsg, len))
+               return -EFAULT;
+       return 0;
+ }
+@@ -4672,6 +4683,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
+               addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
+               if (space_left < addrlen)
+                       return -ENOMEM;
++              if (addrlen > sizeof(temp) || addrlen < 0)
++                      return -EFAULT;
+               if (copy_to_user(to, &temp, addrlen))
+                       return -EFAULT;
+               to += addrlen;
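
Several sctp_getsockopt_*() helpers above now copy the requested field into a local variable and hand only that local to copy_to_user(), rather than pointing copy_to_user() directly into struct sctp_sock; the packet_recv_error() change earlier does the same for serr->ee. Bouncing through a fixed-size stack object keeps the user copy bounded by an object whose size is known at the call site, which is what the patch's hardened usercopy checking wants to see, and it avoids exposing neighbouring fields if the length accounting ever goes wrong. A userspace sketch of the bounce pattern; memcpy stands in for copy_to_user and the structure is invented:

    #include <stdio.h>
    #include <string.h>

    struct sock_state {
        unsigned int autoclose;
        unsigned int secret_token;   /* must never reach userspace */
    };

    /* Copy only a bounded, freshly built local out to the caller. */
    static int get_autoclose(const struct sock_state *sk,
                             void *user_buf, size_t len)
    {
        unsigned int autoclose = sk->autoclose;   /* the bounce copy */

        if (len != sizeof(autoclose))
            return -1;
        memcpy(user_buf, &autoclose, len);        /* copy_to_user() stand-in */
        return 0;
    }

    int main(void)
    {
        struct sock_state sk = { .autoclose = 30, .secret_token = 0xdead };
        unsigned int out = 0;

        if (get_autoclose(&sk, &out, sizeof(out)))
            return 1;
        printf("autoclose = %u\n", out);
        return 0;
    }
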
+diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
+index bf3c6e8..376d8d0 100644
+--- a/net/sctp/sysctl.c
++++ b/net/sctp/sysctl.c
+@@ -307,7 +307,7 @@ static int proc_sctp_do_hmac_alg(ctl_table *ctl,
+ {
+       struct net *net = current->nsproxy->net_ns;
+       char tmp[8];
+-      ctl_table tbl;
++      ctl_table_no_const tbl;
+       int ret;
+       int changed = 0;
+       char *none = "none";
+@@ -350,7 +350,7 @@ static int proc_sctp_do_hmac_alg(ctl_table *ctl,
+ int sctp_sysctl_net_register(struct net *net)
+ {
+-      struct ctl_table *table;
++      ctl_table_no_const *table;
+       int i;
+       table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
+diff --git a/net/sctp/transport.c b/net/sctp/transport.c
+index 098f1d5f..60da2f7 100644
+--- a/net/sctp/transport.c
++++ b/net/sctp/transport.c
+@@ -178,12 +178,12 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
+ {
+       SCTP_ASSERT(transport->dead, "Transport is not dead", return);
+-      call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
+-
+       sctp_packet_free(&transport->packet);
+       if (transport->asoc)
+               sctp_association_put(transport->asoc);
++
++      call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
+ }
+ /* Start T3_rtx timer if it is not already running and update the heartbeat
+diff --git a/net/socket.c b/net/socket.c
+index 4ca1526..df83e47 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -88,6 +88,7 @@
+ #include <linux/magic.h>
+ #include <linux/slab.h>
+ #include <linux/xattr.h>
++#include <linux/in.h>
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+@@ -105,6 +106,8 @@
+ #include <linux/sockios.h>
+ #include <linux/atalk.h>
++#include <linux/grsock.h>
++
+ static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
+ static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
+                        unsigned long nr_segs, loff_t pos);
+@@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
+               &sockfs_dentry_operations, SOCKFS_MAGIC);
+ }
+-static struct vfsmount *sock_mnt __read_mostly;
++struct vfsmount *sock_mnt __read_mostly;
+ static struct file_system_type sock_fs_type = {
+       .name =         "sockfs",
+@@ -1246,6 +1249,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
+               return -EAFNOSUPPORT;
+       if (type < 0 || type >= SOCK_MAX)
+               return -EINVAL;
++      if (protocol < 0)
++              return -EINVAL;
+       /* Compatibility.
+@@ -1377,6 +1382,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
+       if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
+               flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
++      if(!gr_search_socket(family, type, protocol)) {
++              retval = -EACCES;
++              goto out;
++      }
++
++      if (gr_handle_sock_all(family, type, protocol)) {
++              retval = -EACCES;
++              goto out;
++      }
++
+       retval = sock_create(family, type, protocol, &sock);
+       if (retval < 0)
+               goto out;
+@@ -1504,6 +1519,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
+       if (sock) {
+               err = move_addr_to_kernel(umyaddr, addrlen, &address);
+               if (err >= 0) {
++                      if (gr_handle_sock_server((struct sockaddr *)&address)) {
++                              err = -EACCES;
++                              goto error;
++                      }
++                      err = gr_search_bind(sock, (struct sockaddr_in *)&address);
++                      if (err)
++                              goto error;
++
+                       err = security_socket_bind(sock,
+                                                  (struct sockaddr *)&address,
+                                                  addrlen);
+@@ -1512,6 +1535,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
+                                                     (struct sockaddr *)
+                                                     &address, addrlen);
+               }
++error:
+               fput_light(sock->file, fput_needed);
+       }
+       return err;
+@@ -1535,10 +1559,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
+               if ((unsigned int)backlog > somaxconn)
+                       backlog = somaxconn;
++              if (gr_handle_sock_server_other(sock->sk)) {
++                      err = -EPERM;
++                      goto error;
++              }
++
++              err = gr_search_listen(sock);
++              if (err)
++                      goto error;
++
+               err = security_socket_listen(sock, backlog);
+               if (!err)
+                       err = sock->ops->listen(sock, backlog);
++error:
+               fput_light(sock->file, fput_needed);
+       }
+       return err;
+@@ -1582,6 +1616,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
+       newsock->type = sock->type;
+       newsock->ops = sock->ops;
++      if (gr_handle_sock_server_other(sock->sk)) {
++              err = -EPERM;
++              sock_release(newsock);
++              goto out_put;
++      }
++
++      err = gr_search_accept(sock);
++      if (err) {
++              sock_release(newsock);
++              goto out_put;
++      }
++
+       /*
+        * We don't need try_module_get here, as the listening socket (sock)
+        * has the protocol module (sock->ops->owner) held.
+@@ -1627,6 +1673,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
+       fd_install(newfd, newfile);
+       err = newfd;
++      gr_attach_curr_ip(newsock->sk);
++
+ out_put:
+       fput_light(sock->file, fput_needed);
+ out:
+@@ -1659,6 +1707,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
+               int, addrlen)
+ {
+       struct socket *sock;
++      struct sockaddr *sck;
+       struct sockaddr_storage address;
+       int err, fput_needed;
+@@ -1669,6 +1718,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
+       if (err < 0)
+               goto out_put;
++      sck = (struct sockaddr *)&address;
++
++      if (gr_handle_sock_client(sck)) {
++              err = -EACCES;
++              goto out_put;
++      }
++
++      err = gr_search_connect(sock, (struct sockaddr_in *)sck);
++      if (err)
++              goto out_put;
++
+       err =
+           security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
+       if (err)
+@@ -1750,6 +1810,8 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
+  *    the protocol.
+  */
++asmlinkage long sys_sendto(int, void *, size_t, unsigned, struct sockaddr *, int);
++
+ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
+               unsigned int, flags, struct sockaddr __user *, addr,
+               int, addr_len)
+@@ -1816,7 +1878,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
+       struct socket *sock;
+       struct iovec iov;
+       struct msghdr msg;
+-      struct sockaddr_storage address;
++      struct sockaddr_storage address = { };
+       int err, err2;
+       int fput_needed;
+@@ -2023,7 +2085,7 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
+                * checking falls down on this.
+                */
+               if (copy_from_user(ctl_buf,
+-                                 (void __user __force *)msg_sys->msg_control,
++                                 (void __force_user *)msg_sys->msg_control,
+                                  ctl_len))
+                       goto out_freectl;
+               msg_sys->msg_control = ctl_buf;
+@@ -2174,7 +2236,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+       int err, total_len, len;
+       /* kernel mode address */
+-      struct sockaddr_storage addr;
++      struct sockaddr_storage addr = { };
+       /* user mode address pointers */
+       struct sockaddr __user *uaddr;
+@@ -2202,7 +2264,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+        *      kernel msghdr to use the kernel address space)
+        */
+-      uaddr = (__force void __user *)msg_sys->msg_name;
++      uaddr = (void __force_user *)msg_sys->msg_name;
+       uaddr_len = COMPAT_NAMELEN(msg);
+       if (MSG_CMSG_COMPAT & flags) {
+               err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
+@@ -2955,7 +3017,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
+               old_fs = get_fs();
+               set_fs(KERNEL_DS);
+               err = dev_ioctl(net, cmd,
+-                              (struct ifreq __user __force *) &kifr);
++                              (struct ifreq __force_user *) &kifr);
+               set_fs(old_fs);
+               return err;
+@@ -3064,7 +3126,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
+       old_fs = get_fs();
+       set_fs(KERNEL_DS);
+-      err = dev_ioctl(net, cmd, (void  __user __force *)&ifr);
++      err = dev_ioctl(net, cmd, (void  __force_user *)&ifr);
+       set_fs(old_fs);
+       if (cmd == SIOCGIFMAP && !err) {
+@@ -3169,7 +3231,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
+               ret |= __get_user(rtdev, &(ur4->rt_dev));
+               if (rtdev) {
+                       ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
+-                      r4.rt_dev = (char __user __force *)devname;
++                      r4.rt_dev = (char __force_user *)devname;
+                       devname[15] = 0;
+               } else
+                       r4.rt_dev = NULL;
+@@ -3395,8 +3457,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
+       int __user *uoptlen;
+       int err;
+-      uoptval = (char __user __force *) optval;
+-      uoptlen = (int __user __force *) optlen;
++      uoptval = (char __force_user *) optval;
++      uoptlen = (int __force_user *) optlen;
+       set_fs(KERNEL_DS);
+       if (level == SOL_SOCKET)
+@@ -3416,7 +3478,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
+       char __user *uoptval;
+       int err;
+-      uoptval = (char __user __force *) optval;
++      uoptval = (char __force_user *) optval;
+       set_fs(KERNEL_DS);
+       if (level == SOL_SOCKET)
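
The net/socket.c hunks wire the grsecurity socket-ACL hooks (gr_search_socket(), gr_handle_sock_all(), gr_search_bind(), gr_search_listen(), gr_search_connect(), gr_search_accept(), gr_attach_curr_ip()) into the generic socket(2), bind(2), listen(2), connect(2) and accept(2) paths, so a denied operation fails with -EACCES or -EPERM before the LSM and protocol code ever run. The shape of such a gate is just an early policy check; the following is a deliberately simplified and hypothetical sketch, not the real grsecurity RBAC logic:

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical per-task policy: which address families may be used. */
    struct task_policy {
        unsigned int allowed_families;   /* bitmask indexed by family */
    };

    static int policy_allows_socket(const struct task_policy *p,
                                    int family, int type, int protocol)
    {
        (void)type; (void)protocol;      /* a real check would use these too */
        return (p->allowed_families >> family) & 1u;
    }

    static int checked_socket(const struct task_policy *p,
                              int family, int type, int protocol)
    {
        if (!policy_allows_socket(p, family, type, protocol))
            return -EACCES;              /* deny before any socket exists */
        return 3;                        /* pretend fd from the real socket() */
    }

    int main(void)
    {
        struct task_policy p = { .allowed_families = 1u << 2 /* AF_INET */ };

        printf("AF_INET   -> %d\n", checked_socket(&p, 2, 1, 0));
        printf("AF_PACKET -> %d\n", checked_socket(&p, 17, 3, 0));
        return 0;
    }
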
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 426f8fc..1ef9c32 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1288,7 +1288,9 @@ call_start(struct rpc_task *task)
+                       (RPC_IS_ASYNC(task) ? "async" : "sync"));
+       /* Increment call count */
+-      task->tk_msg.rpc_proc->p_count++;
++      pax_open_kernel();
++      (*(unsigned int *)&task->tk_msg.rpc_proc->p_count)++;
++      pax_close_kernel();
+       clnt->cl_stats->rpccnt++;
+       task->tk_action = call_reserve;
+ }
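
pax_open_kernel()/pax_close_kernel() around the p_count++ bump (and around the equivalent pc_count++ in net/sunrpc/svc.c below) exist because the RPC procedure tables become read-only under constification: the pair briefly re-enables writes so this one legitimate statistics update can land, then locks the data down again. A rough userspace analogy using mprotect() on the page holding the counter; this only illustrates the open-write-close discipline, not how PaX implements it:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        unsigned int *count = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (count == MAP_FAILED)
            return 1;
        *count = 0;
        mprotect(count, pagesz, PROT_READ);               /* "constified" */

        mprotect(count, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
        (*count)++;                                       /* the one allowed write */
        mprotect(count, pagesz, PROT_READ);               /* pax_close_kernel() */

        printf("count = %u\n", *count);
        munmap(count, pagesz);
        return 0;
    }
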
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 5356b12..c0f4c29 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -261,9 +261,9 @@ static int rpc_wait_bit_killable(void *word)
+ #ifdef RPC_DEBUG
+ static void rpc_task_set_debuginfo(struct rpc_task *task)
+ {
+-      static atomic_t rpc_pid;
++      static atomic_unchecked_t rpc_pid;
+-      task->tk_pid = atomic_inc_return(&rpc_pid);
++      task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
+ }
+ #else
+ static inline void rpc_task_set_debuginfo(struct rpc_task *task)
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 89a588b..678ed90 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -740,7 +740,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
+               __module_get(serv->sv_module);
+               task = kthread_create_on_node(serv->sv_function, rqstp,
+-                                            node, serv->sv_name);
++                                            node, "%s", serv->sv_name);
+               if (IS_ERR(task)) {
+                       error = PTR_ERR(task);
+                       module_put(serv->sv_module);
+@@ -1160,7 +1160,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
+       svc_putnl(resv, RPC_SUCCESS);
+       /* Bump per-procedure stats counter */
+-      procp->pc_count++;
++      pax_open_kernel();
++      (*(unsigned int *)&procp->pc_count)++;
++      pax_close_kernel();
+       /* Initialize storage for argp and resp */
+       memset(rqstp->rq_argp, 0, procp->pc_argsize);
+diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
+index 8343737..677025e 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma.c
++++ b/net/sunrpc/xprtrdma/svc_rdma.c
+@@ -62,15 +62,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
+ static unsigned int min_max_inline = 4096;
+ static unsigned int max_max_inline = 65536;
+-atomic_t rdma_stat_recv;
+-atomic_t rdma_stat_read;
+-atomic_t rdma_stat_write;
+-atomic_t rdma_stat_sq_starve;
+-atomic_t rdma_stat_rq_starve;
+-atomic_t rdma_stat_rq_poll;
+-atomic_t rdma_stat_rq_prod;
+-atomic_t rdma_stat_sq_poll;
+-atomic_t rdma_stat_sq_prod;
++atomic_unchecked_t rdma_stat_recv;
++atomic_unchecked_t rdma_stat_read;
++atomic_unchecked_t rdma_stat_write;
++atomic_unchecked_t rdma_stat_sq_starve;
++atomic_unchecked_t rdma_stat_rq_starve;
++atomic_unchecked_t rdma_stat_rq_poll;
++atomic_unchecked_t rdma_stat_rq_prod;
++atomic_unchecked_t rdma_stat_sq_poll;
++atomic_unchecked_t rdma_stat_sq_prod;
+ /* Temporary NFS request map and context caches */
+ struct kmem_cache *svc_rdma_map_cachep;
+@@ -110,7 +110,7 @@ static int read_reset_stat(ctl_table *table, int write,
+               len -= *ppos;
+               if (len > *lenp)
+                       len = *lenp;
+-              if (len && copy_to_user(buffer, str_buf, len))
++              if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
+                       return -EFAULT;
+               *lenp = len;
+               *ppos += len;
+@@ -151,63 +151,63 @@ static ctl_table svcrdma_parm_table[] = {
+       {
+               .procname       = "rdma_stat_read",
+               .data           = &rdma_stat_read,
+-              .maxlen         = sizeof(atomic_t),
++              .maxlen         = sizeof(atomic_unchecked_t),
+               .mode           = 0644,
+               .proc_handler   = read_reset_stat,
+       },
+       {
+               .procname       = "rdma_stat_recv",
+               .data           = &rdma_stat_recv,
+-              .maxlen         = sizeof(atomic_t),
++              .maxlen         = sizeof(atomic_unchecked_t),
+               .mode           = 0644,
+               .proc_handler   = read_reset_stat,
+       },
+       {
+               .procname       = "rdma_stat_write",
+               .data           = &rdma_stat_write,
+-              .maxlen         = sizeof(atomic_t),
++              .maxlen         = sizeof(atomic_unchecked_t),
+               .mode           = 0644,
+               .proc_handler   = read_reset_stat,
+       },
+       {
+               .procname       = "rdma_stat_sq_starve",
+               .data           = &rdma_stat_sq_starve,
+-              .maxlen         = sizeof(atomic_t),
++              .maxlen         = sizeof(atomic_unchecked_t),
+               .mode           = 0644,
+               .proc_handler   = read_reset_stat,
+       },
+       {
+               .procname       = "rdma_stat_rq_starve",
+               .data           = &rdma_stat_rq_starve,
+-              .maxlen         = sizeof(atomic_t),
++              .maxlen         = sizeof(atomic_unchecked_t),
+               .mode           = 0644,
+               .proc_handler   = read_reset_stat,
+       },
+       {
+               .procname       = "rdma_stat_rq_poll",
+               .data           = &rdma_stat_rq_poll,
+-              .maxlen         = sizeof(atomic_t),
++              .maxlen         = sizeof(atomic_unchecked_t),
+               .mode           = 0644,
+               .proc_handler   = read_reset_stat,
+       },
+       {
+               .procname       = "rdma_stat_rq_prod",
+               .data           = &rdma_stat_rq_prod,
+-              .maxlen         = sizeof(atomic_t),
++              .maxlen         = sizeof(atomic_unchecked_t),
+               .mode           = 0644,
+               .proc_handler   = read_reset_stat,
+       },
+       {
+               .procname       = "rdma_stat_sq_poll",
+               .data           = &rdma_stat_sq_poll,
+-              .maxlen         = sizeof(atomic_t),
++              .maxlen         = sizeof(atomic_unchecked_t),
+               .mode           = 0644,
+               .proc_handler   = read_reset_stat,
+       },
+       {
+               .procname       = "rdma_stat_sq_prod",
+               .data           = &rdma_stat_sq_prod,
+-              .maxlen         = sizeof(atomic_t),
++              .maxlen         = sizeof(atomic_unchecked_t),
+               .mode           = 0644,
+               .proc_handler   = read_reset_stat,
+       },
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index 0ce7552..d074459 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -501,7 +501,7 @@ next_sge:
+                       svc_rdma_put_context(ctxt, 0);
+                       goto out;
+               }
+-              atomic_inc(&rdma_stat_read);
++              atomic_inc_unchecked(&rdma_stat_read);
+               if (read_wr.num_sge < chl_map->ch[ch_no].count) {
+                       chl_map->ch[ch_no].count -= read_wr.num_sge;
+@@ -611,7 +611,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
+                                 dto_q);
+               list_del_init(&ctxt->dto_q);
+       } else {
+-              atomic_inc(&rdma_stat_rq_starve);
++              atomic_inc_unchecked(&rdma_stat_rq_starve);
+               clear_bit(XPT_DATA, &xprt->xpt_flags);
+               ctxt = NULL;
+       }
+@@ -631,7 +631,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
+       dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
+               ctxt, rdma_xprt, rqstp, ctxt->wc_status);
+       BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
+-      atomic_inc(&rdma_stat_recv);
++      atomic_inc_unchecked(&rdma_stat_recv);
+       /* Build up the XDR from the receive buffers. */
+       rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+index c1d124d..acfc59e 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+@@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
+       write_wr.wr.rdma.remote_addr = to;
+       /* Post It */
+-      atomic_inc(&rdma_stat_write);
++      atomic_inc_unchecked(&rdma_stat_write);
+       if (svc_rdma_send(xprt, &write_wr))
+               goto err;
+       return 0;
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index 62e4f9b..dd3f2d7 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
+               return;
+       ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
+-      atomic_inc(&rdma_stat_rq_poll);
++      atomic_inc_unchecked(&rdma_stat_rq_poll);
+       while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
+               ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
+@@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
+       }
+       if (ctxt)
+-              atomic_inc(&rdma_stat_rq_prod);
++              atomic_inc_unchecked(&rdma_stat_rq_prod);
+       set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
+       /*
+@@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
+               return;
+       ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
+-      atomic_inc(&rdma_stat_sq_poll);
++      atomic_inc_unchecked(&rdma_stat_sq_poll);
+       while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
+               if (wc.status != IB_WC_SUCCESS)
+                       /* Close the transport */
+@@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
+       }
+       if (ctxt)
+-              atomic_inc(&rdma_stat_sq_prod);
++              atomic_inc_unchecked(&rdma_stat_sq_prod);
+ }
+ static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
+@@ -1262,7 +1262,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
+               spin_lock_bh(&xprt->sc_lock);
+               if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
+                       spin_unlock_bh(&xprt->sc_lock);
+-                      atomic_inc(&rdma_stat_sq_starve);
++                      atomic_inc_unchecked(&rdma_stat_sq_starve);
+                       /* See if we can opportunistically reap SQ WR to make room */
+                       sq_cq_reap(xprt);
+diff --git a/net/sysctl_net.c b/net/sysctl_net.c
+index 9bc6db0..47ac8c0 100644
+--- a/net/sysctl_net.c
++++ b/net/sysctl_net.c
+@@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_header *head,
+       kgid_t root_gid = make_kgid(net->user_ns, 0);
+       /* Allow network administrator to have same access as root. */
+-      if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
++      if (ns_capable_nolog(net->user_ns, CAP_NET_ADMIN) ||
+           uid_eq(root_uid, current_uid())) {
+               int mode = (table->mode >> 6) & 7;
+               return (mode << 6) | (mode << 3) | mode;
+diff --git a/net/tipc/link.c b/net/tipc/link.c
+index a80feee..2bbbe70 100644
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -1201,7 +1201,7 @@ static int link_send_sections_long(struct tipc_port *sender,
+       struct tipc_msg fragm_hdr;
+       struct sk_buff *buf, *buf_chain, *prev;
+       u32 fragm_crs, fragm_rest, hsz, sect_rest;
+-      const unchar *sect_crs;
++      const unchar __user *sect_crs;
+       int curr_sect;
+       u32 fragm_no;
+@@ -1242,7 +1242,7 @@ again:
+               if (!sect_rest) {
+                       sect_rest = msg_sect[++curr_sect].iov_len;
+-                      sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
++                      sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
+               }
+               if (sect_rest < fragm_rest)
+@@ -1261,7 +1261,7 @@ error:
+                       }
+               } else
+                       skb_copy_to_linear_data_offset(buf, fragm_crs,
+-                                                     sect_crs, sz);
++                                                     (const void __force_kernel *)sect_crs, sz);
+               sect_crs += sz;
+               sect_rest -= sz;
+               fragm_crs += sz;
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index f2db8a8..9245aa4 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -98,7 +98,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
+                                             msg_sect[cnt].iov_len);
+               else
+                       skb_copy_to_linear_data_offset(*buf, pos,
+-                                                     msg_sect[cnt].iov_base,
++                                                     (const void __force_kernel *)msg_sect[cnt].iov_base,
+                                                      msg_sect[cnt].iov_len);
+               pos += msg_sect[cnt].iov_len;
+       }
+diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
+index 6b42d47..2ac24d5 100644
+--- a/net/tipc/subscr.c
++++ b/net/tipc/subscr.c
+@@ -96,7 +96,7 @@ static void subscr_send_event(struct tipc_subscription *sub,
+ {
+       struct iovec msg_sect;
+-      msg_sect.iov_base = (void *)&sub->evt;
++      msg_sect.iov_base = (void __force_user *)&sub->evt;
+       msg_sect.iov_len = sizeof(struct tipc_event);
+       sub->evt.event = htohl(event, sub->swap);
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 826e099..4fa8c93 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -783,6 +783,12 @@ static struct sock *unix_find_other(struct net *net,
+               err = -ECONNREFUSED;
+               if (!S_ISSOCK(inode->i_mode))
+                       goto put_fail;
++
++              if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
++                      err = -EACCES;
++                      goto put_fail;
++              }
++
+               u = unix_find_socket_byinode(inode);
+               if (!u)
+                       goto put_fail;
+@@ -803,6 +809,13 @@ static struct sock *unix_find_other(struct net *net,
+               if (u) {
+                       struct dentry *dentry;
+                       dentry = unix_sk(u)->path.dentry;
++
++                      if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
++                              err = -EPERM;
++                              sock_put(u);
++                              goto fail;
++                      }
++
+                       if (dentry)
+                               touch_atime(&unix_sk(u)->path);
+               } else
+@@ -836,12 +849,18 @@ static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
+        */
+       err = security_path_mknod(&path, dentry, mode, 0);
+       if (!err) {
++              if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
++                      err = -EACCES;
++                      goto out;
++              }
+               err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
+               if (!err) {
+                       res->mnt = mntget(path.mnt);
+                       res->dentry = dget(dentry);
++                      gr_handle_create(dentry, path.mnt);
+               }
+       }
++out:
+       done_path_create(&path, dentry);
+       return err;
+ }
+@@ -2324,9 +2343,13 @@ static int unix_seq_show(struct seq_file *seq, void *v)
+               seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
+                        "Inode Path\n");
+       else {
+-              struct sock *s = v;
++              struct sock *s = v, *peer;
+               struct unix_sock *u = unix_sk(s);
+               unix_state_lock(s);
++              peer = unix_peer(s);
++              unix_state_unlock(s);
++
++              unix_state_double_lock(s, peer);
+               seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
+                       s,
+@@ -2353,8 +2376,10 @@ static int unix_seq_show(struct seq_file *seq, void *v)
+                       }
+                       for ( ; i < len; i++)
+                               seq_putc(seq, u->addr->name->sun_path[i]);
+-              }
+-              unix_state_unlock(s);
++              } else if (peer)
++                      seq_printf(seq, " P%lu", sock_i_ino(peer));
++
++              unix_state_double_unlock(s, peer);
+               seq_putc(seq, '\n');
+       }
+diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
+index 8800604..0526440 100644
+--- a/net/unix/sysctl_net_unix.c
++++ b/net/unix/sysctl_net_unix.c
+@@ -28,7 +28,7 @@ static ctl_table unix_table[] = {
+ int __net_init unix_sysctl_register(struct net *net)
+ {
+-      struct ctl_table *table;
++      ctl_table_no_const *table;
+       table = kmemdup(unix_table, sizeof(unix_table), GFP_KERNEL);
+       if (table == NULL)
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 3f77f42..662d89b 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -335,7 +335,7 @@ void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
+       for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
+               struct vsock_sock *vsk;
+               list_for_each_entry(vsk, &vsock_connected_table[i],
+-                                  connected_table);
++                                  connected_table)
+                       fn(sk_vsock(vsk));
+       }
+diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
+index c8717c1..08539f5 100644
+--- a/net/wireless/wext-core.c
++++ b/net/wireless/wext-core.c
+@@ -748,8 +748,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
+                */
+               /* Support for very large requests */
+-              if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
+-                  (user_length > descr->max_tokens)) {
++              if (user_length > descr->max_tokens) {
+                       /* Allow userspace to GET more than max so
+                        * we can support any size GET requests.
+                        * There is still a limit : -ENOMEM.
+@@ -788,22 +787,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
+               }
+       }
+-      if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
+-              /*
+-               * If this is a GET, but not NOMAX, it means that the extra
+-               * data is not bounded by userspace, but by max_tokens. Thus
+-               * set the length to max_tokens. This matches the extra data
+-               * allocation.
+-               * The driver should fill it with the number of tokens it
+-               * provided, and it may check iwp->length rather than having
+-               * knowledge of max_tokens. If the driver doesn't change the
+-               * iwp->length, this ioctl just copies back max_token tokens
+-               * filled with zeroes. Hopefully the driver isn't claiming
+-               * them to be valid data.
+-               */
+-              iwp->length = descr->max_tokens;
+-      }
+-
+       err = handler(dev, info, (union iwreq_data *) iwp, extra);
+       iwp->length += essid_compat;
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index ea970b8..c68edb9f 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -334,7 +334,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
+ {
+       policy->walk.dead = 1;
+-      atomic_inc(&policy->genid);
++      atomic_inc_unchecked(&policy->genid);
+       del_timer(&policy->polq.hold_timer);
+       xfrm_queue_purge(&policy->polq.hold_queue);
+@@ -659,7 +659,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
+               hlist_add_head(&policy->bydst, chain);
+       xfrm_pol_hold(policy);
+       net->xfrm.policy_count[dir]++;
+-      atomic_inc(&flow_cache_genid);
++      atomic_inc_unchecked(&flow_cache_genid);
+       rt_genid_bump(net);
+       if (delpol) {
+               xfrm_policy_requeue(delpol, policy);
+@@ -1629,7 +1629,7 @@ free_dst:
+       goto out;
+ }
+-static int inline
++static inline int
+ xfrm_dst_alloc_copy(void **target, const void *src, int size)
+ {
+       if (!*target) {
+@@ -1641,7 +1641,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
+       return 0;
+ }
+-static int inline
++static inline int
+ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
+ {
+ #ifdef CONFIG_XFRM_SUB_POLICY
+@@ -1653,7 +1653,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
+ #endif
+ }
+-static int inline
++static inline int
+ xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
+ {
+ #ifdef CONFIG_XFRM_SUB_POLICY
+@@ -1747,7 +1747,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
+       xdst->num_pols = num_pols;
+       memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
+-      xdst->policy_genid = atomic_read(&pols[0]->genid);
++      xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
+       return xdst;
+ }
+@@ -2618,7 +2618,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
+               if (xdst->xfrm_genid != dst->xfrm->genid)
+                       return 0;
+               if (xdst->num_pols > 0 &&
+-                  xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
++                  xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
+                       return 0;
+               mtu = dst_mtu(dst->child);
+@@ -2706,8 +2706,11 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
+                       dst_ops->link_failure = xfrm_link_failure;
+               if (likely(dst_ops->neigh_lookup == NULL))
+                       dst_ops->neigh_lookup = xfrm_neigh_lookup;
+-              if (likely(afinfo->garbage_collect == NULL))
+-                      afinfo->garbage_collect = xfrm_garbage_collect_deferred;
++              if (likely(afinfo->garbage_collect == NULL)) {
++                      pax_open_kernel();
++                      *(void **)&afinfo->garbage_collect = xfrm_garbage_collect_deferred;
++                      pax_close_kernel();
++              }
+               rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
+       }
+       spin_unlock(&xfrm_policy_afinfo_lock);
+@@ -2761,7 +2764,9 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
+               dst_ops->check = NULL;
+               dst_ops->negative_advice = NULL;
+               dst_ops->link_failure = NULL;
+-              afinfo->garbage_collect = NULL;
++              pax_open_kernel();
++              *(void **)&afinfo->garbage_collect = NULL;
++              pax_close_kernel();
+       }
+       return err;
+ }
+@@ -3144,7 +3149,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
+                              sizeof(pol->xfrm_vec[i].saddr));
+                       pol->xfrm_vec[i].encap_family = mp->new_family;
+                       /* flush bundles */
+-                      atomic_inc(&pol->genid);
++                      atomic_inc_unchecked(&pol->genid);
+               }
+       }
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index 78f66fa..9286768 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -177,12 +177,14 @@ int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
+       if (unlikely(afinfo == NULL))
+               return -EAFNOSUPPORT;
+-      typemap = afinfo->type_map;
++      typemap = (const struct xfrm_type **)afinfo->type_map;
+       spin_lock_bh(&xfrm_type_lock);
+-      if (likely(typemap[type->proto] == NULL))
++      if (likely(typemap[type->proto] == NULL)) {
++              pax_open_kernel();
+               typemap[type->proto] = type;
+-      else
++              pax_close_kernel();
++      } else
+               err = -EEXIST;
+       spin_unlock_bh(&xfrm_type_lock);
+       xfrm_state_put_afinfo(afinfo);
+@@ -198,13 +200,16 @@ int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
+       if (unlikely(afinfo == NULL))
+               return -EAFNOSUPPORT;
+-      typemap = afinfo->type_map;
++      typemap = (const struct xfrm_type **)afinfo->type_map;
+       spin_lock_bh(&xfrm_type_lock);
+       if (unlikely(typemap[type->proto] != type))
+               err = -ENOENT;
+-      else
++      else {
++              pax_open_kernel();
+               typemap[type->proto] = NULL;
++              pax_close_kernel();
++      }
+       spin_unlock_bh(&xfrm_type_lock);
+       xfrm_state_put_afinfo(afinfo);
+       return err;
+@@ -214,7 +219,6 @@ EXPORT_SYMBOL(xfrm_unregister_type);
+ static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
+ {
+       struct xfrm_state_afinfo *afinfo;
+-      const struct xfrm_type **typemap;
+       const struct xfrm_type *type;
+       int modload_attempted = 0;
+@@ -222,9 +226,8 @@ retry:
+       afinfo = xfrm_state_get_afinfo(family);
+       if (unlikely(afinfo == NULL))
+               return NULL;
+-      typemap = afinfo->type_map;
+-      type = typemap[proto];
++      type = afinfo->type_map[proto];
+       if (unlikely(type && !try_module_get(type->owner)))
+               type = NULL;
+       if (!type && !modload_attempted) {
+@@ -258,7 +261,7 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
+               return -EAFNOSUPPORT;
+       err = -EEXIST;
+-      modemap = afinfo->mode_map;
++      modemap = (struct xfrm_mode **)afinfo->mode_map;
+       spin_lock_bh(&xfrm_mode_lock);
+       if (modemap[mode->encap])
+               goto out;
+@@ -267,8 +270,10 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
+       if (!try_module_get(afinfo->owner))
+               goto out;
+-      mode->afinfo = afinfo;
++      pax_open_kernel();
++      *(const void **)&mode->afinfo = afinfo;
+       modemap[mode->encap] = mode;
++      pax_close_kernel();
+       err = 0;
+ out:
+@@ -292,10 +297,12 @@ int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
+               return -EAFNOSUPPORT;
+       err = -ENOENT;
+-      modemap = afinfo->mode_map;
++      modemap = (struct xfrm_mode **)afinfo->mode_map;
+       spin_lock_bh(&xfrm_mode_lock);
+       if (likely(modemap[mode->encap] == mode)) {
++              pax_open_kernel();
+               modemap[mode->encap] = NULL;
++              pax_close_kernel();
+               module_put(mode->afinfo->owner);
+               err = 0;
+       }
+diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
+index 05a6e3d..6716ec9 100644
+--- a/net/xfrm/xfrm_sysctl.c
++++ b/net/xfrm/xfrm_sysctl.c
+@@ -42,7 +42,7 @@ static struct ctl_table xfrm_table[] = {
+ int __net_init xfrm_sysctl_init(struct net *net)
+ {
+-      struct ctl_table *table;
++      ctl_table_no_const *table;
+       __xfrm_sysctl_init(net);
+diff --git a/scripts/Makefile.build b/scripts/Makefile.build
+index d5d859c..781cbcb 100644
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -111,7 +111,7 @@ endif
+ endif
+ # Do not include host rules unless needed
+-ifneq ($(hostprogs-y)$(hostprogs-m),)
++ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m)$(hostcxxlibs-y)$(hostcxxlibs-m),)
+ include scripts/Makefile.host
+ endif
+diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
+index 686cb0d..9d653bf 100644
+--- a/scripts/Makefile.clean
++++ b/scripts/Makefile.clean
+@@ -43,7 +43,8 @@ subdir-ymn   := $(addprefix $(obj)/,$(subdir-ymn))
+ __clean-files := $(extra-y) $(always)                  \
+                  $(targets) $(clean-files)             \
+                  $(host-progs)                         \
+-                 $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
++                 $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
++                 $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
+ __clean-files   := $(filter-out $(no-clean-files), $(__clean-files))
+diff --git a/scripts/Makefile.host b/scripts/Makefile.host
+index 1ac414f..38575f7 100644
+--- a/scripts/Makefile.host
++++ b/scripts/Makefile.host
+@@ -31,6 +31,8 @@
+ # Note: Shared libraries consisting of C++ files are not supported
+ __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
++__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
++__hostcxxlibs := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m))
+ # C code
+ # Executables compiled from a single .c file
+@@ -54,11 +56,15 @@ host-cxxobjs       := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
+ # Shared libaries (only .c supported)
+ # Shared libraries (.so) - all .so files referenced in "xxx-objs"
+ host-cshlib   := $(sort $(filter %.so, $(host-cobjs)))
++host-cshlib   += $(sort $(filter %.so, $(__hostlibs)))
++host-cxxshlib := $(sort $(filter %.so, $(__hostcxxlibs)))
+ # Remove .so files from "xxx-objs"
+ host-cobjs    := $(filter-out %.so,$(host-cobjs))
++host-cxxobjs  := $(filter-out %.so,$(host-cxxobjs))
+-#Object (.o) files used by the shared libaries
++# Object (.o) files used by the shared libaries
+ host-cshobjs  := $(sort $(foreach m,$(host-cshlib),$($(m:.so=-objs))))
++host-cxxshobjs        := $(sort $(foreach m,$(host-cxxshlib),$($(m:.so=-objs))))
+ # output directory for programs/.o files
+ # hostprogs-y := tools/build may have been specified. Retrieve directory
+@@ -82,7 +88,9 @@ host-cobjs   := $(addprefix $(obj)/,$(host-cobjs))
+ host-cxxmulti := $(addprefix $(obj)/,$(host-cxxmulti))
+ host-cxxobjs  := $(addprefix $(obj)/,$(host-cxxobjs))
+ host-cshlib   := $(addprefix $(obj)/,$(host-cshlib))
++host-cxxshlib := $(addprefix $(obj)/,$(host-cxxshlib))
+ host-cshobjs  := $(addprefix $(obj)/,$(host-cshobjs))
++host-cxxshobjs        := $(addprefix $(obj)/,$(host-cxxshobjs))
+ host-objdirs    := $(addprefix $(obj)/,$(host-objdirs))
+ obj-dirs += $(host-objdirs)
+@@ -156,6 +164,13 @@ quiet_cmd_host-cshobjs    = HOSTCC  -fPIC $@
+ $(host-cshobjs): $(obj)/%.o: $(src)/%.c FORCE
+       $(call if_changed_dep,host-cshobjs)
++# Compile .c file, create position independent .o file
++# host-cxxshobjs -> .o
++quiet_cmd_host-cxxshobjs      = HOSTCXX -fPIC $@
++      cmd_host-cxxshobjs      = $(HOSTCXX) $(hostcxx_flags) -fPIC -c -o $@ $<
++$(host-cxxshobjs): $(obj)/%.o: $(src)/%.c FORCE
++      $(call if_changed_dep,host-cxxshobjs)
++
+ # Link a shared library, based on position independent .o files
+ # *.o -> .so shared library (host-cshlib)
+ quiet_cmd_host-cshlib = HOSTLLD -shared $@
+@@ -165,6 +180,15 @@ quiet_cmd_host-cshlib     = HOSTLLD -shared $@
+ $(host-cshlib): $(obj)/%: $(host-cshobjs) FORCE
+       $(call if_changed,host-cshlib)
++# Link a shared library, based on position independent .o files
++# *.o -> .so shared library (host-cxxshlib)
++quiet_cmd_host-cxxshlib       = HOSTLLD -shared $@
++      cmd_host-cxxshlib       = $(HOSTCXX) $(HOSTLDFLAGS) -shared -o $@ \
++                        $(addprefix $(obj)/,$($(@F:.so=-objs))) \
++                        $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
++$(host-cxxshlib): $(obj)/%: $(host-cxxshobjs) FORCE
++      $(call if_changed,host-cxxshlib)
++
+ targets += $(host-csingle)  $(host-cmulti) $(host-cobjs)\
+-         $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) 
++         $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) $(host-cxxshlib) $(host-cxxshobjs)
+diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
+index 078fe1d..fbdb363 100644
+--- a/scripts/basic/fixdep.c
++++ b/scripts/basic/fixdep.c
+@@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
+ /*
+  * Lookup a value in the configuration string.
+  */
+-static int is_defined_config(const char *name, int len, unsigned int hash)
++static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
+ {
+       struct item *aux;
+@@ -211,10 +211,10 @@ static void clear_config(void)
+ /*
+  * Record the use of a CONFIG_* word.
+  */
+-static void use_config(const char *m, int slen)
++static void use_config(const char *m, unsigned int slen)
+ {
+       unsigned int hash = strhash(m, slen);
+-      int c, i;
++      unsigned int c, i;
+       if (is_defined_config(m, slen, hash))
+           return;
+@@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
+ static void parse_config_file(const char *map, size_t len)
+ {
+-      const int *end = (const int *) (map + len);
++      const unsigned int *end = (const unsigned int *) (map + len);
+       /* start at +1, so that p can never be < map */
+-      const int *m   = (const int *) map + 1;
++      const unsigned int *m   = (const unsigned int *) map + 1;
+       const char *p, *q;
+       for (; m < end; m++) {
+@@ -435,7 +435,7 @@ static void print_deps(void)
+ static void traps(void)
+ {
+       static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
+-      int *p = (int *)test;
++      unsigned int *p = (unsigned int *)test;
+       if (*p != INT_CONF) {
+               fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianness? %#x\n",
+diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
+new file mode 100644
+index 0000000..5e0222d
+--- /dev/null
++++ b/scripts/gcc-plugin.sh
+@@ -0,0 +1,17 @@
++#!/bin/bash
++plugincc=`$1 -E -shared - -o /dev/null -I\`$3 -print-file-name=plugin\`/include 2>&1 <<EOF
++#include "gcc-plugin.h"
++#include "tree.h"
++#include "tm.h"
++#include "rtl.h"
++#ifdef ENABLE_BUILD_WITH_CXX
++#warning $2
++#else
++#warning $1
++#endif
++EOF`
++if [ $? -eq 0 ]
++then
++      [[ "$plugincc" =~ "$1" ]] && echo "$1"
++      [[ "$plugincc" =~ "$2" ]] && echo "$2"
++fi
+diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh
+index 643764f..6cc0137 100644
+--- a/scripts/headers_install.sh
++++ b/scripts/headers_install.sh
+@@ -29,6 +29,7 @@ do
+       FILE="$(basename "$i")"
+       sed -r \
+               -e 's/([ \t(])(__user|__force|__iomem)[ \t]/\1/g' \
++              -e 's/__intentional_overflow\([- \t,0-9]*\)//g' \
+               -e 's/__attribute_const__([ \t]|$)/\1/g' \
+               -e 's@^#include <linux/compiler.h>@@' \
+               -e 's/(^|[^a-zA-Z0-9])__packed([^a-zA-Z0-9_]|$)/\1__attribute__((packed))\2/g' \
+diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
+index 0149949..d482a0d 100644
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -158,7 +158,7 @@ else
+ fi;
+ # final build of init/
+-${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
++${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}" GCC_PLUGINS_AFLAGS="${GCC_PLUGINS_AFLAGS}"
+ kallsymso=""
+ kallsyms_vmlinux=""
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index 45f9a33..e4194b3 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -140,7 +140,7 @@ static void device_id_check(const char *modname, const char *device_id,
+                           unsigned long size, unsigned long id_size,
+                           void *symval)
+ {
+-      int i;
++      unsigned int i;
+       if (size % id_size || size < id_size) {
+               fatal("%s: sizeof(struct %s_device_id)=%lu is not a modulo "
+@@ -168,7 +168,7 @@ static void device_id_check(const char *modname, const char *device_id,
+ /* USB is special because the bcdDevice can be matched against a numeric range */
+ /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipNinN" */
+ static void do_usb_entry(void *symval,
+-                       unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
++                       unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
+                        unsigned char range_lo, unsigned char range_hi,
+                        unsigned char max, struct module *mod)
+ {
+@@ -278,7 +278,7 @@ static void do_usb_entry_multi(void *symval, struct module *mod)
+ {
+       unsigned int devlo, devhi;
+       unsigned char chi, clo, max;
+-      int ndigits;
++      unsigned int ndigits;
+       DEF_FIELD(symval, usb_device_id, match_flags);
+       DEF_FIELD(symval, usb_device_id, idVendor);
+@@ -531,7 +531,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
+       for (i = 0; i < count; i++) {
+               DEF_FIELD_ADDR(symval + i*id_size, pnp_device_id, id);
+               char acpi_id[sizeof(*id)];
+-              int j;
++              unsigned int j;
+               buf_printf(&mod->dev_table_buf,
+                          "MODULE_ALIAS(\"pnp:d%s*\");\n", *id);
+@@ -560,7 +560,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
+               for (j = 0; j < PNP_MAX_DEVICES; j++) {
+                       const char *id = (char *)(*devs)[j].id;
+-                      int i2, j2;
++                      unsigned int i2, j2;
+                       int dup = 0;
+                       if (!id[0])
+@@ -586,7 +586,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
+                       /* add an individual alias for every device entry */
+                       if (!dup) {
+                               char acpi_id[PNP_ID_LEN];
+-                              int k;
++                              unsigned int k;
+                               buf_printf(&mod->dev_table_buf,
+                                          "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
+@@ -938,7 +938,7 @@ static void dmi_ascii_filter(char *d, const char *s)
+ static int do_dmi_entry(const char *filename, void *symval,
+                       char *alias)
+ {
+-      int i, j;
++      unsigned int i, j;
+       DEF_FIELD_ADDR(symval, dmi_system_id, matches);
+       sprintf(alias, "dmi*");
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index a4be8e1..6e8a5fb 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -933,6 +933,7 @@ enum mismatch {
+       ANY_INIT_TO_ANY_EXIT,
+       ANY_EXIT_TO_ANY_INIT,
+       EXPORT_TO_INIT_EXIT,
++      DATA_TO_TEXT
+ };
+ struct sectioncheck {
+@@ -1047,6 +1048,12 @@ const struct sectioncheck sectioncheck[] = {
+       .tosec   = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
+       .mismatch = EXPORT_TO_INIT_EXIT,
+       .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
++},
++/* Do not reference code from writable data */
++{
++      .fromsec = { DATA_SECTIONS, NULL },
++      .tosec   = { TEXT_SECTIONS, NULL },
++      .mismatch = DATA_TO_TEXT
+ }
+ };
+@@ -1169,10 +1176,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
+                       continue;
+               if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
+                       continue;
+-              if (sym->st_value == addr)
+-                      return sym;
+               /* Find a symbol nearby - addr are maybe negative */
+               d = sym->st_value - addr;
++              if (d == 0)
++                      return sym;
+               if (d < 0)
+                       d = addr - sym->st_value;
+               if (d < distance) {
+@@ -1451,6 +1458,14 @@ static void report_sec_mismatch(const char *modname,
+               tosym, prl_to, prl_to, tosym);
+               free(prl_to);
+               break;
++      case DATA_TO_TEXT:
++#if 0
++              fprintf(stderr,
++              "The %s %s:%s references\n"
++              "the %s %s:%s%s\n",
++              from, fromsec, fromsym, to, tosec, tosym, to_p);
++#endif
++              break;
+       }
+       fprintf(stderr, "\n");
+ }
+@@ -1685,7 +1700,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
+ static void check_sec_ref(struct module *mod, const char *modname,
+                           struct elf_info *elf)
+ {
+-      int i;
++      unsigned int i;
+       Elf_Shdr *sechdrs = elf->sechdrs;
+       /* Walk through all sections */
+@@ -1804,7 +1819,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
+       va_end(ap);
+ }
+-void buf_write(struct buffer *buf, const char *s, int len)
++void buf_write(struct buffer *buf, const char *s, unsigned int len)
+ {
+       if (buf->size - buf->pos < len) {
+               buf->size += len + SZ;
+@@ -2023,7 +2038,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
+       if (fstat(fileno(file), &st) < 0)
+               goto close_write;
+-      if (st.st_size != b->pos)
++      if (st.st_size != (off_t)b->pos)
+               goto close_write;
+       tmp = NOFAIL(malloc(b->pos));
+diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
+index 51207e4..f7d603d 100644
+--- a/scripts/mod/modpost.h
++++ b/scripts/mod/modpost.h
+@@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
+ struct buffer {
+       char *p;
+-      int pos;
+-      int size;
++      unsigned int pos;
++      unsigned int size;
+ };
+ void __attribute__((format(printf, 2, 3)))
+ buf_printf(struct buffer *buf, const char *fmt, ...);
+ void
+-buf_write(struct buffer *buf, const char *s, int len);
++buf_write(struct buffer *buf, const char *s, unsigned int len);
+ struct module {
+       struct module *next;
+diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
+index 9dfcd6d..099068e 100644
+--- a/scripts/mod/sumversion.c
++++ b/scripts/mod/sumversion.c
+@@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
+               goto out;
+       }
+-      if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
++      if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
+               warn("writing sum in %s failed: %s\n",
+                       filename, strerror(errno));
+               goto out;
+diff --git a/scripts/package/builddeb b/scripts/package/builddeb
+index acb8650..b8c5f02 100644
+--- a/scripts/package/builddeb
++++ b/scripts/package/builddeb
+@@ -246,6 +246,7 @@ fi
+ (cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
+ (cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
+ (cd $objtree; find arch/$SRCARCH/include .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
++(cd $objtree; find tools/gcc -name \*.so >> "$objtree/debian/hdrobjfiles")
+ destdir=$kernel_headers_dir/usr/src/linux-headers-$version
+ mkdir -p "$destdir"
+ (cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
+diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
+index 68bb4ef..2f419e1 100644
+--- a/scripts/pnmtologo.c
++++ b/scripts/pnmtologo.c
+@@ -244,14 +244,14 @@ static void write_header(void)
+     fprintf(out, " *  Linux logo %s\n", logoname);
+     fputs(" */\n\n", out);
+     fputs("#include <linux/linux_logo.h>\n\n", out);
+-    fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
++    fprintf(out, "static unsigned char %s_data[] = {\n",
+           logoname);
+ }
+ static void write_footer(void)
+ {
+     fputs("\n};\n\n", out);
+-    fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
++    fprintf(out, "const struct linux_logo %s = {\n", logoname);
+     fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
+     fprintf(out, "\t.width\t\t= %d,\n", logo_width);
+     fprintf(out, "\t.height\t\t= %d,\n", logo_height);
+@@ -381,7 +381,7 @@ static void write_logo_clut224(void)
+     fputs("\n};\n\n", out);
+     /* write logo clut */
+-    fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
++    fprintf(out, "static unsigned char %s_clut[] = {\n",
+           logoname);
+     write_hex_cnt = 0;
+     for (i = 0; i < logo_clutsize; i++) {
+diff --git a/scripts/sortextable.h b/scripts/sortextable.h
+index f5eb43d..1814de8 100644
+--- a/scripts/sortextable.h
++++ b/scripts/sortextable.h
+@@ -106,9 +106,9 @@ do_func(Elf_Ehdr *ehdr, char const *const fname, table_sort_t custom_sort)
+       const char *secstrtab;
+       const char *strtab;
+       char *extab_image;
+-      int extab_index = 0;
+-      int i;
+-      int idx;
++      unsigned int extab_index = 0;
++      unsigned int i;
++      unsigned int idx;
+       shdr = (Elf_Shdr *)((char *)ehdr + _r(&ehdr->e_shoff));
+       shstrtab_sec = shdr + r2(&ehdr->e_shstrndx);
+diff --git a/security/Kconfig b/security/Kconfig
+index e9c6ac7..3e3f362 100644
+--- a/security/Kconfig
++++ b/security/Kconfig
+@@ -4,6 +4,959 @@
+ menu "Security options"
++menu "Grsecurity"
++
++      config ARCH_TRACK_EXEC_LIMIT
++      bool
++
++      config PAX_KERNEXEC_PLUGIN
++      bool
++
++      config PAX_PER_CPU_PGD
++      bool
++
++      config TASK_SIZE_MAX_SHIFT
++      int
++      depends on X86_64
++      default 47 if !PAX_PER_CPU_PGD
++      default 42 if PAX_PER_CPU_PGD
++
++      config PAX_ENABLE_PAE
++      bool
++      default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
++      
++      config PAX_USERCOPY_SLABS
++      bool
++
++config GRKERNSEC
++      bool "Grsecurity"
++      select CRYPTO
++      select CRYPTO_SHA256
++      select PROC_FS
++      select STOP_MACHINE
++      select TTY
++      help
++        If you say Y here, you will be able to configure many features
++        that will enhance the security of your system.  It is highly
++        recommended that you say Y here and read through the help
++        for each option so that you fully understand the features and
++        can evaluate their usefulness for your machine.
++
++choice
++      prompt "Configuration Method"
++      depends on GRKERNSEC
++      default GRKERNSEC_CONFIG_CUSTOM
++      help
++
++config GRKERNSEC_CONFIG_AUTO
++      bool "Automatic"
++      help
++        If you choose this configuration method, you'll be able to answer a small
++        number of simple questions about how you plan to use this kernel.
++        The settings of grsecurity and PaX will be automatically configured for
++        the highest commonly-used settings within the provided constraints.
++
++        If you require additional configuration, custom changes can still be made
++        from the "custom configuration" menu.
++
++config GRKERNSEC_CONFIG_CUSTOM
++      bool "Custom"
++      help
++        If you choose this configuration method, you'll be able to configure all
++        grsecurity and PaX settings manually.  Via this method, no options are
++        automatically enabled.
++
++endchoice
++
++choice
++      prompt "Usage Type"
++      depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
++      default GRKERNSEC_CONFIG_SERVER
++      help
++
++config GRKERNSEC_CONFIG_SERVER
++      bool "Server"
++      help
++        Choose this option if you plan to use this kernel on a server.
++
++config GRKERNSEC_CONFIG_DESKTOP
++      bool "Desktop"
++      help
++        Choose this option if you plan to use this kernel on a desktop.
++
++endchoice
++
++choice
++      prompt "Virtualization Type"
++      depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO)
++      default GRKERNSEC_CONFIG_VIRT_NONE
++      help
++
++config GRKERNSEC_CONFIG_VIRT_NONE
++      bool "None"
++      help
++        Choose this option if this kernel will be run on bare metal.
++
++config GRKERNSEC_CONFIG_VIRT_GUEST
++      bool "Guest"
++      help
++        Choose this option if this kernel will be run as a VM guest.
++
++config GRKERNSEC_CONFIG_VIRT_HOST
++      bool "Host"
++      help
++        Choose this option if this kernel will be run as a VM host.
++
++endchoice
++
++choice
++      prompt "Virtualization Hardware"
++      depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
++      help
++
++config GRKERNSEC_CONFIG_VIRT_EPT
++      bool "EPT/RVI Processor Support"
++      depends on X86
++      help
++        Choose this option if your CPU supports the EPT or RVI features of 2nd-gen
++        hardware virtualization.  This allows for additional kernel hardening protections
++        to operate without additional performance impact.
++
++        To see if your Intel processor supports EPT, see:
++        http://ark.intel.com/Products/VirtualizationTechnology
++        (Most Core i3/5/7 support EPT)
++
++        To see if your AMD processor supports RVI, see:
++        http://support.amd.com/us/kbarticles/Pages/GPU120AMDRVICPUsHyperVWin8.aspx
++
++config GRKERNSEC_CONFIG_VIRT_SOFT
++      bool "First-gen/No Hardware Virtualization"
++      help
++        Choose this option if you use an Atom/Pentium/Core 2 processor that either doesn't
++        support hardware virtualization or doesn't support the EPT/RVI extensions.
++
++endchoice
++
++choice
++      prompt "Virtualization Software"
++      depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
++      help
++
++config GRKERNSEC_CONFIG_VIRT_XEN
++      bool "Xen"
++      help
++        Choose this option if this kernel is running as a Xen guest or host.
++
++config GRKERNSEC_CONFIG_VIRT_VMWARE
++      bool "VMWare"
++      help
++        Choose this option if this kernel is running as a VMWare guest or host.
++
++config GRKERNSEC_CONFIG_VIRT_KVM
++      bool "KVM"
++      help
++        Choose this option if this kernel is running as a KVM guest or host.
++
++config GRKERNSEC_CONFIG_VIRT_VIRTUALBOX
++      bool "VirtualBox"
++      help
++        Choose this option if this kernel is running as a VirtualBox guest or host.
++
++endchoice
++
++choice
++      prompt "Required Priorities"
++      depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
++      default GRKERNSEC_CONFIG_PRIORITY_PERF
++      help
++
++config GRKERNSEC_CONFIG_PRIORITY_PERF
++      bool "Performance"
++      help
++        Choose this option if performance is of highest priority for this deployment
++        of grsecurity.  Features like UDEREF on a 64bit kernel, kernel stack clearing,
++        clearing of structures intended for userland, and freed memory sanitizing will
++        be disabled.
++
++config GRKERNSEC_CONFIG_PRIORITY_SECURITY
++      bool "Security"
++      help
++        Choose this option if security is of highest priority for this deployment of
++        grsecurity.  UDEREF, kernel stack clearing, clearing of structures intended
++        for userland, and freed memory sanitizing will be enabled for this kernel.
++        In a worst-case scenario, these features can introduce a 20% performance hit
++        (UDEREF on x64 contributing half of this hit).
++
++endchoice
++
++menu "Default Special Groups"
++depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
++
++config GRKERNSEC_PROC_GID
++      int "GID exempted from /proc restrictions"
++      default 1001
++      help
++        Setting this GID determines which group will be exempted from
++        grsecurity's /proc restrictions, allowing users of the specified
++        group  to view network statistics and the existence of other users'
++        processes on the system.  This GID may also be chosen at boot time
++        via "grsec_proc_gid=" on the kernel commandline.
++
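As a rough userspace illustration of the exemption this GID controls (the in-kernel check used by grsecurity itself is different), the sketch below tests whether the invoking user belongs to the exempt group; the GID value 1001 simply mirrors the Kconfig default above and is an assumption.

/* Illustrative only: does the current user belong to the group that
 * would be exempted from the /proc restrictions described above? */
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>

static int in_exempt_group(gid_t exempt)
{
        gid_t sup[64];
        int i, n;

        if (getegid() == exempt)
                return 1;
        n = getgroups(64, sup);         /* supplementary groups */
        for (i = 0; i < n; i++)
                if (sup[i] == exempt)
                        return 1;
        return 0;
}

int main(void)
{
        gid_t exempt = 1001;            /* Kconfig default above; adjust as needed */

        printf("/proc restrictions would %s for this user\n",
               in_exempt_group(exempt) ? "be lifted" : "apply");
        return 0;
}
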
++config GRKERNSEC_TPE_UNTRUSTED_GID
++        int "GID for TPE-untrusted users"
++        depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
++        default 1005
++        help
++        Setting this GID determines which group untrusted users should
++        be added to.  These users will be placed under grsecurity's Trusted Path
++        Execution mechanism, preventing them from executing their own binaries.
++        The users will only be able to execute binaries in directories owned and
++        writable only by the root user.  If the sysctl option is enabled, a sysctl
++        option with name "tpe_gid" is created.
++
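The trusted-path rule described above (binaries may only be executed from directories owned by root and writable by root alone) can be approximated in userspace as below; this is an illustrative sketch with deliberately simple path handling, not the grsecurity kernel code. Invoke it as, e.g., ./tpe-check /usr/local/bin/foo.

/* Rough approximation of the TPE directory test described above. */
#include <stdio.h>
#include <libgen.h>
#include <sys/stat.h>

static int dir_is_trusted(const char *binary_path)
{
        char buf[4096];
        struct stat st;

        snprintf(buf, sizeof(buf), "%s", binary_path);  /* dirname() modifies its argument */
        if (stat(dirname(buf), &st) != 0)
                return 0;
        /* owned by root, not writable by group or others */
        return st.st_uid == 0 && !(st.st_mode & (S_IWGRP | S_IWOTH));
}

int main(int argc, char **argv)
{
        if (argc < 2) {
                fprintf(stderr, "usage: %s /path/to/binary\n", argv[0]);
                return 1;
        }
        printf("%s: %s\n", argv[1],
               dir_is_trusted(argv[1]) ? "TPE would allow execution"
                                       : "TPE would deny execution");
        return 0;
}
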
++config GRKERNSEC_TPE_TRUSTED_GID
++        int "GID for TPE-trusted users"
++        depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
++        default 1005
++        help
++          Setting this GID determines what group TPE restrictions will be
++          *disabled* for.  If the sysctl option is enabled, a sysctl option
++          with name "tpe_gid" is created.
++
++config GRKERNSEC_SYMLINKOWN_GID
++        int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
++        depends on GRKERNSEC_CONFIG_SERVER
++        default 1006
++        help
++          Setting this GID determines what group kernel-enforced
++          SymlinksIfOwnerMatch will be enabled for.  If the sysctl option
++          is enabled, a sysctl option with name "symlinkown_gid" is created.
++
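For reference, the SymlinksIfOwnerMatch behaviour this option enforces in the kernel boils down to the following comparison, shown here as a hedged userspace sketch: a symlink is only followed when the link and its target share an owner.

/* Userspace illustration of the owner-match rule; not the kernel code. */
#include <stdio.h>
#include <sys/stat.h>

static int symlink_owner_matches(const char *path)
{
        struct stat link_st, target_st;

        if (lstat(path, &link_st) != 0 || !S_ISLNK(link_st.st_mode))
                return 1;                       /* not a symlink: nothing to enforce */
        if (stat(path, &target_st) != 0)
                return 0;                       /* dangling link: refuse */
        return link_st.st_uid == target_st.st_uid;
}

int main(int argc, char **argv)
{
        if (argc < 2)
                return 1;
        printf("%s: %s\n", argv[1],
               symlink_owner_matches(argv[1]) ? "owners match, would be followed"
                                              : "owner mismatch, would be refused");
        return 0;
}
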
++
++endmenu
++
++menu "Customize Configuration"
++depends on GRKERNSEC
++
++menu "PaX"
++
++config PAX
++      bool "Enable various PaX features"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
++      help
++        This allows you to enable various PaX features.  PaX adds
++        intrusion prevention mechanisms to the kernel that reduce
++        the risks posed by exploitable memory corruption bugs.
++
++menu "PaX Control"
++      depends on PAX
++
++config PAX_SOFTMODE
++      bool 'Support soft mode'
++      help
++        Enabling this option will allow you to run PaX in soft mode, that
++        is, PaX features will not be enforced by default, only on executables
++        marked explicitly.  You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
++        support as they are the only way to mark executables for soft mode use.
++
++        Soft mode can be activated by using the "pax_softmode=1" kernel command
++        line option on boot.  Furthermore you can control various PaX features
++        at runtime via the entries in /proc/sys/kernel/pax.
++
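A small helper for the runtime control mentioned above; the /proc/sys/kernel/pax directory is taken from the help text, but the "softmode" entry name is an assumption and may differ on a given kernel.

/* Reads the (assumed) soft mode sysctl and reports its state. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/pax/softmode", "r");
        int value;

        if (!f) {
                perror("/proc/sys/kernel/pax/softmode");
                return 1;
        }
        if (fscanf(f, "%d", &value) != 1) {
                fprintf(stderr, "unexpected sysctl format\n");
                fclose(f);
                return 1;
        }
        fclose(f);
        printf("PaX soft mode is %s\n", value ? "enabled" : "disabled");
        return 0;
}
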
++config PAX_EI_PAX
++      bool 'Use legacy ELF header marking'
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        Enabling this option will allow you to control PaX features on
++        a per executable basis via the 'chpax' utility available at
++        http://pax.grsecurity.net/.  The control flags will be read from
++        an otherwise reserved part of the ELF header.  This marking has
++        numerous drawbacks (no support for soft-mode, toolchain does not
++        know about the non-standard use of the ELF header) therefore it
++        has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
++        support.
++
++        Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
++        support as well, they will override the legacy EI_PAX marks.
++
++        If you enable none of the marking options then all applications
++        will run with PaX enabled on them by default.
++
++config PAX_PT_PAX_FLAGS
++      bool 'Use ELF program header marking'
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        Enabling this option will allow you to control PaX features on
++        a per executable basis via the 'paxctl' utility available at
++        http://pax.grsecurity.net/.  The control flags will be read from
++        a PaX specific ELF program header (PT_PAX_FLAGS).  This marking
++        has the benefits of supporting both soft mode and being fully
++        integrated into the toolchain (the binutils patch is available
++        from http://pax.grsecurity.net).
++
++        Note that if you enable the legacy EI_PAX marking support as well,
++        the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
++
++        If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
++        must make sure that the marks are the same if a binary has both marks.
++
++        If you enable none of the marking options then all applications
++        will run with PaX enabled on them by default.
++
++config PAX_XATTR_PAX_FLAGS
++      bool 'Use filesystem extended attributes marking'
++      default y if GRKERNSEC_CONFIG_AUTO
++      select CIFS_XATTR if CIFS
++      select EXT2_FS_XATTR if EXT2_FS
++      select EXT3_FS_XATTR if EXT3_FS
++      select EXT4_FS_XATTR if EXT4_FS
++      select JFFS2_FS_XATTR if JFFS2_FS
++      select REISERFS_FS_XATTR if REISERFS_FS
++      select SQUASHFS_XATTR if SQUASHFS
++      select TMPFS_XATTR if TMPFS
++      select UBIFS_FS_XATTR if UBIFS_FS
++      help
++        Enabling this option will allow you to control PaX features on
++        a per executable basis via the 'setfattr' utility.  The control
++        flags will be read from the user.pax.flags extended attribute of
++        the file.  This marking has the benefit of supporting binary-only
++        applications that self-check themselves (e.g., skype) and would
++        not tolerate chpax/paxctl changes.  The main drawback is that
++        extended attributes are not supported by some filesystems (e.g.,
++        isofs, udf, vfat) so copying files through such filesystems will
++        lose the extended attributes and these PaX markings.
++
++        Note that if you enable the legacy EI_PAX marking support as well,
++        the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
++
++        If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
++        must make sure that the marks are the same if a binary has both marks.
++
++        If you enable none of the marking options then all applications
++        will run with PaX enabled on them by default.
++
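A minimal sketch of marking a binary through the user.pax.flags attribute named above; the attribute name comes from the help text, while the flag letters in the value string are placeholders following the PaX userland tools' convention and are not defined here. It is the programmatic equivalent of: setfattr -n user.pax.flags -v "..." <file>.

/* Sets the user.pax.flags extended attribute on a file. */
#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
        const char *value = "em";       /* placeholder flag letters */

        if (argc < 2) {
                fprintf(stderr, "usage: %s <executable>\n", argv[0]);
                return 1;
        }
        if (setxattr(argv[1], "user.pax.flags", value, strlen(value), 0) != 0) {
                perror("setxattr");
                return 1;
        }
        return 0;
}
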
++choice
++      prompt 'MAC system integration'
++      default PAX_HAVE_ACL_FLAGS
++      help
++        Mandatory Access Control systems have the option of controlling
++        PaX flags on a per executable basis, choose the method supported
++        by your particular system.
++
++        - "none": if your MAC system does not interact with PaX,
++        - "direct": if your MAC system defines pax_set_initial_flags() itself,
++        - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
++
++        NOTE: this option is for developers/integrators only.
++
++      config PAX_NO_ACL_FLAGS
++              bool 'none'
++
++      config PAX_HAVE_ACL_FLAGS
++              bool 'direct'
++
++      config PAX_HOOK_ACL_FLAGS
++              bool 'hook'
++endchoice
++
++endmenu
++
++menu "Non-executable pages"
++      depends on PAX
++
++config PAX_NOEXEC
++      bool "Enforce non-executable pages"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on ALPHA || (ARM && (CPU_V6 || CPU_V6K || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
++      help
++        By design some architectures do not allow for protecting memory
++        pages against execution or even if they do, Linux does not make
++        use of this feature.  In practice this means that if a page is
++        readable (such as the stack or heap) it is also executable.
++
++        There is a well known exploit technique that makes use of this
++        fact and a common programming mistake where an attacker can
++        introduce code of his choice somewhere in the attacked program's
++        memory (typically the stack or the heap) and then execute it.
++
++        If the attacked program was running with different (typically
++        higher) privileges than that of the attacker, then he can elevate
++        his own privilege level (e.g. get a root shell, write to files to
++        which he does not have write access, etc).
++
++        Enabling this option will let you choose from various features
++        that prevent the injection and execution of 'foreign' code in
++        a program.
++
++        This will also break programs that rely on the old behaviour and
++        expect that dynamically allocated memory via the malloc() family
++        of functions is executable (which it is not).  Notable examples
++        are the XFree86 4.x server, the java runtime and wine.
++
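To see the behaviour these options enforce, the sketch below jumps into an anonymous mapping created without PROT_EXEC; with non-executable page enforcement (or hardware NX) the call is expected to fault with SIGSEGV instead of executing.

/* Expect SIGSEGV: the mapping is readable and writable but not executable. */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        void (*fn)(void) = (void (*)(void))buf;
        puts("calling into a non-executable page...");
        fn();                           /* should never return */
        return 0;
}
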
++config PAX_PAGEEXEC
++      bool "Paging based non-executable pages"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
++      select ARCH_TRACK_EXEC_LIMIT if X86_32
++      help
++        This implementation is based on the paging feature of the CPU.
++        On i386 without hardware non-executable bit support there is a
++        variable but usually low performance impact, however on Intel's
++        P4 core based CPUs it is very high so you should not enable this
++        for kernels meant to be used on such CPUs.
++
++        On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
++        with hardware non-executable bit support there is no performance
++        impact, on ppc the impact is negligible.
++
++        Note that several architectures require various emulations due to
++        badly designed userland ABIs; this causes a performance impact that
++        will disappear as soon as userland is fixed. For example, ppc
++        userland MUST have been built with secure-plt by a recent toolchain.
++
++config PAX_SEGMEXEC
++      bool "Segmentation based non-executable pages"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on PAX_NOEXEC && X86_32
++      help
++        This implementation is based on the segmentation feature of the
++        CPU and has a very small performance impact, however applications
++        will be limited to a 1.5 GB address space instead of the normal
++        3 GB.
++
++config PAX_EMUTRAMP
++      bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
++      default y if PARISC
++      help
++        There are some programs and libraries that for one reason or
++        another attempt to execute special small code snippets from
++        non-executable memory pages.  Most notable examples are the
++        signal handler return code generated by the kernel itself and
++        the GCC trampolines.
++
++        If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
++        such programs will no longer work under your kernel.
++
++        As a remedy you can say Y here and use the 'chpax' or 'paxctl'
++        utilities to enable trampoline emulation for the affected programs
++        yet still have the protection provided by the non-executable pages.
++
++        On parisc you MUST enable this option and EMUSIGRT as well, otherwise
++        your system will not even boot.
++
++        Alternatively you can say N here and use the 'chpax' or 'paxctl'
++        utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
++        for the affected files.
++
++        NOTE: enabling this feature *may* open up a loophole in the
++        protection provided by non-executable pages that an attacker
++        could abuse.  Therefore the best solution is to not have any
++        files on your system that would require this option.  This can
++        be achieved by not using libc5 (which relies on the kernel
++        signal handler return code) and not using or rewriting programs
++        that make use of the nested function implementation of GCC.
++        Skilled users can just fix GCC itself so that it implements
++        nested function calls in a way that does not interfere with PaX.
++
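The GCC trampolines referred to above come from nested functions whose address escapes; a minimal example (a GNU C extension, assumed to be compiled with gcc) is shown below. Running it needs either an executable stack or the emulation this option provides.

/* Taking the address of a nested function makes gcc build a small
 * trampoline on the stack, which is executed when apply() calls fn. */
#include <stdio.h>

static void apply(int (*fn)(int), int x)
{
        printf("fn(%d) = %d\n", x, fn(x));
}

int main(void)
{
        int offset = 42;

        int add_offset(int v) { return v + offset; }    /* nested function */

        apply(add_offset, 8);
        return 0;
}
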
++config PAX_EMUSIGRT
++      bool "Automatically emulate sigreturn trampolines"
++      depends on PAX_EMUTRAMP && PARISC
++      default y
++      help
++        Enabling this option will have the kernel automatically detect
++        and emulate signal return trampolines executing on the stack
++        that would otherwise lead to task termination.
++
++        This solution is intended as a temporary one for users with
++        legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
++        Modula-3 runtime, etc) or executables linked to such, basically
++        everything that does not specify its own SA_RESTORER function in
++        normal executable memory like glibc 2.1+ does.
++
++        On parisc you MUST enable this option, otherwise your system will
++        not even boot.
++
++        NOTE: this feature cannot be disabled on a per executable basis
++        and since it *does* open up a loophole in the protection provided
++        by non-executable pages, the best solution is to not have any
++        files on your system that would require this option.
++
++config PAX_MPROTECT
++      bool "Restrict mprotect()"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
++      help
++        Enabling this option will prevent programs from
++         - changing the executable status of memory pages that were
++           not originally created as executable,
++         - making read-only executable pages writable again,
++         - creating executable pages from anonymous memory,
++         - making read-only-after-relocations (RELRO) data pages writable again.
++
++        You should say Y here to complete the protection provided by
++        the enforcement of non-executable pages.
++
++        NOTE: you can use the 'chpax' or 'paxctl' utilities to control
++        this feature on a per file basis.
++
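The first two restrictions listed above can be exercised with the short program below: it maps anonymous read/write memory and then asks for PROT_EXEC. Under PAX_MPROTECT the mprotect() call is expected to fail (the exact errno is policy dependent); on an unpatched kernel it succeeds.

/* Tries to turn a writable anonymous mapping into an executable one. */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        if (mprotect(buf, 4096, PROT_READ | PROT_EXEC) != 0)
                perror("mprotect(PROT_EXEC) denied");
        else
                puts("mprotect(PROT_EXEC) succeeded (MPROTECT not active)");
        return 0;
}
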
++config PAX_MPROTECT_COMPAT
++      bool "Use legacy/compat protection demoting (read help)"
++      default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
++      depends on PAX_MPROTECT
++      help
++        The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
++        by sending the proper error code to the application.  For some broken 
++        userland, this can cause problems with Python or other applications.  The
++        current implementation however allows for applications like clamav to
++        detect if JIT compilation/execution is allowed and to fall back gracefully
++        to an interpreter-based mode if it does not.  While we encourage everyone
++        to use the current implementation as-is and push upstream to fix broken
++        userland (note that the RWX logging option can assist with this), in some
++        environments this may not be possible.  Having to disable MPROTECT
++        completely on certain binaries reduces the security benefit of PaX,
++        so this option is provided for those environments to revert to the old
++        behavior.
++        
++config PAX_ELFRELOCS
++      bool "Allow ELF text relocations (read help)"
++      depends on PAX_MPROTECT
++      default n
++      help
++        Non-executable pages and mprotect() restrictions are effective
++        in preventing the introduction of new executable code into an
++        attacked task's address space.  There remain only two venues
++        for this kind of attack: if the attacker can execute already
++        existing code in the attacked task then he can either have it
++        create and mmap() a file containing his code or have it mmap()
++        an already existing ELF library that does not have position
++        independent code in it and use mprotect() on it to make it
++        writable and copy his code there.  While protecting against
++        the former approach is beyond PaX, the latter can be prevented
++        by having only PIC ELF libraries on one's system (which do not
++        need to relocate their code).  If you are sure this is your case,
++        as is the case with all modern Linux distributions, then leave
++        this option disabled.  You should say 'n' here.
++
++config PAX_ETEXECRELOCS
++      bool "Allow ELF ET_EXEC text relocations"
++      depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
++      select PAX_ELFRELOCS
++      default y
++      help
++        On some architectures there are incorrectly created applications
++        that require text relocations and would not work without enabling
++        this option.  If you are an alpha, ia64 or parisc user, you should
++        enable this option and disable it once you have made sure that
++        none of your applications need it.
++
++config PAX_EMUPLT
++      bool "Automatically emulate ELF PLT"
++      depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
++      default y
++      help
++        Enabling this option will have the kernel automatically detect
++        and emulate the Procedure Linkage Table entries in ELF files.
++        On some architectures such entries are in writable memory, and
++        become non-executable leading to task termination.  Therefore
++        it is mandatory that you enable this option on alpha, parisc,
++        sparc and sparc64, otherwise your system would not even boot.
++
++        NOTE: this feature *does* open up a loophole in the protection
++        provided by the non-executable pages, therefore the proper
++        solution is to modify the toolchain to produce a PLT that does
++        not need to be writable.
++
++config PAX_DLRESOLVE
++      bool 'Emulate old glibc resolver stub'
++      depends on PAX_EMUPLT && SPARC
++      default n
++      help
++        This option is needed if userland has an old glibc (before 2.4)
++        that puts a 'save' instruction into the runtime generated resolver
++        stub that needs special emulation.
++
++config PAX_KERNEXEC
++      bool "Enforce non-executable kernel pages"
++      default y if GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_NONE || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_GUEST) || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_KVM))
++      depends on (X86 || (ARM && (CPU_V6 || CPU_V6K || CPU_V7) && !(ARM_LPAE && MODULES))) && !XEN
++      select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
++      select PAX_KERNEXEC_PLUGIN if X86_64
++      help
++        This is the kernel land equivalent of PAGEEXEC and MPROTECT,
++        that is, enabling this option will make it harder to inject
++        and execute 'foreign' code in kernel memory itself.
++
++choice
++      prompt "Return Address Instrumentation Method"
++      default PAX_KERNEXEC_PLUGIN_METHOD_BTS
++      depends on PAX_KERNEXEC_PLUGIN
++      help
++        Select the method used to instrument function pointer dereferences.
++        Note that binary modules cannot be instrumented by this approach.
++
++        Note that the implementation requires a gcc with plugin support,
++        i.e., gcc 4.5 or newer.  You may need to install the supporting
++        headers explicitly in addition to the normal gcc package.
++
++      config PAX_KERNEXEC_PLUGIN_METHOD_BTS
++              bool "bts"
++              help
++                This method is compatible with binary only modules but has
++                a higher runtime overhead.
++
++      config PAX_KERNEXEC_PLUGIN_METHOD_OR
++              bool "or"
++              depends on !PARAVIRT
++              help
++                This method is incompatible with binary only modules but has
++                a lower runtime overhead.
++endchoice
++
++config PAX_KERNEXEC_PLUGIN_METHOD
++      string
++      default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
++      default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
++      default ""
++
++config PAX_KERNEXEC_MODULE_TEXT
++      int "Minimum amount of memory reserved for module code"
++      default "4" if (!GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_SERVER)
++      default "12" if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
++      depends on PAX_KERNEXEC && X86_32
++      help
++        Due to implementation details the kernel must reserve a fixed
++        amount of memory for runtime allocated code (such as modules)
++        at compile time that cannot be changed at runtime.  Here you
++        can specify the minimum amount in MB that will be reserved.
++        Due to the same implementation details this size will always
++        be rounded up to the next 2/4 MB boundary (depends on PAE) so
++        the actually available memory for runtime allocated code will
++        usually be more than this minimum.
++
++        The default 4 MB should be enough for most users but if you have
++        an excessive number of modules (e.g., most distribution configs
++        compile many drivers as modules) or use huge modules such as
++        nvidia's kernel driver, you will need to adjust this amount.
++        A good rule of thumb is to look at your currently loaded kernel
++        modules and add up their sizes.
++
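The rule of thumb above can be applied programmatically by summing the size column of /proc/modules (the second field, in bytes); a rough sketch:

/* Adds up the sizes of all currently loaded modules. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/modules", "r");
        char line[1024];
        unsigned long total = 0;

        if (!f) {
                perror("/proc/modules");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                unsigned long size;
                if (sscanf(line, "%*s %lu", &size) == 1)
                        total += size;
        }
        fclose(f);
        printf("loaded module code: %lu bytes (~%lu MB)\n", total, total >> 20);
        return 0;
}
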
++endmenu
++
++menu "Address Space Layout Randomization"
++      depends on PAX
++
++config PAX_ASLR
++      bool "Address Space Layout Randomization"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        Many if not most exploit techniques rely on the knowledge of
++        certain addresses in the attacked program.  The following options
++        will allow the kernel to apply a certain amount of randomization
++        to specific parts of the program thereby forcing an attacker to
++        guess them in most cases.  Any failed guess will most likely crash
++        the attacked program which allows the kernel to detect such attempts
++        and react on them.  PaX itself provides no reaction mechanisms,
++        instead it is strongly encouraged that you make use of Nergal's
++        segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
++        (http://www.grsecurity.net/) built-in crash detection features or
++        develop one yourself.
++
++        By saying Y here you can choose to randomize the following areas:
++         - top of the task's kernel stack
++         - top of the task's userland stack
++         - base address for mmap() requests that do not specify one
++           (this includes all libraries)
++         - base address of the main executable
++
++        It is strongly recommended to say Y here as address space layout
++        randomization has negligible impact on performance yet it provides
++        a very effective protection.
++
++        NOTE: you can use the 'chpax' or 'paxctl' utilities to control
++        this feature on a per file basis.
++
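A quick way to observe the randomization described above is to print a few addresses and compare them between runs; the text address typically only varies for ET_DYN/PIE executables, as the RANDMMAP help below notes.

/* Print representative addresses; rerun and compare. */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        int on_stack;
        void *from_mmap = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        printf("stack : %p\n", (void *)&on_stack);
        printf("mmap  : %p\n", from_mmap);
        printf("text  : %p\n", (void *)&main);  /* randomized for ET_DYN/PIE binaries */
        return 0;
}
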
++config PAX_RANDKSTACK
++      bool "Randomize kernel stack base"
++      default y if GRKERNSEC_CONFIG_AUTO && !(GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_VIRTUALBOX)
++      depends on X86_TSC && X86
++      help
++        By saying Y here the kernel will randomize every task's kernel
++        stack on every system call.  This will not only force an attacker
++        to guess it but also prevent him from making use of possible
++        leaked information about it.
++
++        Since the kernel stack is a rather scarce resource, randomization
++        may cause unexpected stack overflows, therefore you should very
++        carefully test your system.  Note that once enabled in the kernel
++        configuration, this feature cannot be disabled on a per file basis.
++
++config PAX_RANDUSTACK
++      bool "Randomize user stack base"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on PAX_ASLR
++      help
++        By saying Y here the kernel will randomize every task's userland
++        stack.  The randomization is done in two steps where the second
++        one may apply a large shift to the top of the stack and
++        cause problems for programs that want to use lots of memory (more
++        than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
++        For this reason the second step can be controlled by 'chpax' or
++        'paxctl' on a per file basis.
++
++config PAX_RANDMMAP
++      bool "Randomize mmap() base"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on PAX_ASLR
++      help
++        By saying Y here the kernel will use a randomized base address for
++        mmap() requests that do not specify one themselves.  As a result
++        all dynamically loaded libraries will appear at random addresses
++        and therefore be harder to exploit by a technique where an attacker
++        attempts to execute library code for his purposes (e.g. spawn a
++        shell from an exploited program that is running at an elevated
++        privilege level).
++
++        Furthermore, if a program is relinked as a dynamic ELF file, its
++        base address will be randomized as well, completing the full
++        randomization of the address space layout.  Attacking such programs
++        becomes a guessing game.  You can find an example of doing this at
++        http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
++        http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
++
++        NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
++        feature on a per file basis.
++
++endmenu
++
++menu "Miscellaneous hardening features"
++
++config PAX_MEMORY_SANITIZE
++      bool "Sanitize all freed memory"
++      default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
++      depends on !HIBERNATION
++      help
++        By saying Y here the kernel will erase memory pages and slab objects
++        as soon as they are freed.  This in turn reduces the lifetime of data
++        stored in them, making it less likely that sensitive information such
++        as passwords, cryptographic secrets, etc. stays in memory for too long.
++
++        This is especially useful for programs whose runtime is short;
++        long-lived processes and the kernel itself benefit as well, as long
++        as they ensure timely freeing of memory that may hold sensitive
++        information.
++
++        A nice side effect of the sanitization of slab objects is the
++        reduction of possible info leaks caused by padding bytes within the
++        leaky structures.  Use-after-free bugs for structures containing
++        pointers can also be detected as dereferencing the sanitized pointer
++        will generate an access violation.
++
++        The tradeoff is a performance impact: on a single-CPU system, kernel
++        compilation sees a 3% slowdown; other systems and workloads may vary,
++        and you are advised to test this feature on your expected workload
++        before deploying it.
++
++        To reduce the performance penalty, slab sanitization can be disabled
++        with the kernel command line parameter "pax_sanitize_slab=0"; pages
++        will then still be sanitized, at the cost of some of this feature's
++        effectiveness.
++
++        Note that this feature does not protect data stored in live pages,
++        e.g., process memory swapped to disk may stay there for a long time.
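
For illustration only (a minimal kernel-style sketch, not part of this patch; struct item and sanitize_example are invented names), the use-after-free detection side effect described above looks roughly like this:

/* Assumes <linux/slab.h> and <linux/printk.h>. */
struct item {
        struct item *next;
        char secret[32];
};

static struct item *cached;     /* goes stale after the kfree() below */

static void sanitize_example(void)
{
        struct item *it = kmalloc(sizeof(*it), GFP_KERNEL);
        struct item *n;

        if (!it)
                return;
        cached = it;
        kfree(it);              /* object contents are erased here when
                                   sanitization is enabled */

        /* BUG: use-after-free read; with sanitization the value fetched is
         * the poison pattern, so dereferencing it below faults instead of
         * following attacker-influenced data. */
        n = cached->next;
        if (n)
                pr_info("%c\n", n->secret[0]);
}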
++
++config PAX_MEMORY_STACKLEAK
++      bool "Sanitize kernel stack"
++      default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
++      depends on X86
++      help
++        By saying Y here the kernel will erase the kernel stack before it
++        returns from a system call.  This in turn reduces the information
++        that a kernel stack leak bug can reveal.
++
++        Note that such a bug can still leak information that was put on
++        the stack by the current system call (the one eventually triggering
++        the bug) but traces of earlier system calls on the kernel stack
++        cannot leak anymore.
++
++        The tradeoff is a performance impact: on a single-CPU system, kernel
++        compilation sees a 1% slowdown; other systems and workloads may vary,
++        and you are advised to test this feature on your expected workload
++        before deploying it.
++
++        Note that the full feature requires a gcc with plugin support,
++        i.e., gcc 4.5 or newer.  You may need to install the supporting
++        headers explicitly in addition to the normal gcc package.  Using
++        older gcc versions means that functions with large enough stack
++        frames may leave uninitialized memory behind that may be exposed
++        to a later syscall leaking the stack.
++
++config PAX_MEMORY_STRUCTLEAK
++      bool "Forcibly initialize local variables copied to userland"
++      default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
++      help
++        By saying Y here the kernel will zero initialize some local
++        variables that are going to be copied to userland.  This in
++        turn prevents unintended information leakage from the kernel
++        stack should later code forget to explicitly set all parts of
++        the copied variable.
++
++        The tradeoff is less performance impact than PAX_MEMORY_STACKLEAK
++        at a much smaller coverage.
++
++        Note that the implementation requires a gcc with plugin support,
++        i.e., gcc 4.5 or newer.  You may need to install the supporting
++        headers explicitly in addition to the normal gcc package.
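
For illustration (a hedged sketch, not from the patch; foo_info and foo_get_info are invented names), the kind of stack infoleak this option is aimed at:

/* Assumes <linux/types.h> and <linux/uaccess.h>. */
struct foo_info {
        u32 id;
        u32 flags;              /* never written below */
};

static long foo_get_info(void __user *arg)
{
        struct foo_info info;   /* the kind of local the plugin would
                                   zero-initialize */

        info.id = 1;
        /* info.flags is left untouched: without the plugin (or an explicit
         * memset()/designated initializer) stale kernel stack bytes are
         * copied out to userland here. */
        if (copy_to_user(arg, &info, sizeof(info)))
                return -EFAULT;
        return 0;
}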
++
++config PAX_MEMORY_UDEREF
++      bool "Prevent invalid userland pointer dereference"
++      default y if GRKERNSEC_CONFIG_AUTO && !(X86_64 && GRKERNSEC_CONFIG_PRIORITY_PERF) && (GRKERNSEC_CONFIG_VIRT_NONE || GRKERNSEC_CONFIG_VIRT_EPT)
++      depends on (X86 || (ARM && (CPU_V6 || CPU_V6K || CPU_V7) && !ARM_LPAE)) && !UML_X86 && !XEN
++      select PAX_PER_CPU_PGD if X86_64
++      help
++        By saying Y here the kernel will be prevented from dereferencing
++        userland pointers in contexts where the kernel expects only kernel
++        pointers.  This is both a useful runtime debugging feature and a
++        security measure that prevents exploiting a class of kernel bugs.
++
++        The tradeoff is that some virtualization solutions may experience
++        a huge slowdown and therefore you should not enable this feature
++        for kernels meant to run in such environments.  Whether a given VM
++        solution is affected or not is best determined by simply trying it
++        out; the performance impact will be obvious right from boot, as this
++        mechanism engages very early on.  A good rule of thumb is that
++        VMs running on CPUs without hardware virtualization support (i.e.,
++        the majority of IA-32 CPUs) will likely experience the slowdown.
++
++        On X86_64 the kernel will make use of PCID support when available
++        (Intel's Westmere, Sandy Bridge, etc) for better security (default)
++        or performance impact.  Pass pax_weakuderef on the kernel command
++        line to choose the latter.
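
As a rough sketch of the bug class this option stops (not from the patch; cmd_arg and handle_cmd are invented names), a kernel path that dereferences a userland pointer directly instead of copying the data in:

/* Assumes <linux/uaccess.h>. */
struct cmd_arg {
        int opcode;
};

static int handle_cmd(struct cmd_arg __user *uarg)
{
        int op;

        /* BUG: uarg points into userland but is dereferenced as if it were
         * a kernel pointer; with UDEREF enabled this access faults right
         * away.  The correct form would be get_user(op, &uarg->opcode) or
         * copy_from_user(). */
        op = ((struct cmd_arg __force *)uarg)->opcode;

        return op;
}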
++
++config PAX_REFCOUNT
++      bool "Prevent various kernel object reference counter overflows"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC && ((ARM && (CPU_V6 || CPU_V6K || CPU_V7)) || SPARC64 || X86)
++      help
++        By saying Y here the kernel will detect and prevent overflowing
++        various (but not all) kinds of object reference counters.  Such
++        overflows can normally occur due to bugs only and are often, if
++        not always, exploitable.
++
++        The tradeoff is that data structures protected by an overflowed
++        refcount will never be freed and therefore will leak memory.  Note
++        that this leak also happens even without this protection but in
++        that case the overflow can eventually trigger the freeing of the
++        data structure while it is still being used elsewhere, resulting
++        in the exploitable situation that this feature prevents.
++
++        Since this has a negligible performance impact, you should enable
++        this feature.
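
Roughly, the pattern being protected (a sketch with invented names, not part of the patch); the atomic counter below is what PAX_REFCOUNT keeps from wrapping:

/* Assumes <linux/atomic.h> and <linux/slab.h>. */
struct obj {
        atomic_t refs;
        /* ... payload ... */
};

static void obj_get(struct obj *o)
{
        atomic_inc(&o->refs);   /* a bug that takes references without ever
                                   dropping them can wrap this past INT_MAX
                                   and eventually back to 0 */
}

static void obj_put(struct obj *o)
{
        if (atomic_dec_and_test(&o->refs))
                kfree(o);       /* after a wrap this frees the object while
                                   other holders still use it; PAX_REFCOUNT
                                   traps the overflow instead, at the cost of
                                   leaking the object as the help text says */
}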
++
++config PAX_CONSTIFY_PLUGIN
++      bool "Automatically constify eligible structures"
++      default y
++      depends on !UML && PAX_KERNEXEC
++      help
++        By saying Y here the compiler will automatically constify a class
++        of types that contain only function pointers.  This reduces the
++        kernel's attack surface and also produces a better memory layout.
++
++        Note that the implementation requires a gcc with plugin support,
++        i.e., gcc 4.5 or newer.  You may need to install the supporting
++        headers explicitly in addition to the normal gcc package.
++ 
++        Note that if some code really has to modify constified variables
++        then the source code will have to be patched to allow it.  Examples
++        can be found in PaX itself (the no_const attribute) and for some
++        out-of-tree modules at http://www.grsecurity.net/~paxguy1/ .
++
++config PAX_USERCOPY
++      bool "Harden heap object copies between kernel and userland"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on ARM || IA64 || PPC || SPARC || X86
++      depends on GRKERNSEC && (SLAB || SLUB || SLOB)
++      select PAX_USERCOPY_SLABS
++      help
++        By saying Y here the kernel will enforce the size of heap objects
++        when they are copied in either direction between the kernel and
++        userland, even if only a part of the heap object is copied.
++
++        Specifically, this checking prevents information leaking from the
++        kernel heap during kernel to userland copies (if the kernel heap
++        object is otherwise fully initialized) and prevents kernel heap
++        overflows during userland to kernel copies.
++
++        Note that the current implementation provides the strictest bounds
++        checks for the SLUB allocator.
++
++        Enabling this option also enables per-slab-cache protection against
++        data in a given cache being copied to or from userland via the
++        usercopy accessors.  Though the whitelist of regions will be reduced
++        over time, it notably protects important data structures like task
++        structs.
++
++        If frame pointers are enabled on x86, this option will also restrict
++        copies into and out of the kernel stack to local variables within a
++        single frame.
++
++        Since this has a negligible performance impact, you should enable
++        this feature.
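
A minimal sketch of what gets checked (invented names, not from the patch): the length of a copy that crosses the kernel/userland boundary is validated against the actual size of the heap object involved:

/* Assumes <linux/slab.h> and <linux/uaccess.h>. */
static int bar_read(char __user *buf, size_t len)
{
        char *obj = kmalloc(64, GFP_KERNEL);    /* 64-byte slab object */
        int ret = 0;

        if (!obj)
                return -ENOMEM;
        /* If len can exceed 64 (say it comes from userland unchecked), the
         * copy would disclose adjacent heap memory.  With PAX_USERCOPY the
         * slab layer knows the object's size and refuses the oversized copy
         * instead. */
        if (copy_to_user(buf, obj, len))
                ret = -EFAULT;
        kfree(obj);
        return ret;
}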
++
++config PAX_USERCOPY_DEBUG
++      bool
++      depends on X86 && PAX_USERCOPY
++      default n
++
++config PAX_SIZE_OVERFLOW
++      bool "Prevent various integer overflows in function size parameters"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on X86
++      help
++        By saying Y here the kernel recomputes expressions of function
++        arguments marked by a size_overflow attribute with double integer
++        precision (DImode/TImode for 32/64 bit integer types).
++
++        The recomputed argument is checked against TYPE_MAX; on overflow an
++        event is logged and the triggering process is killed.
++
++        Homepage: http://www.grsecurity.net/~ephox/overflow_plugin/
++
++        Note that the implementation requires a gcc with plugin support,
++        i.e., gcc 4.5 or newer.  You may need to install the supporting
++        headers explicitly in addition to the normal gcc package.
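
For illustration (a sketch with invented names, not part of the patch), the overflow class targeted here: a size expression that wraps before reaching the allocator, which the plugin recomputes in double precision and traps on:

/* Assumes <linux/slab.h>. */
struct rec {
        u64 key;
        u64 val;
};

static struct rec *alloc_table(unsigned int nmemb)
{
        unsigned int bytes = nmemb * sizeof(struct rec); /* can wrap/truncate */
        struct rec *tbl = kmalloc(bytes, GFP_KERNEL);
        unsigned int i;

        if (!tbl)
                return NULL;
        /* If nmemb is attacker-chosen and the multiplication wrapped, the
         * allocation above is undersized and this loop overflows the heap.
         * With the plugin, the marked size computation is redone in double
         * precision, the overflow is logged and the process is killed. */
        for (i = 0; i < nmemb; i++)
                tbl[i].key = 0;
        return tbl;
}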
++
++config PAX_LATENT_ENTROPY
++      bool "Generate some entropy during boot"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        By saying Y here the kernel will instrument early boot code to
++        extract some entropy from both original and artificially created
++        program state.  This helps especially on embedded systems, where
++        there is normally little 'natural' entropy.  The cost is some
++        slowdown of the boot process.
++
++        When pax_extra_latent_entropy is passed on the kernel command line,
++        entropy will be extracted from up to the first 4GB of RAM while the
++        runtime memory allocator is being initialized.  This slows down the
++        boot process even further.
++
++        Note that the implementation requires a gcc with plugin support,
++        i.e., gcc 4.5 or newer.  You may need to install the supporting
++        headers explicitly in addition to the normal gcc package.
++
++        Note that entropy extracted this way is not cryptographically
++        secure!
++
++endmenu
++
++endmenu
++
++source grsecurity/Kconfig
++
++endmenu
++
++endmenu
++
+ source security/keys/Kconfig
+ config SECURITY_DMESG_RESTRICT
+@@ -103,7 +1056,7 @@ config INTEL_TXT
+ config LSM_MMAP_MIN_ADDR
+       int "Low address space for LSM to protect from user allocation"
+       depends on SECURITY && SECURITY_SELINUX
+-      default 32768 if ARM
++      default 32768 if ALPHA || ARM || PARISC || SPARC32
+       default 65536
+       help
+         This is the portion of low virtual memory which should be protected
+diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig
+index 9b9013b..51ebf96 100644
+--- a/security/apparmor/Kconfig
++++ b/security/apparmor/Kconfig
+@@ -29,3 +29,12 @@ config SECURITY_APPARMOR_BOOTPARAM_VALUE
+         boot.
+         If you are unsure how to answer this question, answer 1.
++
++config SECURITY_APPARMOR_COMPAT_24
++      bool "Enable AppArmor 2.4 compatibility"
++      depends on SECURITY_APPARMOR
++      default y
++      help
++        This option enables compatibility with AppArmor 2.4.  It is
++        recommended if compatibility with older versions of AppArmor
++        is desired.
+diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
+index 16c15ec..42b7c9f 100644
+--- a/security/apparmor/apparmorfs.c
++++ b/security/apparmor/apparmorfs.c
+@@ -182,6 +182,234 @@ const struct file_operations aa_fs_seq_file_ops = {
+       .release        = single_release,
+ };
++#ifdef CONFIG_SECURITY_APPARMOR_COMPAT_24
++/**
++ * __next_namespace - find the next namespace to list
++ * @root: root namespace to stop search at (NOT NULL)
++ * @ns: current ns position (NOT NULL)
++ *
++ * Find the next namespace from @ns under @root and handle all locking needed
++ * while switching current namespace.
++ *
++ * Returns: next namespace or NULL if at last namespace under @root
++ * NOTE: will not unlock root->lock
++ */
++static struct aa_namespace *__next_namespace(struct aa_namespace *root,
++                                           struct aa_namespace *ns)
++{
++      struct aa_namespace *parent;
++
++      /* is next namespace a child */
++      if (!list_empty(&ns->sub_ns)) {
++              struct aa_namespace *next;
++              next = list_first_entry(&ns->sub_ns, typeof(*ns), base.list);
++              read_lock(&next->lock);
++              return next;
++      }
++
++      /* check if the next ns is a sibling, parent, gp, .. */
++      parent = ns->parent;
++      while (parent) {
++              read_unlock(&ns->lock);
++              list_for_each_entry_continue(ns, &parent->sub_ns, base.list) {
++                      read_lock(&ns->lock);
++                      return ns;
++              }
++              if (parent == root)
++                      return NULL;
++              ns = parent;
++              parent = parent->parent;
++      }
++
++      return NULL;
++}
++
++/**
++ * __first_profile - find the first profile in a namespace
++ * @root: namespace that is root of profiles being displayed (NOT NULL)
++ * @ns: namespace to start in   (NOT NULL)
++ *
++ * Returns: unrefcounted profile or NULL if no profile
++ */
++static struct aa_profile *__first_profile(struct aa_namespace *root,
++                                        struct aa_namespace *ns)
++{
++      for ( ; ns; ns = __next_namespace(root, ns)) {
++              if (!list_empty(&ns->base.profiles))
++                      return list_first_entry(&ns->base.profiles,
++                                              struct aa_profile, base.list);
++      }
++      return NULL;
++}
++
++/**
++ * __next_profile - step to the next profile in a profile tree
++ * @profile: current profile in tree (NOT NULL)
++ *
++ * Perform a depth-first traversal of the profile tree in a namespace
++ *
++ * Returns: next profile or NULL if done
++ * Requires: profile->ns.lock to be held
++ */
++static struct aa_profile *__next_profile(struct aa_profile *p)
++{
++      struct aa_profile *parent;
++      struct aa_namespace *ns = p->ns;
++
++      /* is next profile a child */
++      if (!list_empty(&p->base.profiles))
++              return list_first_entry(&p->base.profiles, typeof(*p),
++                                      base.list);
++
++      /* is next profile a sibling, parent's sibling, gp's sibling, .. */
++      parent = p->parent;
++      while (parent) {
++              list_for_each_entry_continue(p, &parent->base.profiles,
++                                           base.list)
++                              return p;
++              p = parent;
++              parent = parent->parent;
++      }
++
++      /* is next another profile in the namespace */
++      list_for_each_entry_continue(p, &ns->base.profiles, base.list)
++              return p;
++
++      return NULL;
++}
++
++/**
++ * next_profile - step to the next profile, wherever it may be
++ * @root: root namespace  (NOT NULL)
++ * @profile: current profile  (NOT NULL)
++ *
++ * Returns: next profile or NULL if there isn't one
++ */
++static struct aa_profile *next_profile(struct aa_namespace *root,
++                                     struct aa_profile *profile)
++{
++      struct aa_profile *next = __next_profile(profile);
++      if (next)
++              return next;
++
++      /* finished all profiles in namespace, move to next namespace */
++      return __first_profile(root, __next_namespace(root, profile->ns));
++}
++
++/**
++ * p_start - start a depth first traversal of profile tree
++ * @f: seq_file to fill
++ * @pos: current position
++ *
++ * Returns: first profile under current namespace or NULL if none found
++ *
++ * acquires first ns->lock
++ */
++static void *p_start(struct seq_file *f, loff_t *pos)
++      __acquires(root->lock)
++{
++      struct aa_profile *profile = NULL;
++      struct aa_namespace *root = aa_current_profile()->ns;
++      loff_t l = *pos;
++      f->private = aa_get_namespace(root);
++
++
++      /* find the first profile */
++      read_lock(&root->lock);
++      profile = __first_profile(root, root);
++
++      /* skip to position */
++      for (; profile && l > 0; l--)
++              profile = next_profile(root, profile);
++
++      return profile;
++}
++
++/**
++ * p_next - read the next profile entry
++ * @f: seq_file to fill
++ * @p: profile previously returned
++ * @pos: current position
++ *
++ * Returns: next profile after @p or NULL if none
++ *
++ * may acquire/release locks in namespace tree as necessary
++ */
++static void *p_next(struct seq_file *f, void *p, loff_t *pos)
++{
++      struct aa_profile *profile = p;
++      struct aa_namespace *root = f->private;
++      (*pos)++;
++
++      return next_profile(root, profile);
++}
++
++/**
++ * p_stop - stop depth first traversal
++ * @f: seq_file we are filling
++ * @p: the last profile written
++ *
++ * Release all locking done by p_start/p_next on namespace tree
++ */
++static void p_stop(struct seq_file *f, void *p)
++      __releases(root->lock)
++{
++      struct aa_profile *profile = p;
++      struct aa_namespace *root = f->private, *ns;
++
++      if (profile) {
++              for (ns = profile->ns; ns && ns != root; ns = ns->parent)
++                      read_unlock(&ns->lock);
++      }
++      read_unlock(&root->lock);
++      aa_put_namespace(root);
++}
++
++/**
++ * seq_show_profile - show a profile entry
++ * @f: seq_file to fill
++ * @p: current position (profile)    (NOT NULL)
++ *
++ * Returns: error on failure
++ */
++static int seq_show_profile(struct seq_file *f, void *p)
++{
++      struct aa_profile *profile = (struct aa_profile *)p;
++      struct aa_namespace *root = f->private;
++
++      if (profile->ns != root)
++              seq_printf(f, ":%s://", aa_ns_name(root, profile->ns));
++      seq_printf(f, "%s (%s)\n", profile->base.hname,
++                 COMPLAIN_MODE(profile) ? "complain" : "enforce");
++
++      return 0;
++}
++
++static const struct seq_operations aa_fs_profiles_op = {
++      .start = p_start,
++      .next = p_next,
++      .stop = p_stop,
++      .show = seq_show_profile,
++};
++
++static int profiles_open(struct inode *inode, struct file *file)
++{
++      return seq_open(file, &aa_fs_profiles_op);
++}
++
++static int profiles_release(struct inode *inode, struct file *file)
++{
++      return seq_release(inode, file);
++}
++
++const struct file_operations aa_fs_profiles_fops = {
++      .open = profiles_open,
++      .read = seq_read,
++      .llseek = seq_lseek,
++      .release = profiles_release,
++};
++#endif /* CONFIG_SECURITY_APPARMOR_COMPAT_24 */
++
+ /** Base file system setup **/
+ static struct aa_fs_entry aa_fs_entry_file[] = {
+@@ -210,6 +438,9 @@ static struct aa_fs_entry aa_fs_entry_apparmor[] = {
+       AA_FS_FILE_FOPS(".load", 0640, &aa_fs_profile_load),
+       AA_FS_FILE_FOPS(".replace", 0640, &aa_fs_profile_replace),
+       AA_FS_FILE_FOPS(".remove", 0640, &aa_fs_profile_remove),
++#ifdef CONFIG_SECURITY_APPARMOR_COMPAT_24
++      AA_FS_FILE_FOPS("profiles", 0640, &aa_fs_profiles_fops),
++#endif
+       AA_FS_DIR("features", aa_fs_entry_features),
+       { }
+ };
+diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
+index b21830e..a7d1a17 100644
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -614,7 +614,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
+       return error;
+ }
+-static struct security_operations apparmor_ops = {
++static struct security_operations apparmor_ops __read_only = {
+       .name =                         "apparmor",
+       .ptrace_access_check =          apparmor_ptrace_access_check,
+diff --git a/security/commoncap.c b/security/commoncap.c
+index c44b6fe..932df30 100644
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -424,6 +424,32 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
+       return 0;
+ }
++/* returns:
++      1 for suid privilege
++      2 for sgid privilege
++      3 for fscap privilege
++*/
++int is_privileged_binary(const struct dentry *dentry)
++{
++      struct cpu_vfs_cap_data capdata;
++      struct inode *inode = dentry->d_inode;
++
++      if (!inode || S_ISDIR(inode->i_mode))
++              return 0;
++
++      if (inode->i_mode & S_ISUID)
++              return 1;
++      if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
++              return 2;
++
++      if (!get_vfs_caps_from_disk(dentry, &capdata)) {
++              if (!cap_isclear(capdata.inheritable) || !cap_isclear(capdata.permitted))
++                      return 3;
++      }
++
++      return 0;
++}
++
+ /*
+  * Attempt to get the on-exec apply capability sets for an executable file from
+  * its xattrs and, if present, apply them to the proposed credentials being
+@@ -592,6 +618,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
+       const struct cred *cred = current_cred();
+       kuid_t root_uid = make_kuid(cred->user_ns, 0);
++      if (gr_acl_enable_at_secure())
++              return 1;
++
+       if (!uid_eq(cred->uid, root_uid)) {
+               if (bprm->cap_effective)
+                       return 1;
+diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
+index a41c9c1..83da6dd 100644
+--- a/security/integrity/ima/ima.h
++++ b/security/integrity/ima/ima.h
+@@ -97,8 +97,8 @@ int ima_init_crypto(void);
+ extern spinlock_t ima_queue_lock;
+ struct ima_h_table {
+-      atomic_long_t len;      /* number of stored measurements in the list */
+-      atomic_long_t violations;
++      atomic_long_unchecked_t len;    /* number of stored measurements in the list */
++      atomic_long_unchecked_t violations;
+       struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
+ };
+ extern struct ima_h_table ima_htable;
+diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
+index 1c03e8f1..398a941 100644
+--- a/security/integrity/ima/ima_api.c
++++ b/security/integrity/ima/ima_api.c
+@@ -79,7 +79,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
+       int result;
+       /* can overflow, only indicator */
+-      atomic_long_inc(&ima_htable.violations);
++      atomic_long_inc_unchecked(&ima_htable.violations);
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry) {
+diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
+index 38477c9..87a60c7 100644
+--- a/security/integrity/ima/ima_fs.c
++++ b/security/integrity/ima/ima_fs.c
+@@ -28,12 +28,12 @@
+ static int valid_policy = 1;
+ #define TMPBUFLEN 12
+ static ssize_t ima_show_htable_value(char __user *buf, size_t count,
+-                                   loff_t *ppos, atomic_long_t *val)
++                                   loff_t *ppos, atomic_long_unchecked_t *val)
+ {
+       char tmpbuf[TMPBUFLEN];
+       ssize_t len;
+-      len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
++      len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
+       return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
+ }
+diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
+index ff63fe0..809cd96 100644
+--- a/security/integrity/ima/ima_queue.c
++++ b/security/integrity/ima/ima_queue.c
+@@ -80,7 +80,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
+       INIT_LIST_HEAD(&qe->later);
+       list_add_tail_rcu(&qe->later, &ima_measurements);
+-      atomic_long_inc(&ima_htable.len);
++      atomic_long_inc_unchecked(&ima_htable.len);
+       key = ima_hash_key(entry->digest);
+       hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
+       return 0;
+diff --git a/security/keys/compat.c b/security/keys/compat.c
+index d65fa7f..cbfe366 100644
+--- a/security/keys/compat.c
++++ b/security/keys/compat.c
+@@ -44,7 +44,7 @@ static long compat_keyctl_instantiate_key_iov(
+       if (ret == 0)
+               goto no_payload_free;
+-      ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
++      ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
+ err:
+       if (iov != iovstack)
+               kfree(iov);
+diff --git a/security/keys/internal.h b/security/keys/internal.h
+index d4f1468..cc52f92 100644
+--- a/security/keys/internal.h
++++ b/security/keys/internal.h
+@@ -242,7 +242,7 @@ extern long keyctl_instantiate_key_iov(key_serial_t,
+ extern long keyctl_invalidate_key(key_serial_t);
+ extern long keyctl_instantiate_key_common(key_serial_t,
+-                                        const struct iovec *,
++                                        const struct iovec __user *,
+                                         unsigned, size_t, key_serial_t);
+ /*
+diff --git a/security/keys/key.c b/security/keys/key.c
+index 8fb7c7b..ba3610d 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -284,7 +284,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
+       atomic_set(&key->usage, 1);
+       init_rwsem(&key->sem);
+-      lockdep_set_class(&key->sem, &type->lock_class);
++      lockdep_set_class(&key->sem, (struct lock_class_key *)&type->lock_class);
+       key->type = type;
+       key->user = user;
+       key->quotalen = quotalen;
+@@ -1032,7 +1032,9 @@ int register_key_type(struct key_type *ktype)
+       struct key_type *p;
+       int ret;
+-      memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));
++      pax_open_kernel();
++      memset((void *)&ktype->lock_class, 0, sizeof(ktype->lock_class));
++      pax_close_kernel();
+       ret = -EEXIST;
+       down_write(&key_types_sem);
+@@ -1044,7 +1046,7 @@ int register_key_type(struct key_type *ktype)
+       }
+       /* store the type */
+-      list_add(&ktype->link, &key_types_list);
++      pax_list_add((struct list_head *)&ktype->link, &key_types_list);
+       pr_notice("Key type %s registered\n", ktype->name);
+       ret = 0;
+@@ -1066,7 +1068,7 @@ EXPORT_SYMBOL(register_key_type);
+ void unregister_key_type(struct key_type *ktype)
+ {
+       down_write(&key_types_sem);
+-      list_del_init(&ktype->link);
++      pax_list_del_init((struct list_head *)&ktype->link);
+       downgrade_write(&key_types_sem);
+       key_gc_keytype(ktype);
+       pr_notice("Key type %s unregistered\n", ktype->name);
+@@ -1084,10 +1086,10 @@ void __init key_init(void)
+                       0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+       /* add the special key types */
+-      list_add_tail(&key_type_keyring.link, &key_types_list);
+-      list_add_tail(&key_type_dead.link, &key_types_list);
+-      list_add_tail(&key_type_user.link, &key_types_list);
+-      list_add_tail(&key_type_logon.link, &key_types_list);
++      pax_list_add_tail((struct list_head *)&key_type_keyring.link, &key_types_list);
++      pax_list_add_tail((struct list_head *)&key_type_dead.link, &key_types_list);
++      pax_list_add_tail((struct list_head *)&key_type_user.link, &key_types_list);
++      pax_list_add_tail((struct list_head *)&key_type_logon.link, &key_types_list);
+       /* record the root user tracking */
+       rb_link_node(&root_key_user.node,
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index 33cfd27..842fc5a 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -987,7 +987,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
+ /*
+  * Copy the iovec data from userspace
+  */
+-static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
++static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
+                                unsigned ioc)
+ {
+       for (; ioc > 0; ioc--) {
+@@ -1009,7 +1009,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
+  * If successful, 0 will be returned.
+  */
+ long keyctl_instantiate_key_common(key_serial_t id,
+-                                 const struct iovec *payload_iov,
++                                 const struct iovec __user *payload_iov,
+                                  unsigned ioc,
+                                  size_t plen,
+                                  key_serial_t ringid)
+@@ -1104,7 +1104,7 @@ long keyctl_instantiate_key(key_serial_t id,
+                       [0].iov_len  = plen
+               };
+-              return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
++              return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
+       }
+       return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
+@@ -1137,7 +1137,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
+       if (ret == 0)
+               goto no_payload_free;
+-      ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
++      ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
+ err:
+       if (iov != iovstack)
+               kfree(iov);
+diff --git a/security/keys/keyring.c b/security/keys/keyring.c
+index 6ece7f2..ecdb55c 100644
+--- a/security/keys/keyring.c
++++ b/security/keys/keyring.c
+@@ -227,16 +227,16 @@ static long keyring_read(const struct key *keyring,
+                       ret = -EFAULT;
+                       for (loop = 0; loop < klist->nkeys; loop++) {
++                              key_serial_t serial;
+                               key = rcu_deref_link_locked(klist, loop,
+                                                           keyring);
++                              serial = key->serial;
+                               tmp = sizeof(key_serial_t);
+                               if (tmp > buflen)
+                                       tmp = buflen;
+-                              if (copy_to_user(buffer,
+-                                               &key->serial,
+-                                               tmp) != 0)
++                              if (copy_to_user(buffer, &serial, tmp))
+                                       goto error;
+                               buflen -= tmp;
+diff --git a/security/min_addr.c b/security/min_addr.c
+index f728728..6457a0c 100644
+--- a/security/min_addr.c
++++ b/security/min_addr.c
+@@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
+  */
+ static void update_mmap_min_addr(void)
+ {
++#ifndef SPARC
+ #ifdef CONFIG_LSM_MMAP_MIN_ADDR
+       if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
+               mmap_min_addr = dac_mmap_min_addr;
+@@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
+ #else
+       mmap_min_addr = dac_mmap_min_addr;
+ #endif
++#endif
+ }
+ /*
+diff --git a/security/security.c b/security/security.c
+index a3dce87..9ca1435 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -20,6 +20,7 @@
+ #include <linux/ima.h>
+ #include <linux/evm.h>
+ #include <linux/fsnotify.h>
++#include <linux/mm.h>
+ #include <linux/mman.h>
+ #include <linux/mount.h>
+ #include <linux/personality.h>
+@@ -32,8 +33,8 @@
+ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
+       CONFIG_DEFAULT_SECURITY;
+-static struct security_operations *security_ops;
+-static struct security_operations default_security_ops = {
++static struct security_operations *security_ops __read_only;
++static struct security_operations default_security_ops __read_only = {
+       .name   = "default",
+ };
+@@ -74,7 +75,9 @@ int __init security_init(void)
+ void reset_security_ops(void)
+ {
++      pax_open_kernel();
+       security_ops = &default_security_ops;
++      pax_close_kernel();
+ }
+ /* Save user chosen LSM */
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 5c6f2cd..b4f945c 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -96,8 +96,6 @@
+ #define NUM_SEL_MNT_OPTS 5
+-extern struct security_operations *security_ops;
+-
+ /* SECMARK reference count */
+ static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
+@@ -5529,7 +5527,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
+ #endif
+-static struct security_operations selinux_ops = {
++static struct security_operations selinux_ops __read_only = {
+       .name =                         "selinux",
+       .ptrace_access_check =          selinux_ptrace_access_check,
+diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
+index 65f67cb..3f141ef 100644
+--- a/security/selinux/include/xfrm.h
++++ b/security/selinux/include/xfrm.h
+@@ -50,7 +50,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
+ static inline void selinux_xfrm_notify_policyload(void)
+ {
+-      atomic_inc(&flow_cache_genid);
++      atomic_inc_unchecked(&flow_cache_genid);
+       rt_genid_bump(&init_net);
+ }
+ #else
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index d52c780..6431349 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -3392,7 +3392,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+       return 0;
+ }
+-struct security_operations smack_ops = {
++struct security_operations smack_ops __read_only = {
+       .name =                         "smack",
+       .ptrace_access_check =          smack_ptrace_access_check,
+diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c
+index 390c646..f2f8db3 100644
+--- a/security/tomoyo/mount.c
++++ b/security/tomoyo/mount.c
+@@ -118,6 +118,10 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r,
+                  type == tomoyo_mounts[TOMOYO_MOUNT_MOVE]) {
+               need_dev = -1; /* dev_name is a directory */
+       } else {
++              if (!capable(CAP_SYS_ADMIN)) {
++                      error = -EPERM;
++                      goto out;
++              }
+               fstype = get_fs_type(type);
+               if (!fstype) {
+                       error = -ENODEV;
+diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
+index f0b756e..b129202 100644
+--- a/security/tomoyo/tomoyo.c
++++ b/security/tomoyo/tomoyo.c
+@@ -503,7 +503,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
+  * tomoyo_security_ops is a "struct security_operations" which is used for
+  * registering TOMOYO.
+  */
+-static struct security_operations tomoyo_security_ops = {
++static struct security_operations tomoyo_security_ops __read_only = {
+       .name                = "tomoyo",
+       .cred_alloc_blank    = tomoyo_cred_alloc_blank,
+       .cred_prepare        = tomoyo_cred_prepare,
+diff --git a/security/yama/Kconfig b/security/yama/Kconfig
+index 20ef514..4182bed 100644
+--- a/security/yama/Kconfig
++++ b/security/yama/Kconfig
+@@ -1,6 +1,6 @@
+ config SECURITY_YAMA
+       bool "Yama support"
+-      depends on SECURITY
++      depends on SECURITY && !GRKERNSEC
+       select SECURITYFS
+       select SECURITY_PATH
+       default n
+diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
+index 13c88fbc..f8c115e 100644
+--- a/security/yama/yama_lsm.c
++++ b/security/yama/yama_lsm.c
+@@ -365,7 +365,7 @@ int yama_ptrace_traceme(struct task_struct *parent)
+ }
+ #ifndef CONFIG_SECURITY_YAMA_STACKED
+-static struct security_operations yama_ops = {
++static struct security_operations yama_ops __read_only = {
+       .name =                 "yama",
+       .ptrace_access_check =  yama_ptrace_access_check,
+@@ -376,28 +376,24 @@ static struct security_operations yama_ops = {
+ #endif
+ #ifdef CONFIG_SYSCTL
++static int zero __read_only;
++static int max_scope __read_only = YAMA_SCOPE_NO_ATTACH;
++
+ static int yama_dointvec_minmax(struct ctl_table *table, int write,
+                               void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      int rc;
++      ctl_table_no_const yama_table;
+       if (write && !capable(CAP_SYS_PTRACE))
+               return -EPERM;
+-      rc = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+-      if (rc)
+-              return rc;
+-
++      yama_table = *table;
+       /* Lock the max value if it ever gets set. */
+-      if (write && *(int *)table->data == *(int *)table->extra2)
+-              table->extra1 = table->extra2;
+-
+-      return rc;
++      if (ptrace_scope == max_scope)
++              yama_table.extra1 = &max_scope;
++      return proc_dointvec_minmax(&yama_table, write, buffer, lenp, ppos);
+ }
+-static int zero;
+-static int max_scope = YAMA_SCOPE_NO_ATTACH;
+-
+ struct ctl_path yama_sysctl_path[] = {
+       { .procname = "kernel", },
+       { .procname = "yama", },
+diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
+index 4cedc69..e59d8a3 100644
+--- a/sound/aoa/codecs/onyx.c
++++ b/sound/aoa/codecs/onyx.c
+@@ -54,7 +54,7 @@ struct onyx {
+                               spdif_locked:1,
+                               analog_locked:1,
+                               original_mute:2;
+-      int                     open_count;
++      local_t                 open_count;
+       struct codec_info       *codec_info;
+       /* mutex serializes concurrent access to the device
+@@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
+       struct onyx *onyx = cii->codec_data;
+       mutex_lock(&onyx->mutex);
+-      onyx->open_count++;
++      local_inc(&onyx->open_count);
+       mutex_unlock(&onyx->mutex);
+       return 0;
+@@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
+       struct onyx *onyx = cii->codec_data;
+       mutex_lock(&onyx->mutex);
+-      onyx->open_count--;
+-      if (!onyx->open_count)
++      if (local_dec_and_test(&onyx->open_count))
+               onyx->spdif_locked = onyx->analog_locked = 0;
+       mutex_unlock(&onyx->mutex);
+diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
+index ffd2025..df062c9 100644
+--- a/sound/aoa/codecs/onyx.h
++++ b/sound/aoa/codecs/onyx.h
+@@ -11,6 +11,7 @@
+ #include <linux/i2c.h>
+ #include <asm/pmac_low_i2c.h>
+ #include <asm/prom.h>
++#include <asm/local.h>
+ /* PCM3052 register definitions */
+diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
+index 4c1cc51..16040040 100644
+--- a/sound/core/oss/pcm_oss.c
++++ b/sound/core/oss/pcm_oss.c
+@@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
+               if (in_kernel) {
+                       mm_segment_t fs;
+                       fs = snd_enter_user();
+-                      ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
++                      ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
+                       snd_leave_user(fs);
+               } else {
+-                      ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
++                      ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
+               }
+               if (ret != -EPIPE && ret != -ESTRPIPE)
+                       break;
+@@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
+               if (in_kernel) {
+                       mm_segment_t fs;
+                       fs = snd_enter_user();
+-                      ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
++                      ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
+                       snd_leave_user(fs);
+               } else {
+-                      ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
++                      ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
+               }
+               if (ret == -EPIPE) {
+                       if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
+@@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
+               struct snd_pcm_plugin_channel *channels;
+               size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
+               if (!in_kernel) {
+-                      if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
++                      if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
+                               return -EFAULT;
+                       buf = runtime->oss.buffer;
+               }
+@@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
+                       }
+               } else {
+                       tmp = snd_pcm_oss_write2(substream,
+-                                               (const char __force *)buf,
++                                               (const char __force_kernel *)buf,
+                                                runtime->oss.period_bytes, 0);
+                       if (tmp <= 0)
+                               goto err;
+@@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       snd_pcm_sframes_t frames, frames1;
+ #ifdef CONFIG_SND_PCM_OSS_PLUGINS
+-      char __user *final_dst = (char __force __user *)buf;
++      char __user *final_dst = (char __force_user *)buf;
+       if (runtime->oss.plugin_first) {
+               struct snd_pcm_plugin_channel *channels;
+               size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
+@@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
+                       xfer += tmp;
+                       runtime->oss.buffer_used -= tmp;
+               } else {
+-                      tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
++                      tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
+                                               runtime->oss.period_bytes, 0);
+                       if (tmp <= 0)
+                               goto err;
+@@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+                                                                  size1);
+                                       size1 /= runtime->channels; /* frames */
+                                       fs = snd_enter_user();
+-                                      snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
++                                      snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
+                                       snd_leave_user(fs);
+                               }
+                       } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
+diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
+index af49721..e85058e 100644
+--- a/sound/core/pcm_compat.c
++++ b/sound/core/pcm_compat.c
+@@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
+       int err;
+       fs = snd_enter_user();
+-      err = snd_pcm_delay(substream, &delay);
++      err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
+       snd_leave_user(fs);
+       if (err < 0)
+               return err;
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index f928181..33fb83d 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -2819,11 +2819,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
+       switch (substream->stream) {
+       case SNDRV_PCM_STREAM_PLAYBACK:
+               result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
+-                                               (void __user *)arg);
++                                               (void __force_user *)arg);
+               break;
+       case SNDRV_PCM_STREAM_CAPTURE:
+               result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
+-                                              (void __user *)arg);
++                                              (void __force_user *)arg);
+               break;
+       default:
+               result = -EINVAL;
+diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
+index 040c60e..989a19a 100644
+--- a/sound/core/seq/seq_device.c
++++ b/sound/core/seq/seq_device.c
+@@ -64,7 +64,7 @@ struct ops_list {
+       int argsize;            /* argument size */
+       /* operators */
+-      struct snd_seq_dev_ops ops;
++      struct snd_seq_dev_ops *ops;
+       /* registered devices */
+       struct list_head dev_list;      /* list of devices */
+@@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
+       mutex_lock(&ops->reg_mutex);
+       /* copy driver operators */
+-      ops->ops = *entry;
++      ops->ops = entry;
+       ops->driver |= DRIVER_LOADED;
+       ops->argsize = argsize;
+@@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
+                          dev->name, ops->id, ops->argsize, dev->argsize);
+               return -EINVAL;
+       }
+-      if (ops->ops.init_device(dev) >= 0) {
++      if (ops->ops->init_device(dev) >= 0) {
+               dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
+               ops->num_init_devices++;
+       } else {
+@@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
+                          dev->name, ops->id, ops->argsize, dev->argsize);
+               return -EINVAL;
+       }
+-      if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
++      if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
+               dev->status = SNDRV_SEQ_DEVICE_FREE;
+               dev->driver_data = NULL;
+               ops->num_init_devices--;
+diff --git a/sound/core/sound.c b/sound/core/sound.c
+index f002bd9..c462985 100644
+--- a/sound/core/sound.c
++++ b/sound/core/sound.c
+@@ -86,7 +86,7 @@ static void snd_request_other(int minor)
+       case SNDRV_MINOR_TIMER:         str = "snd-timer";      break;
+       default:                        return;
+       }
+-      request_module(str);
++      request_module("%s", str);
+ }
+ #endif        /* modular kernel */
+diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
+index 4e0dd22..7a1f32c 100644
+--- a/sound/drivers/mts64.c
++++ b/sound/drivers/mts64.c
+@@ -29,6 +29,7 @@
+ #include <sound/initval.h>
+ #include <sound/rawmidi.h>
+ #include <sound/control.h>
++#include <asm/local.h>
+ #define CARD_NAME "Miditerminal 4140"
+ #define DRIVER_NAME "MTS64"
+@@ -67,7 +68,7 @@ struct mts64 {
+       struct pardevice *pardev;
+       int pardev_claimed;
+-      int open_count;
++      local_t open_count;
+       int current_midi_output_port;
+       int current_midi_input_port;
+       u8 mode[MTS64_NUM_INPUT_PORTS];
+@@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
+ {
+       struct mts64 *mts = substream->rmidi->private_data;
+-      if (mts->open_count == 0) {
++      if (local_read(&mts->open_count) == 0) {
+               /* We don't need a spinlock here, because this is just called 
+                  if the device has not been opened before. 
+                  So there aren't any IRQs from the device */
+@@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
+               msleep(50);
+       }
+-      ++(mts->open_count);
++      local_inc(&mts->open_count);
+       return 0;
+ }
+@@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
+       struct mts64 *mts = substream->rmidi->private_data;
+       unsigned long flags;
+-      --(mts->open_count);
+-      if (mts->open_count == 0) {
++      if (local_dec_return(&mts->open_count) == 0) {
+               /* We need the spinlock_irqsave here because we can still
+                  have IRQs at this point */
+               spin_lock_irqsave(&mts->lock, flags);
+@@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
+               msleep(500);
+-      } else if (mts->open_count < 0)
+-              mts->open_count = 0;
++      } else if (local_read(&mts->open_count) < 0)
++              local_set(&mts->open_count, 0);
+       return 0;
+ }
+diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
+index b953fb4..1999c01 100644
+--- a/sound/drivers/opl4/opl4_lib.c
++++ b/sound/drivers/opl4/opl4_lib.c
+@@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
+ MODULE_DESCRIPTION("OPL4 driver");
+ MODULE_LICENSE("GPL");
+-static void inline snd_opl4_wait(struct snd_opl4 *opl4)
++static inline void snd_opl4_wait(struct snd_opl4 *opl4)
+ {
+       int timeout = 10;
+       while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
+diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
+index 991018d..8984740 100644
+--- a/sound/drivers/portman2x4.c
++++ b/sound/drivers/portman2x4.c
+@@ -48,6 +48,7 @@
+ #include <sound/initval.h>
+ #include <sound/rawmidi.h>
+ #include <sound/control.h>
++#include <asm/local.h>
+ #define CARD_NAME "Portman 2x4"
+ #define DRIVER_NAME "portman"
+@@ -85,7 +86,7 @@ struct portman {
+       struct pardevice *pardev;
+       int pardev_claimed;
+-      int open_count;
++      local_t open_count;
+       int mode[PORTMAN_NUM_INPUT_PORTS];
+       struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
+ };
+diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
+index ea995af..f1bfa37 100644
+--- a/sound/firewire/amdtp.c
++++ b/sound/firewire/amdtp.c
+@@ -389,7 +389,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
+               ptr = s->pcm_buffer_pointer + data_blocks;
+               if (ptr >= pcm->runtime->buffer_size)
+                       ptr -= pcm->runtime->buffer_size;
+-              ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
++              ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
+               s->pcm_period_pointer += data_blocks;
+               if (s->pcm_period_pointer >= pcm->runtime->period_size) {
+@@ -557,7 +557,7 @@ EXPORT_SYMBOL(amdtp_out_stream_pcm_pointer);
+  */
+ void amdtp_out_stream_update(struct amdtp_out_stream *s)
+ {
+-      ACCESS_ONCE(s->source_node_id_field) =
++      ACCESS_ONCE_RW(s->source_node_id_field) =
+               (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
+ }
+ EXPORT_SYMBOL(amdtp_out_stream_update);
+diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
+index b680c5e..061b7a0 100644
+--- a/sound/firewire/amdtp.h
++++ b/sound/firewire/amdtp.h
+@@ -139,7 +139,7 @@ static inline bool amdtp_out_streaming_error(struct amdtp_out_stream *s)
+ static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
+                                               struct snd_pcm_substream *pcm)
+ {
+-      ACCESS_ONCE(s->pcm) = pcm;
++      ACCESS_ONCE_RW(s->pcm) = pcm;
+ }
+ static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc)
+diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
+index d428ffe..751ef78 100644
+--- a/sound/firewire/isight.c
++++ b/sound/firewire/isight.c
+@@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
+       ptr += count;
+       if (ptr >= runtime->buffer_size)
+               ptr -= runtime->buffer_size;
+-      ACCESS_ONCE(isight->buffer_pointer) = ptr;
++      ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
+       isight->period_counter += count;
+       if (isight->period_counter >= runtime->period_size) {
+@@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
+       if (err < 0)
+               return err;
+-      ACCESS_ONCE(isight->pcm_active) = true;
++      ACCESS_ONCE_RW(isight->pcm_active) = true;
+       return 0;
+ }
+@@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
+ {
+       struct isight *isight = substream->private_data;
+-      ACCESS_ONCE(isight->pcm_active) = false;
++      ACCESS_ONCE_RW(isight->pcm_active) = false;
+       mutex_lock(&isight->mutex);
+       isight_stop_streaming(isight);
+@@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
+       switch (cmd) {
+       case SNDRV_PCM_TRIGGER_START:
+-              ACCESS_ONCE(isight->pcm_running) = true;
++              ACCESS_ONCE_RW(isight->pcm_running) = true;
+               break;
+       case SNDRV_PCM_TRIGGER_STOP:
+-              ACCESS_ONCE(isight->pcm_running) = false;
++              ACCESS_ONCE_RW(isight->pcm_running) = false;
+               break;
+       default:
+               return -EINVAL;
+diff --git a/sound/firewire/scs1x.c b/sound/firewire/scs1x.c
+index 844a555..985ab83 100644
+--- a/sound/firewire/scs1x.c
++++ b/sound/firewire/scs1x.c
+@@ -74,7 +74,7 @@ static void scs_output_trigger(struct snd_rawmidi_substream *stream, int up)
+ {
+       struct scs *scs = stream->rmidi->private_data;
+-      ACCESS_ONCE(scs->output) = up ? stream : NULL;
++      ACCESS_ONCE_RW(scs->output) = up ? stream : NULL;
+       if (up) {
+               scs->output_idle = false;
+               tasklet_schedule(&scs->tasklet);
+@@ -257,7 +257,7 @@ static void scs_input_trigger(struct snd_rawmidi_substream *stream, int up)
+ {
+       struct scs *scs = stream->rmidi->private_data;
+-      ACCESS_ONCE(scs->input) = up ? stream : NULL;
++      ACCESS_ONCE_RW(scs->input) = up ? stream : NULL;
+ }
+ static void scs_input_escaped_byte(struct snd_rawmidi_substream *stream,
+@@ -457,8 +457,8 @@ static int scs_remove(struct device *dev)
+       snd_card_disconnect(scs->card);
+-      ACCESS_ONCE(scs->output) = NULL;
+-      ACCESS_ONCE(scs->input) = NULL;
++      ACCESS_ONCE_RW(scs->output) = NULL;
++      ACCESS_ONCE_RW(scs->input) = NULL;
+       wait_event(scs->idle_wait, scs->output_idle);
+diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
+index 048439a..3be9f6f 100644
+--- a/sound/oss/sb_audio.c
++++ b/sound/oss/sb_audio.c
+@@ -904,7 +904,7 @@ sb16_copy_from_user(int dev,
+               buf16 = (signed short *)(localbuf + localoffs);
+               while (c)
+               {
+-                      locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
++                      locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
+                       if (copy_from_user(lbuf8,
+                                          userbuf+useroffs + p,
+                                          locallen))
+diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
+index 7d8803a..559f8d0 100644
+--- a/sound/oss/swarm_cs4297a.c
++++ b/sound/oss/swarm_cs4297a.c
+@@ -2621,7 +2621,6 @@ static int __init cs4297a_init(void)
+ {
+       struct cs4297a_state *s;
+       u32 pwr, id;
+-      mm_segment_t fs;
+       int rval;
+ #ifndef CONFIG_BCM_CS4297A_CSWARM
+       u64 cfg;
+@@ -2711,22 +2710,23 @@ static int __init cs4297a_init(void)
+         if (!rval) {
+               char *sb1250_duart_present;
++#if 0
++                mm_segment_t fs;
+                 fs = get_fs();
+                 set_fs(KERNEL_DS);
+-#if 0
+                 val = SOUND_MASK_LINE;
+                 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
+                 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
+                         val = initvol[i].vol;
+                         mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
+                 }
++                set_fs(fs);
+ //                cs4297a_write_ac97(s, 0x18, 0x0808);
+ #else
+                 //                cs4297a_write_ac97(s, 0x5e, 0x180);
+                 cs4297a_write_ac97(s, 0x02, 0x0808);
+                 cs4297a_write_ac97(s, 0x18, 0x0808);
+ #endif
+-                set_fs(fs);
+                 list_add(&s->list, &cs4297a_devs);
+diff --git a/sound/pci/ymfpci/ymfpci.h b/sound/pci/ymfpci/ymfpci.h
+index 4631a23..001ae57 100644
+--- a/sound/pci/ymfpci/ymfpci.h
++++ b/sound/pci/ymfpci/ymfpci.h
+@@ -358,7 +358,7 @@ struct snd_ymfpci {
+       spinlock_t reg_lock;
+       spinlock_t voice_lock;
+       wait_queue_head_t interrupt_sleep;
+-      atomic_t interrupt_sleep_count;
++      atomic_unchecked_t interrupt_sleep_count;
+       struct snd_info_entry *proc_entry;
+       const struct firmware *dsp_microcode;
+       const struct firmware *controller_microcode;
+diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
+index 22056c5..25d3244 100644
+--- a/sound/pci/ymfpci/ymfpci_main.c
++++ b/sound/pci/ymfpci/ymfpci_main.c
+@@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
+               if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
+                       break;
+       }
+-      if (atomic_read(&chip->interrupt_sleep_count)) {
+-              atomic_set(&chip->interrupt_sleep_count, 0);
++      if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
++              atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
+               wake_up(&chip->interrupt_sleep);
+       }
+       __end:
+@@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
+                       continue;
+               init_waitqueue_entry(&wait, current);
+               add_wait_queue(&chip->interrupt_sleep, &wait);
+-              atomic_inc(&chip->interrupt_sleep_count);
++              atomic_inc_unchecked(&chip->interrupt_sleep_count);
+               schedule_timeout_uninterruptible(msecs_to_jiffies(50));
+               remove_wait_queue(&chip->interrupt_sleep, &wait);
+       }
+@@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
+               snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
+               spin_unlock(&chip->reg_lock);
+-              if (atomic_read(&chip->interrupt_sleep_count)) {
+-                      atomic_set(&chip->interrupt_sleep_count, 0);
++              if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
++                      atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
+                       wake_up(&chip->interrupt_sleep);
+               }
+       }
+@@ -2421,7 +2421,7 @@ int snd_ymfpci_create(struct snd_card *card,
+       spin_lock_init(&chip->reg_lock);
+       spin_lock_init(&chip->voice_lock);
+       init_waitqueue_head(&chip->interrupt_sleep);
+-      atomic_set(&chip->interrupt_sleep_count, 0);
++      atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
+       chip->card = card;
+       chip->pci = pci;
+       chip->irq = -1;
+diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
+index 0f0bed6..c161e28 100644
+--- a/sound/soc/fsl/fsl_ssi.c
++++ b/sound/soc/fsl/fsl_ssi.c
+@@ -657,7 +657,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
+ {
+       struct fsl_ssi_private *ssi_private;
+       int ret = 0;
+-      struct device_attribute *dev_attr = NULL;
++      device_attribute_no_const *dev_attr = NULL;
+       struct device_node *np = pdev->dev.of_node;
+       const char *p, *sprop;
+       const uint32_t *iprop;
+diff --git a/sound/sound_core.c b/sound/sound_core.c
+index 359753f..45759f4 100644
+--- a/sound/sound_core.c
++++ b/sound/sound_core.c
+@@ -292,7 +292,7 @@ retry:
+       }
+       device_create(sound_class, dev, MKDEV(SOUND_MAJOR, s->unit_minor),
+-                    NULL, s->name+6);
++                    NULL, "%s", s->name+6);
+       return s->unit_minor;
+ fail:
+diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore
+new file mode 100644
+index 0000000..50f2f2f
+--- /dev/null
++++ b/tools/gcc/.gitignore
+@@ -0,0 +1 @@
++size_overflow_hash.h
+diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
+new file mode 100644
+index 0000000..144dbee
+--- /dev/null
++++ b/tools/gcc/Makefile
+@@ -0,0 +1,45 @@
++#CC := gcc
++#PLUGIN_SOURCE_FILES := pax_plugin.c
++#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
++GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
++#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
++
++ifeq ($(PLUGINCC),$(HOSTCC))
++HOSTLIBS := hostlibs
++HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu99 -ggdb
++else
++HOSTLIBS := hostcxxlibs
++HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu++98 -ggdb -Wno-unused-parameter
++endif
++
++$(HOSTLIBS)-$(CONFIG_PAX_CONSTIFY_PLUGIN) := constify_plugin.so
++$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
++$(HOSTLIBS)-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
++$(HOSTLIBS)-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
++$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
++$(HOSTLIBS)-y += colorize_plugin.so
++$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
++$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so
++$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STRUCTLEAK) += structleak_plugin.so
++
++always := $($(HOSTLIBS)-y)
++
++constify_plugin-objs := constify_plugin.o
++stackleak_plugin-objs := stackleak_plugin.o
++kallocstat_plugin-objs := kallocstat_plugin.o
++kernexec_plugin-objs := kernexec_plugin.o
++checker_plugin-objs := checker_plugin.o
++colorize_plugin-objs := colorize_plugin.o
++size_overflow_plugin-objs := size_overflow_plugin.o
++latent_entropy_plugin-objs := latent_entropy_plugin.o
++structleak_plugin-objs := structleak_plugin.o
++
++$(obj)/size_overflow_plugin.o: $(objtree)/$(obj)/size_overflow_hash.h
++
++quiet_cmd_build_size_overflow_hash = GENHASH  $@
++      cmd_build_size_overflow_hash = \
++      $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -d $< -o $@
++$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE
++      $(call if_changed,build_size_overflow_hash)
++
++targets += size_overflow_hash.h
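++
++# Illustrative only (hypothetical manual invocation, not part of the kernel
++# build): once built, a plugin from this directory is loaded by passing it to
++# gcc together with any plugin arguments, along the lines of
++#
++#   gcc -fplugin=tools/gcc/constify_plugin.so \
++#       -fplugin-arg-constify_plugin-no-constify -c file.c
++#
++# The kernel build adds the equivalent flags automatically; the variable that
++# carries them is defined outside this Makefile.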
+diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
+new file mode 100644
+index 0000000..22f03c0
+--- /dev/null
++++ b/tools/gcc/checker_plugin.c
+@@ -0,0 +1,172 @@
++/*
++ * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ *       NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ *       but for the kernel it doesn't matter since it doesn't link against
++ *       any of the gcc libraries
++ *
++ * gcc plugin to implement various sparse (source code checker) features
++ *
++ * TODO:
++ * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
++ *
++ * BUGS:
++ * - none known
++ */
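++/*
++ * Rough sketch of the intended effect (hypothetical kernel-style snippet, not
++ * part of this plugin): once "__user" and "__kernel" map to the address
++ * spaces registered below, gcc rejects an implicit conversion between them,
++ * while the "__force_*" variants allow an explicit override:
++ *
++ *   void example(int __user *uptr)
++ *   {
++ *       int *kptr = uptr;                         // flagged: user -> kernel
++ *       int *fptr = (int __force_kernel *)uptr;   // allowed: explicit force cast
++ *   }
++ */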
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++//#include "expr.h" where are you...
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "rtl.h"
++#include "emit-rtl.h"
++#include "tree-flow.h"
++#include "target.h"
++
++extern void c_register_addr_space (const char *str, addr_space_t as);
++extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
++extern enum machine_mode default_addr_space_address_mode (addr_space_t);
++extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
++extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
++extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
++
++extern void print_gimple_stmt(FILE *, gimple, int, int);
++extern rtx emit_move_insn(rtx x, rtx y);
++
++int plugin_is_GPL_compatible;
++
++static struct plugin_info checker_plugin_info = {
++      .version        = "201111150100",
++      .help           = NULL,
++};
++
++#define ADDR_SPACE_KERNEL             0
++#define ADDR_SPACE_FORCE_KERNEL               1
++#define ADDR_SPACE_USER                       2
++#define ADDR_SPACE_FORCE_USER         3
++#define ADDR_SPACE_IOMEM              0
++#define ADDR_SPACE_FORCE_IOMEM                0
++#define ADDR_SPACE_PERCPU             0
++#define ADDR_SPACE_FORCE_PERCPU               0
++#define ADDR_SPACE_RCU                        0
++#define ADDR_SPACE_FORCE_RCU          0
++
++static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
++{
++      return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
++}
++
++static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
++{
++      return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
++}
++
++static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
++{
++      return default_addr_space_valid_pointer_mode(mode, as);
++}
++
++static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
++{
++      return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
++}
++
++static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
++{
++      return default_addr_space_legitimize_address(x, oldx, mode, as);
++}
++
++static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
++{
++      if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
++              return true;
++
++      if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
++              return true;
++
++      if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
++              return true;
++
++      if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
++              return true;
++
++      if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
++              return true;
++
++      if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
++              return true;
++
++      if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
++              return true;
++
++      return subset == superset;
++}
++
++static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
++{
++//    addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
++//    addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
++
++      return op;
++}
++
++static void register_checker_address_spaces(void *event_data, void *data)
++{
++      c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
++      c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
++      c_register_addr_space("__user", ADDR_SPACE_USER);
++      c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
++//    c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
++//    c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
++//    c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
++//    c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
++//    c_register_addr_space("__rcu", ADDR_SPACE_RCU);
++//    c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
++
++      targetm.addr_space.pointer_mode         = checker_addr_space_pointer_mode;
++      targetm.addr_space.address_mode         = checker_addr_space_address_mode;
++      targetm.addr_space.valid_pointer_mode   = checker_addr_space_valid_pointer_mode;
++      targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
++//    targetm.addr_space.legitimize_address   = checker_addr_space_legitimize_address;
++      targetm.addr_space.subset_p             = checker_addr_space_subset_p;
++      targetm.addr_space.convert              = checker_addr_space_convert;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++      const char * const plugin_name = plugin_info->base_name;
++      const int argc = plugin_info->argc;
++      const struct plugin_argument * const argv = plugin_info->argv;
++      int i;
++
++      if (!plugin_default_version_check(version, &gcc_version)) {
++              error(G_("incompatible gcc/plugin versions"));
++              return 1;
++      }
++
++      register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
++
++      for (i = 0; i < argc; ++i)
++              error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++
++      if (TARGET_64BIT == 0)
++              return 0;
++
++      register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
++
++      return 0;
++}
+diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
+new file mode 100644
+index 0000000..414fe5e
+--- /dev/null
++++ b/tools/gcc/colorize_plugin.c
+@@ -0,0 +1,151 @@
++/*
++ * Copyright 2012-2013 by PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ *       NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ *       but for the kernel it doesn't matter since it doesn't link against
++ *       any of the gcc libraries
++ *
++ * gcc plugin to colorize diagnostic output
++ *
++ */
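++/*
++ * Usage sketch (hypothetical invocation): loaded like any gcc plugin, e.g.
++ *   gcc -fplugin=./colorize_plugin.so test.c
++ * after which notes are prefixed in blue, warnings in yellow and errors in
++ * red, matching the DK_* switch in start_colorize() below.
++ */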
++
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++
++int plugin_is_GPL_compatible;
++
++static struct plugin_info colorize_plugin_info = {
++      .version        = "201302112000",
++      .help           = NULL,
++};
++
++#define GREEN         "\033[32m\033[2m"
++#define LIGHTGREEN    "\033[32m\033[1m"
++#define YELLOW                "\033[33m\033[2m"
++#define LIGHTYELLOW   "\033[33m\033[1m"
++#define RED           "\033[31m\033[2m"
++#define LIGHTRED      "\033[31m\033[1m"
++#define BLUE          "\033[34m\033[2m"
++#define LIGHTBLUE     "\033[34m\033[1m"
++#define BRIGHT                "\033[m\033[1m"
++#define NORMAL                "\033[m"
++
++static diagnostic_starter_fn old_starter;
++static diagnostic_finalizer_fn old_finalizer;
++
++static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
++{
++      const char *color;
++      char *newprefix;
++
++      switch (diagnostic->kind) {
++      case DK_NOTE:
++              color = LIGHTBLUE;
++              break;
++
++      case DK_PEDWARN:
++      case DK_WARNING:
++              color = LIGHTYELLOW;
++              break;
++
++      case DK_ERROR:
++      case DK_FATAL:
++      case DK_ICE:
++      case DK_PERMERROR:
++      case DK_SORRY:
++              color = LIGHTRED;
++              break;
++
++      default:
++              color = NORMAL;
++      }
++
++      old_starter(context, diagnostic);
++      if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
++              return;
++      pp_destroy_prefix(context->printer);
++      pp_set_prefix(context->printer, newprefix);
++}
++
++static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
++{
++      old_finalizer(context, diagnostic);
++}
++
++static void colorize_arm(void)
++{
++      old_starter = diagnostic_starter(global_dc);
++      old_finalizer = diagnostic_finalizer(global_dc);
++
++      diagnostic_starter(global_dc) = start_colorize;
++      diagnostic_finalizer(global_dc) = finalize_colorize;
++}
++
++static unsigned int execute_colorize_rearm(void)
++{
++      if (diagnostic_starter(global_dc) == start_colorize)
++              return 0;
++
++      colorize_arm();
++      return 0;
++}
++
++struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
++      .pass = {
++              .type                   = SIMPLE_IPA_PASS,
++              .name                   = "colorize_rearm",
++#if BUILDING_GCC_VERSION >= 4008
++              .optinfo_flags          = OPTGROUP_NONE,
++#endif
++              .gate                   = NULL,
++              .execute                = execute_colorize_rearm,
++              .sub                    = NULL,
++              .next                   = NULL,
++              .static_pass_number     = 0,
++              .tv_id                  = TV_NONE,
++              .properties_required    = 0,
++              .properties_provided    = 0,
++              .properties_destroyed   = 0,
++              .todo_flags_start       = 0,
++              .todo_flags_finish      = 0
++      }
++};
++
++static void colorize_start_unit(void *gcc_data, void *user_data)
++{
++      colorize_arm();
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++      const char * const plugin_name = plugin_info->base_name;
++      struct register_pass_info colorize_rearm_pass_info = {
++              .pass                           = &pass_ipa_colorize_rearm.pass,
++              .reference_pass_name            = "*free_lang_data",
++              .ref_pass_instance_number       = 1,
++              .pos_op                         = PASS_POS_INSERT_AFTER
++      };
++
++      if (!plugin_default_version_check(version, &gcc_version)) {
++              error(G_("incompatible gcc/plugin versions"));
++              return 1;
++      }
++
++      register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
++      register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
++      register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
++      return 0;
++}
+diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
+new file mode 100644
+index 0000000..c17312d
+--- /dev/null
++++ b/tools/gcc/constify_plugin.c
+@@ -0,0 +1,560 @@
++/*
++ * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2013 by PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/const_plugin/
++ *
++ * Usage:
++ * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
++ * $ gcc -fplugin=constify_plugin.so test.c -O2
++ */
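++/*
++ * Attribute sketch (hypothetical structures, for illustration only): a struct
++ * containing only function pointers is constified automatically; the two
++ * attributes handled below override that default in either direction:
++ *
++ *   struct ops {
++ *       int (*open)(void);
++ *       void (*close)(void);
++ *   };                              // auto-constified: function pointers only
++ *
++ *   struct mutable_ops {
++ *       int (*open)(void);
++ *   } __attribute__((no_const));    // opt out of automatic constification
++ *
++ *   struct mixed {
++ *       int (*open)(void);
++ *       int refcount;
++ *   } __attribute__((do_const));    // force constification despite the writable field
++ */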
++
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "rtl.h"
++#include "emit-rtl.h"
++#include "tree-flow.h"
++#include "target.h"
++#include "langhooks.h"
++
++// should come from c-tree.h if only it were installed for gcc 4.5...
++#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
++
++// unused type flag in all versions 4.5-4.8
++#define TYPE_CONSTIFY_VISITED(TYPE) TYPE_LANG_FLAG_4(TYPE)
++
++int plugin_is_GPL_compatible;
++
++static struct plugin_info const_plugin_info = {
++      .version        = "201305231310",
++      .help           = "no-constify\tturn off constification\n",
++};
++
++typedef struct {
++      bool has_fptr_field;
++      bool has_writable_field;
++      bool has_do_const_field;
++      bool has_no_const_field;
++} constify_info;
++
++static const_tree get_field_type(const_tree field)
++{
++      return strip_array_types(TREE_TYPE(field));
++}
++
++static bool is_fptr(const_tree field)
++{
++      const_tree ptr = get_field_type(field);
++
++      if (TREE_CODE(ptr) != POINTER_TYPE)
++              return false;
++
++      return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
++}
++
++/*
++ * determine whether the given structure type meets the requirements for automatic constification,
++ * including the constification attributes on nested structure types
++ */
++static void constifiable(const_tree node, constify_info *cinfo)
++{
++      const_tree field;
++
++      gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE);
++
++      // e.g., pointer to structure fields while still constructing the structure type
++      if (TYPE_FIELDS(node) == NULL_TREE)
++              return;
++
++      for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
++              const_tree type = get_field_type(field);
++              enum tree_code code = TREE_CODE(type);
++
++              if (node == type)
++                      continue;
++
++              if (is_fptr(field))
++                      cinfo->has_fptr_field = true;
++              else if (!TREE_READONLY(field))
++                      cinfo->has_writable_field = true;
++
++              if (code == RECORD_TYPE || code == UNION_TYPE) {
++                      if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
++                              cinfo->has_do_const_field = true;
++                      else if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
++                              cinfo->has_no_const_field = true;
++                      else
++                              constifiable(type, cinfo);
++              }
++      }
++}
++
++static bool constified(const_tree node)
++{
++      constify_info cinfo = {
++              .has_fptr_field = false,
++              .has_writable_field = false,
++              .has_do_const_field = false,
++              .has_no_const_field = false
++      };
++
++      gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE);
++
++      if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) {
++              gcc_assert(!TYPE_READONLY(node));
++              return false;
++      }
++
++      if (lookup_attribute("do_const", TYPE_ATTRIBUTES(node))) {
++              gcc_assert(TYPE_READONLY(node));
++              return true;
++      }
++
++      constifiable(node, &cinfo);
++      if ((!cinfo.has_fptr_field || cinfo.has_writable_field) && !cinfo.has_do_const_field)
++              return false;
++
++      return TYPE_READONLY(node);
++}
++
++static void deconstify_tree(tree node);
++
++static void deconstify_type(tree type)
++{
++      tree field;
++
++      gcc_assert(TREE_CODE(type) == RECORD_TYPE || TREE_CODE(type) == UNION_TYPE);
++
++      for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
++              const_tree fieldtype = get_field_type(field);
++
++              // special case handling of simple ptr-to-same-array-type members
++              if (TREE_CODE(TREE_TYPE(field)) == POINTER_TYPE) {
++                      const_tree ptrtype = TREE_TYPE(TREE_TYPE(field));
++
++                      if (TREE_CODE(ptrtype) != RECORD_TYPE && TREE_CODE(ptrtype) != UNION_TYPE)
++                              continue;
++                      if (TREE_TYPE(TREE_TYPE(field)) == type)
++                              continue;
++                      if (TYPE_MAIN_VARIANT(ptrtype) == TYPE_MAIN_VARIANT(type)) {
++                              TREE_TYPE(field) = copy_node(TREE_TYPE(field));
++                              TREE_TYPE(TREE_TYPE(field)) = type;
++                      }
++                      continue;
++              }
++              if (TREE_CODE(fieldtype) != RECORD_TYPE && TREE_CODE(fieldtype) != UNION_TYPE)
++                      continue;
++              if (!constified(fieldtype))
++                      continue;
++
++              deconstify_tree(field);
++              TREE_READONLY(field) = 0;
++      }
++      TYPE_READONLY(type) = 0;
++      C_TYPE_FIELDS_READONLY(type) = 0;
++      if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
++              TYPE_ATTRIBUTES(type) = remove_attribute("do_const", TYPE_ATTRIBUTES(type));
++}
++
++static void deconstify_tree(tree node)
++{
++      tree old_type, new_type, field;
++
++      old_type = TREE_TYPE(node);
++      while (TREE_CODE(old_type) == ARRAY_TYPE && TREE_CODE(TREE_TYPE(old_type)) != ARRAY_TYPE) {
++              node = TREE_TYPE(node) = copy_node(old_type);
++              old_type = TREE_TYPE(old_type);
++      }
++
++      gcc_assert(TREE_CODE(old_type) == RECORD_TYPE || TREE_CODE(old_type) == UNION_TYPE);
++      gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST));
++
++      new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
++      TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
++      for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
++              DECL_FIELD_CONTEXT(field) = new_type;
++
++      deconstify_type(new_type);
++
++      TREE_TYPE(node) = new_type;
++}
++
++static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
++{
++      tree type;
++      constify_info cinfo = {
++              .has_fptr_field = false,
++              .has_writable_field = false,
++              .has_do_const_field = false,
++              .has_no_const_field = false
++      };
++
++      *no_add_attrs = true;
++      if (TREE_CODE(*node) == FUNCTION_DECL) {
++              error("%qE attribute does not apply to functions", name);
++              return NULL_TREE;
++      }
++
++      if (TREE_CODE(*node) == PARM_DECL) {
++              error("%qE attribute does not apply to function parameters", name);
++              return NULL_TREE;
++      }
++
++      if (TREE_CODE(*node) == VAR_DECL) {
++              error("%qE attribute does not apply to variables", name);
++              return NULL_TREE;
++      }
++
++      if (TYPE_P(*node)) {
++              *no_add_attrs = false;
++              type = *node;
++      } else {
++              gcc_assert(TREE_CODE(*node) == TYPE_DECL);
++              type = TREE_TYPE(*node);
++      }
++
++      if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
++              error("%qE attribute applies to struct and union types only", name);
++              return NULL_TREE;
++      }
++
++      if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
++              error("%qE attribute is already applied to the type", name);
++              return NULL_TREE;
++      }
++
++      if (TYPE_P(*node)) {
++              if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
++                      error("%qE attribute is incompatible with 'do_const'", name);
++              return NULL_TREE;
++      }
++
++      constifiable(type, &cinfo);
++      if ((cinfo.has_fptr_field && !cinfo.has_writable_field) || lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
++              deconstify_tree(*node);
++              TYPE_CONSTIFY_VISITED(TREE_TYPE(*node)) = 1;
++              return NULL_TREE;
++      }
++
++      error("%qE attribute used on type that is not constified", name);
++      return NULL_TREE;
++}
++
++static void constify_type(tree type)
++{
++      TYPE_READONLY(type) = 1;
++      C_TYPE_FIELDS_READONLY(type) = 1;
++      TYPE_CONSTIFY_VISITED(type) = 1;
++//    TYPE_ATTRIBUTES(type) = tree_cons(get_identifier("do_const"), NULL_TREE, TYPE_ATTRIBUTES(type));
++}
++
++static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
++{
++      *no_add_attrs = true;
++      if (!TYPE_P(*node)) {
++              error("%qE attribute applies to types only", name);
++              return NULL_TREE;
++      }
++
++      if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
++              error("%qE attribute applies to struct and union types only", name);
++              return NULL_TREE;
++      }
++
++      if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(*node))) {
++              error("%qE attribute is already applied to the type", name);
++              return NULL_TREE;
++      }
++
++      if (lookup_attribute("no_const", TYPE_ATTRIBUTES(*node))) {
++              error("%qE attribute is incompatible with 'no_const'", name);
++              return NULL_TREE;
++      }
++
++      *no_add_attrs = false;
++      return NULL_TREE;
++}
++
++static struct attribute_spec no_const_attr = {
++      .name                   = "no_const",
++      .min_length             = 0,
++      .max_length             = 0,
++      .decl_required          = false,
++      .type_required          = false,
++      .function_type_required = false,
++      .handler                = handle_no_const_attribute,
++#if BUILDING_GCC_VERSION >= 4007
++      .affects_type_identity  = true
++#endif
++};
++
++static struct attribute_spec do_const_attr = {
++      .name                   = "do_const",
++      .min_length             = 0,
++      .max_length             = 0,
++      .decl_required          = false,
++      .type_required          = false,
++      .function_type_required = false,
++      .handler                = handle_do_const_attribute,
++#if BUILDING_GCC_VERSION >= 4007
++      .affects_type_identity  = true
++#endif
++};
++
++static void register_attributes(void *event_data, void *data)
++{
++      register_attribute(&no_const_attr);
++      register_attribute(&do_const_attr);
++}
++
++static void finish_type(void *event_data, void *data)
++{
++      tree type = (tree)event_data;
++      constify_info cinfo = {
++              .has_fptr_field = false,
++              .has_writable_field = false,
++              .has_do_const_field = false,
++              .has_no_const_field = false
++      };
++
++      if (type == NULL_TREE || type == error_mark_node)
++              return;
++
++      if (TYPE_FIELDS(type) == NULL_TREE || TYPE_CONSTIFY_VISITED(type))
++              return;
++
++      constifiable(type, &cinfo);
++
++      if (TYPE_READONLY(type) && C_TYPE_FIELDS_READONLY(type)) {
++              if (!lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
++                      return;
++              if (cinfo.has_writable_field)
++                      return;
++              error("'do_const' attribute used on type that is%sconstified", cinfo.has_fptr_field ? " " : " not ");
++              return;
++      }
++
++      if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type))) {
++              if ((cinfo.has_fptr_field && !cinfo.has_writable_field) || cinfo.has_do_const_field) {
++                      deconstify_type(type);
++                      TYPE_CONSTIFY_VISITED(type) = 1;
++              } else
++                      error("'no_const' attribute used on type that is not constified");
++              return;
++      }
++
++      if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
++              constify_type(type);
++              return;
++      }
++
++      if (cinfo.has_fptr_field && !cinfo.has_writable_field) {
++              constify_type(type);
++              return;
++      }
++
++      deconstify_type(type);
++      TYPE_CONSTIFY_VISITED(type) = 1;
++}
++
++static void check_global_variables(void)
++{
++      struct varpool_node *node;
++
++#if BUILDING_GCC_VERSION <= 4007
++      for (node = varpool_nodes; node; node = node->next) {
++              tree var = node->decl;
++#else
++      FOR_EACH_VARIABLE(node) {
++              tree var = node->symbol.decl;
++#endif
++              tree type = TREE_TYPE(var);
++
++              if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
++                      continue;
++
++              if (!TYPE_READONLY(type) || !C_TYPE_FIELDS_READONLY(type))
++                      continue;
++
++              if (!TYPE_CONSTIFY_VISITED(type))
++                      continue;
++
++              if (DECL_EXTERNAL(var))
++                      continue;
++
++              if (DECL_INITIAL(var))
++                      continue;
++
++              // this works around a gcc bug/feature where uninitialized globals
++              // are moved into the .bss section regardless of any constification
++              DECL_INITIAL(var) = build_constructor(type, NULL);
++//            inform(DECL_SOURCE_LOCATION(var), "constified variable %qE moved into .rodata", var);
++      }
++}
++
++static unsigned int check_local_variables(void)
++{
++      unsigned int ret = 0;
++      tree var;
++
++#if BUILDING_GCC_VERSION == 4005
++      tree vars;
++#else
++      unsigned int i;
++#endif
++
++#if BUILDING_GCC_VERSION == 4005
++      for (vars = cfun->local_decls; vars; vars = TREE_CHAIN(vars)) {
++              var = TREE_VALUE(vars);
++#else
++      FOR_EACH_LOCAL_DECL(cfun, i, var) {
++#endif
++              tree type = TREE_TYPE(var);
++
++              gcc_assert(DECL_P(var));
++              if (is_global_var(var))
++                      continue;
++
++              if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
++                      continue;
++
++              if (!TYPE_READONLY(type) || !C_TYPE_FIELDS_READONLY(type))
++                      continue;
++
++              if (!TYPE_CONSTIFY_VISITED(type))
++                      continue;
++
++              error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
++              ret = 1;
++      }
++      return ret;
++}
++
++static unsigned int check_variables(void)
++{
++      check_global_variables();
++      return check_local_variables();
++}
++
++static struct gimple_opt_pass pass_local_variable = {
++      {
++              .type                   = GIMPLE_PASS,
++              .name                   = "check_variables",
++#if BUILDING_GCC_VERSION >= 4008
++              .optinfo_flags          = OPTGROUP_NONE,
++#endif
++              .gate                   = NULL,
++              .execute                = check_variables,
++              .sub                    = NULL,
++              .next                   = NULL,
++              .static_pass_number     = 0,
++              .tv_id                  = TV_NONE,
++              .properties_required    = 0,
++              .properties_provided    = 0,
++              .properties_destroyed   = 0,
++              .todo_flags_start       = 0,
++              .todo_flags_finish      = 0
++      }
++};
++
++static struct {
++      const char *name;
++      const char *asm_op;
++} sections[] = {
++      {".init.rodata",     "\t.section\t.init.rodata,\"a\""},
++      {".ref.rodata",      "\t.section\t.ref.rodata,\"a\""},
++      {".devinit.rodata",  "\t.section\t.devinit.rodata,\"a\""},
++      {".devexit.rodata",  "\t.section\t.devexit.rodata,\"a\""},
++      {".cpuinit.rodata",  "\t.section\t.cpuinit.rodata,\"a\""},
++      {".cpuexit.rodata",  "\t.section\t.cpuexit.rodata,\"a\""},
++      {".meminit.rodata",  "\t.section\t.meminit.rodata,\"a\""},
++      {".memexit.rodata",  "\t.section\t.memexit.rodata,\"a\""},
++      {".data..read_only", "\t.section\t.data..read_only,\"a\""},
++};
++
++static unsigned int (*old_section_type_flags)(tree decl, const char *name, int reloc);
++
++static unsigned int constify_section_type_flags(tree decl, const char *name, int reloc)
++{
++      size_t i;
++
++      for (i = 0; i < ARRAY_SIZE(sections); i++)
++              if (!strcmp(sections[i].name, name))
++                      return 0;
++      return old_section_type_flags(decl, name, reloc);
++}
++
++static void constify_start_unit(void *gcc_data, void *user_data)
++{
++//    size_t i;
++
++//    for (i = 0; i < ARRAY_SIZE(sections); i++)
++//            sections[i].section = get_unnamed_section(0, output_section_asm_op, sections[i].asm_op);
++//            sections[i].section = get_section(sections[i].name, 0, NULL);
++
++      old_section_type_flags = targetm.section_type_flags;
++      targetm.section_type_flags = constify_section_type_flags;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++      const char * const plugin_name = plugin_info->base_name;
++      const int argc = plugin_info->argc;
++      const struct plugin_argument * const argv = plugin_info->argv;
++      int i;
++      bool constify = true;
++
++      struct register_pass_info local_variable_pass_info = {
++              .pass                           = &pass_local_variable.pass,
++              .reference_pass_name            = "ssa",
++              .ref_pass_instance_number       = 1,
++              .pos_op                         = PASS_POS_INSERT_BEFORE
++      };
++
++      if (!plugin_default_version_check(version, &gcc_version)) {
++              error(G_("incompatible gcc/plugin versions"));
++              return 1;
++      }
++
++      for (i = 0; i < argc; ++i) {
++              if (!(strcmp(argv[i].key, "no-constify"))) {
++                      constify = false;
++                      continue;
++              }
++              error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++      }
++
++      if (strcmp(lang_hooks.name, "GNU C")) {
++              inform(UNKNOWN_LOCATION, G_("%s supports C only"), plugin_name);
++              constify = false;
++      }
++
++      register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
++      if (constify) {
++              register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
++              register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
++              register_callback(plugin_name, PLUGIN_START_UNIT, constify_start_unit, NULL);
++      }
++      register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
++
++      return 0;
++}
+diff --git a/tools/gcc/generate_size_overflow_hash.sh b/tools/gcc/generate_size_overflow_hash.sh
+new file mode 100644
+index 0000000..e518932
+--- /dev/null
++++ b/tools/gcc/generate_size_overflow_hash.sh
+@@ -0,0 +1,94 @@
++#!/bin/bash
++
++# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c).
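++# Input format sketch (hypothetical entry): each line of the data file is
++# expected to look like
++#   <struct_name> <function_name> <param-numbers-joined-by-'-'> <array_index> <next>
++# so a line such as
++#   _000001_hash  kmalloc  1  12345  NULL
++# emits "const struct size_overflow_hash _000001_hash" with .name = "kmalloc"
++# and .param = PARAM1, and places &_000001_hash at size_overflow_hash[12345]
++# in the generated array.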
++
++header1="size_overflow_hash.h"
++database="size_overflow_hash.data"
++n=65536
++
++usage() {
++cat <<EOF
++usage: $0 options
++OPTIONS:
++        -h|--help               help
++      -o                      header file
++      -d                      database file
++      -n                      hash array size
++EOF
++    return 0
++}
++
++while true
++do
++    case "$1" in
++    -h|--help)        usage && exit 0;;
++    -n)               n=$2; shift 2;;
++    -o)               header1="$2"; shift 2;;
++    -d)               database="$2"; shift 2;;
++    --)               shift 1; break ;;
++     *)               break ;;
++    esac
++done
++
++create_defines() {
++      for i in `seq 0 31`
++      do
++              echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1"
++      done
++      echo >> "$header1"
++}
++
++create_structs() {
++      rm -f "$header1"
++
++      create_defines
++
++      cat "$database" | while read data
++      do
++              data_array=($data)
++              struct_hash_name="${data_array[0]}"
++              funcn="${data_array[1]}"
++              params="${data_array[2]}"
++              next="${data_array[4]}"
++
++              echo "const struct size_overflow_hash $struct_hash_name = {" >> "$header1"
++
++              echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1"
++              echo -en "\t.param\t= " >> "$header1"
++              line=
++              for param_num in ${params//-/ };
++              do
++                      line="${line}PARAM"$param_num"|"
++              done
++
++              echo -e "${line%?},\n};\n" >> "$header1"
++      done
++}
++
++create_headers() {
++      echo "const struct size_overflow_hash * const size_overflow_hash[$n] = {" >> "$header1"
++}
++
++create_array_elements() {
++      index=0
++      grep -v "nohasharray" $database | sort -n -k 4 | while read data
++      do
++              data_array=($data)
++              i="${data_array[3]}"
++              hash="${data_array[0]}"
++              while [[ $index -lt $i ]]
++              do
++                      echo -e "\t["$index"]\t= NULL," >> "$header1"
++                      index=$(($index + 1))
++              done
++              index=$(($index + 1))
++              echo -e "\t["$i"]\t= &"$hash"," >> "$header1"
++      done
++      echo '};' >> $header1
++}
++
++create_structs
++create_headers
++create_array_elements
++
++exit 0
+diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
+new file mode 100644
+index 0000000..568b360
+--- /dev/null
++++ b/tools/gcc/kallocstat_plugin.c
+@@ -0,0 +1,170 @@
++/*
++ * Copyright 2011-2013 by the PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ *       NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ *       but for the kernel it doesn't matter since it doesn't link against
++ *       any of the gcc libraries
++ *
++ * gcc plugin to find the distribution of k*alloc sizes
++ *
++ * TODO:
++ *
++ * BUGS:
++ * - none known
++ */
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++//#include "expr.h" where are you...
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "rtl.h"
++#include "emit-rtl.h"
++
++extern void print_gimple_stmt(FILE *, gimple, int, int);
++
++int plugin_is_GPL_compatible;
++
++static const char * const kalloc_functions[] = {
++      "__kmalloc",
++      "kmalloc",
++      "kmalloc_large",
++      "kmalloc_node",
++      "kmalloc_order",
++      "kmalloc_order_trace",
++      "kmalloc_slab",
++      "kzalloc",
++      "kzalloc_node",
++};
++
++static struct plugin_info kallocstat_plugin_info = {
++      .version        = "201302112000",
++};
++
++static unsigned int execute_kallocstat(void);
++
++static struct gimple_opt_pass kallocstat_pass = {
++      .pass = {
++              .type                   = GIMPLE_PASS,
++              .name                   = "kallocstat",
++#if BUILDING_GCC_VERSION >= 4008
++              .optinfo_flags          = OPTGROUP_NONE,
++#endif
++              .gate                   = NULL,
++              .execute                = execute_kallocstat,
++              .sub                    = NULL,
++              .next                   = NULL,
++              .static_pass_number     = 0,
++              .tv_id                  = TV_NONE,
++              .properties_required    = 0,
++              .properties_provided    = 0,
++              .properties_destroyed   = 0,
++              .todo_flags_start       = 0,
++              .todo_flags_finish      = 0
++      }
++};
++
++static bool is_kalloc(const char *fnname)
++{
++      size_t i;
++
++      for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
++              if (!strcmp(fnname, kalloc_functions[i]))
++                      return true;
++      return false;
++}
++
++static unsigned int execute_kallocstat(void)
++{
++      basic_block bb;
++
++      // 1. loop through BBs and GIMPLE statements
++      FOR_EACH_BB(bb) {
++              gimple_stmt_iterator gsi;
++              for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++                      // gimple match: 
++                      tree fndecl, size;
++                      gimple call_stmt;
++                      const char *fnname;
++
++                      // is it a call
++                      call_stmt = gsi_stmt(gsi);
++                      if (!is_gimple_call(call_stmt))
++                              continue;
++                      fndecl = gimple_call_fndecl(call_stmt);
++                      if (fndecl == NULL_TREE)
++                              continue;
++                      if (TREE_CODE(fndecl) != FUNCTION_DECL)
++                              continue;
++
++                      // is it a call to k*alloc
++                      fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
++                      if (!is_kalloc(fnname))
++                              continue;
++
++                      // is the size arg the result of a simple const assignment
++                      size = gimple_call_arg(call_stmt, 0);
++                      while (true) {
++                              gimple def_stmt;
++                              expanded_location xloc;
++                              size_t size_val;
++
++                              if (TREE_CODE(size) != SSA_NAME)
++                                      break;
++                              def_stmt = SSA_NAME_DEF_STMT(size);
++                              if (!def_stmt || !is_gimple_assign(def_stmt))
++                                      break;
++                              if (gimple_num_ops(def_stmt) != 2)
++                                      break;
++                              size = gimple_assign_rhs1(def_stmt);
++                              if (!TREE_CONSTANT(size))
++                                      continue;
++                              xloc = expand_location(gimple_location(def_stmt));
++                              if (!xloc.file)
++                                      xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
++                              size_val = TREE_INT_CST_LOW(size);
++                              fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
++                              break;
++                      }
++//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
++//debug_tree(gimple_call_fn(call_stmt));
++//print_node(stderr, "pax", fndecl, 4);
++              }
++      }
++
++      return 0;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++      const char * const plugin_name = plugin_info->base_name;
++      struct register_pass_info kallocstat_pass_info = {
++              .pass                           = &kallocstat_pass.pass,
++              .reference_pass_name            = "ssa",
++              .ref_pass_instance_number       = 1,
++              .pos_op                         = PASS_POS_INSERT_AFTER
++      };
++
++      if (!plugin_default_version_check(version, &gcc_version)) {
++              error(G_("incompatible gcc/plugin versions"));
++              return 1;
++      }
++
++      register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
++      register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
++
++      return 0;
++}
+diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
+new file mode 100644
+index 0000000..0408e06
+--- /dev/null
++++ b/tools/gcc/kernexec_plugin.c
+@@ -0,0 +1,465 @@
++/*
++ * Copyright 2011-2013 by the PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ *       NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ *       but for the kernel it doesn't matter since it doesn't link against
++ *       any of the gcc libraries
++ *
++ * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
++ *
++ * TODO:
++ *
++ * BUGS:
++ * - none known
++ */
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++//#include "expr.h" where are you...
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "rtl.h"
++#include "emit-rtl.h"
++#include "tree-flow.h"
++
++extern void print_gimple_stmt(FILE *, gimple, int, int);
++extern rtx emit_move_insn(rtx x, rtx y);
++
++#if BUILDING_GCC_VERSION <= 4006
++#define ANY_RETURN_P(rtx) (GET_CODE(rtx) == RETURN)
++#endif
++
++#if BUILDING_GCC_VERSION >= 4008
++#define TODO_dump_func 0
++#endif
++
++int plugin_is_GPL_compatible;
++
++static struct plugin_info kernexec_plugin_info = {
++      .version        = "201302112000",
++      .help           = "method=[bts|or]\tinstrumentation method\n"
++};
++
++static unsigned int execute_kernexec_reload(void);
++static unsigned int execute_kernexec_fptr(void);
++static unsigned int execute_kernexec_retaddr(void);
++static bool kernexec_cmodel_check(void);
++
++static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
++static void (*kernexec_instrument_retaddr)(rtx);
++
++static struct gimple_opt_pass kernexec_reload_pass = {
++      .pass = {
++              .type                   = GIMPLE_PASS,
++              .name                   = "kernexec_reload",
++#if BUILDING_GCC_VERSION >= 4008
++              .optinfo_flags          = OPTGROUP_NONE,
++#endif
++              .gate                   = kernexec_cmodel_check,
++              .execute                = execute_kernexec_reload,
++              .sub                    = NULL,
++              .next                   = NULL,
++              .static_pass_number     = 0,
++              .tv_id                  = TV_NONE,
++              .properties_required    = 0,
++              .properties_provided    = 0,
++              .properties_destroyed   = 0,
++              .todo_flags_start       = 0,
++              .todo_flags_finish      = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
++      }
++};
++
++static struct gimple_opt_pass kernexec_fptr_pass = {
++      .pass = {
++              .type                   = GIMPLE_PASS,
++              .name                   = "kernexec_fptr",
++#if BUILDING_GCC_VERSION >= 4008
++              .optinfo_flags          = OPTGROUP_NONE,
++#endif
++              .gate                   = kernexec_cmodel_check,
++              .execute                = execute_kernexec_fptr,
++              .sub                    = NULL,
++              .next                   = NULL,
++              .static_pass_number     = 0,
++              .tv_id                  = TV_NONE,
++              .properties_required    = 0,
++              .properties_provided    = 0,
++              .properties_destroyed   = 0,
++              .todo_flags_start       = 0,
++              .todo_flags_finish      = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
++      }
++};
++
++static struct rtl_opt_pass kernexec_retaddr_pass = {
++      .pass = {
++              .type                   = RTL_PASS,
++              .name                   = "kernexec_retaddr",
++#if BUILDING_GCC_VERSION >= 4008
++              .optinfo_flags          = OPTGROUP_NONE,
++#endif
++              .gate                   = kernexec_cmodel_check,
++              .execute                = execute_kernexec_retaddr,
++              .sub                    = NULL,
++              .next                   = NULL,
++              .static_pass_number     = 0,
++              .tv_id                  = TV_NONE,
++              .properties_required    = 0,
++              .properties_provided    = 0,
++              .properties_destroyed   = 0,
++              .todo_flags_start       = 0,
++              .todo_flags_finish      = TODO_dump_func | TODO_ggc_collect
++      }
++};
++
++static bool kernexec_cmodel_check(void)
++{
++      tree section;
++
++      if (ix86_cmodel != CM_KERNEL)
++              return false;
++
++      section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
++      if (!section || !TREE_VALUE(section))
++              return true;
++
++      section = TREE_VALUE(TREE_VALUE(section));
++      if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
++              return true;
++
++      return false;
++}
++
++/*
++ * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
++ */
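++/*
++ * Background (informal): under the "or" instrumentation method %r10 is
++ * assumed to hold the constant 0x8000000000000000 so that
++ * kernexec_instrument_fptr_or() can mask function pointers cheaply; inline
++ * asm that clobbers r10 would lose that value, hence the reload added here.
++ */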
++static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
++{
++      gimple asm_movabs_stmt;
++
++      // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
++      asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
++      gimple_asm_set_volatile(asm_movabs_stmt, true);
++      gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
++      update_stmt(asm_movabs_stmt);
++}
++
++/*
++ * find all asm() stmts that clobber r10 and add a reload of r10
++ */
++static unsigned int execute_kernexec_reload(void)
++{
++      basic_block bb;
++
++      // 1. loop through BBs and GIMPLE statements
++      FOR_EACH_BB(bb) {
++              gimple_stmt_iterator gsi;
++
++              for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++                      // gimple match: __asm__ ("" :  :  : "r10");
++                      gimple asm_stmt;
++                      size_t nclobbers;
++
++                      // is it an asm ...
++                      asm_stmt = gsi_stmt(gsi);
++                      if (gimple_code(asm_stmt) != GIMPLE_ASM)
++                              continue;
++
++                      // ... clobbering r10
++                      nclobbers = gimple_asm_nclobbers(asm_stmt);
++                      while (nclobbers--) {
++                              tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
++                              if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
++                                      continue;
++                              kernexec_reload_fptr_mask(&gsi);
++//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
++                              break;
++                      }
++              }
++      }
++
++      return 0;
++}
++
++/*
++ * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
++ * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
++ */
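++/*
++ * Rough C-level picture of the transformation (hypothetical names; the pass
++ * actually rewrites GIMPLE below): an indirect call such as
++ *     ret = ops->handler(arg);
++ * becomes the equivalent of
++ *     unsigned long kernexec_bts = (unsigned long)ops->handler;
++ *     kernexec_bts |= 0x8000000000000000UL;
++ *     ret = ((typeof(ops->handler))kernexec_bts)(arg);
++ * so a pointer smuggled in from userland (MSB clear) turns non-canonical and
++ * faults instead of being executed.
++ */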
++static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
++{
++      gimple assign_intptr, assign_new_fptr, call_stmt;
++      tree intptr, old_fptr, new_fptr, kernexec_mask;
++
++      call_stmt = gsi_stmt(*gsi);
++      old_fptr = gimple_call_fn(call_stmt);
++
++      // create temporary unsigned long variable used for bitops and cast fptr to it
++      intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
++#if BUILDING_GCC_VERSION <= 4007
++      add_referenced_var(intptr);
++      mark_sym_for_renaming(intptr);
++#endif
++      assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
++      gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
++      update_stmt(assign_intptr);
++
++      // apply logical or to temporary unsigned long and bitmask
++      kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
++//    kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
++      assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
++      gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
++      update_stmt(assign_intptr);
++
++      // cast temporary unsigned long back to a temporary fptr variable
++      new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
++#if BUILDING_GCC_VERSION <= 4007
++      add_referenced_var(new_fptr);
++      mark_sym_for_renaming(new_fptr);
++#endif
++      assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
++      gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
++      update_stmt(assign_new_fptr);
++
++      // replace call stmt fn with the new fptr
++      gimple_call_set_fn(call_stmt, new_fptr);
++      update_stmt(call_stmt);
++}
++
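++// variant of the bts instrumentation: or the mask kept in %r10 into the function pointer via inline asm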
++static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
++{
++      gimple asm_or_stmt, call_stmt;
++      tree old_fptr, new_fptr, input, output;
++#if BUILDING_GCC_VERSION <= 4007
++      VEC(tree, gc) *inputs = NULL;
++      VEC(tree, gc) *outputs = NULL;
++#else
++      vec<tree, va_gc> *inputs = NULL;
++      vec<tree, va_gc> *outputs = NULL;
++#endif
++
++      call_stmt = gsi_stmt(*gsi);
++      old_fptr = gimple_call_fn(call_stmt);
++
++      // create temporary fptr variable
++      new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
++#if BUILDING_GCC_VERSION <= 4007
++      add_referenced_var(new_fptr);
++      mark_sym_for_renaming(new_fptr);
++#endif
++
++      // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
++      input = build_tree_list(NULL_TREE, build_string(2, "0"));
++      input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
++      output = build_tree_list(NULL_TREE, build_string(3, "=r"));
++      output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
++#if BUILDING_GCC_VERSION <= 4007
++      VEC_safe_push(tree, gc, inputs, input);
++      VEC_safe_push(tree, gc, outputs, output);
++#else
++      vec_safe_push(inputs, input);
++      vec_safe_push(outputs, output);
++#endif
++      asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
++      gimple_asm_set_volatile(asm_or_stmt, true);
++      gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
++      update_stmt(asm_or_stmt);
++
++      // replace call stmt fn with the new fptr
++      gimple_call_set_fn(call_stmt, new_fptr);
++      update_stmt(call_stmt);
++}
++
++/*
++ * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
++ */
++static unsigned int execute_kernexec_fptr(void)
++{
++      basic_block bb;
++
++      // 1. loop through BBs and GIMPLE statements
++      FOR_EACH_BB(bb) {
++              gimple_stmt_iterator gsi;
++
++              for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++                      // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
++                      tree fn;
++                      gimple call_stmt;
++
++                      // is it a call ...
++                      call_stmt = gsi_stmt(gsi);
++                      if (!is_gimple_call(call_stmt))
++                              continue;
++                      fn = gimple_call_fn(call_stmt);
++                      if (TREE_CODE(fn) == ADDR_EXPR)
++                              continue;
++                      if (TREE_CODE(fn) != SSA_NAME)
++                              gcc_unreachable();
++
++                      // ... through a function pointer
++                      if (SSA_NAME_VAR(fn) != NULL_TREE) {
++                              fn = SSA_NAME_VAR(fn);
++                              if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL) {
++                                      debug_tree(fn);
++                                      gcc_unreachable();
++                              }
++                      }
++                      fn = TREE_TYPE(fn);
++                      if (TREE_CODE(fn) != POINTER_TYPE)
++                              continue;
++                      fn = TREE_TYPE(fn);
++                      if (TREE_CODE(fn) != FUNCTION_TYPE)
++                              continue;
++
++                      kernexec_instrument_fptr(&gsi);
++
++//debug_tree(gimple_call_fn(call_stmt));
++//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
++              }
++      }
++
++      return 0;
++}
++
++// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
++static void kernexec_instrument_retaddr_bts(rtx insn)
++{
++      rtx btsq;
++      rtvec argvec, constraintvec, labelvec;
++      int line;
++
++      // create asm volatile("btsq $63,(%%rsp)":::)
++      argvec = rtvec_alloc(0);
++      constraintvec = rtvec_alloc(0);
++      labelvec = rtvec_alloc(0);
++      line = expand_location(RTL_LOCATION(insn)).line;
++      btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
++      MEM_VOLATILE_P(btsq) = 1;
++//    RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
++      emit_insn_before(btsq, insn);
++}
++
++// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
++static void kernexec_instrument_retaddr_or(rtx insn)
++{
++      rtx orq;
++      rtvec argvec, constraintvec, labelvec;
++      int line;
++
++      // create asm volatile("orq %%r10,(%%rsp)":::)
++      argvec = rtvec_alloc(0);
++      constraintvec = rtvec_alloc(0);
++      labelvec = rtvec_alloc(0);
++      line = expand_location(RTL_LOCATION(insn)).line;
++      orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
++      MEM_VOLATILE_P(orq) = 1;
++//    RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
++      emit_insn_before(orq, insn);
++}
++
++/*
++ * find all asm level function returns and forcibly set the highest bit of the return address
++ */
++static unsigned int execute_kernexec_retaddr(void)
++{
++      rtx insn;
++
++      // 1. find function returns
++      for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
++              // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
++              //            (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
++              //            (jump_insn 97 96 98 6 (simple_return) fptr.c:50 -1 (nil) -> simple_return)
++              rtx body;
++
++              // is it a retn
++              if (!JUMP_P(insn))
++                      continue;
++              body = PATTERN(insn);
++              if (GET_CODE(body) == PARALLEL)
++                      body = XVECEXP(body, 0, 0);
++              if (!ANY_RETURN_P(body))
++                      continue;
++              kernexec_instrument_retaddr(insn);
++      }
++
++//    print_simple_rtl(stderr, get_insns());
++//    print_rtl(stderr, get_insns());
++
++      return 0;
++}
++
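++// plugin entry point: parse the "method" argument (bts or or), then register the kernexec passes (64-bit targets only)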
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++      const char * const plugin_name = plugin_info->base_name;
++      const int argc = plugin_info->argc;
++      const struct plugin_argument * const argv = plugin_info->argv;
++      int i;
++      struct register_pass_info kernexec_reload_pass_info = {
++              .pass                           = &kernexec_reload_pass.pass,
++              .reference_pass_name            = "ssa",
++              .ref_pass_instance_number       = 1,
++              .pos_op                         = PASS_POS_INSERT_AFTER
++      };
++      struct register_pass_info kernexec_fptr_pass_info = {
++              .pass                           = &kernexec_fptr_pass.pass,
++              .reference_pass_name            = "ssa",
++              .ref_pass_instance_number       = 1,
++              .pos_op                         = PASS_POS_INSERT_AFTER
++      };
++      struct register_pass_info kernexec_retaddr_pass_info = {
++              .pass                           = &kernexec_retaddr_pass.pass,
++              .reference_pass_name            = "pro_and_epilogue",
++              .ref_pass_instance_number       = 1,
++              .pos_op                         = PASS_POS_INSERT_AFTER
++      };
++
++      if (!plugin_default_version_check(version, &gcc_version)) {
++              error(G_("incompatible gcc/plugin versions"));
++              return 1;
++      }
++
++      register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
++
++      if (TARGET_64BIT == 0)
++              return 0;
++
++      for (i = 0; i < argc; ++i) {
++              if (!strcmp(argv[i].key, "method")) {
++                      if (!argv[i].value) {
++                              error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++                              continue;
++                      }
++                      if (!strcmp(argv[i].value, "bts")) {
++                              kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
++                              kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
++                      } else if (!strcmp(argv[i].value, "or")) {
++                              kernexec_instrument_fptr = kernexec_instrument_fptr_or;
++                              kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
++                              fix_register("r10", 1, 1);
++                      } else
++                              error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
++                      continue;
++              }
++              error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++      }
++      if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
++              error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
++
++      if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
++              register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
++      register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
++      register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
++
++      return 0;
++}
+diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
+new file mode 100644
+index 0000000..b5395ba
+--- /dev/null
++++ b/tools/gcc/latent_entropy_plugin.c
+@@ -0,0 +1,327 @@
++/*
++ * Copyright 2012-2013 by the PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ *       NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ *       but for the kernel it doesn't matter since it doesn't link against
++ *       any of the gcc libraries
++ *
++ * gcc plugin to help generate a little bit of entropy from program state,
++ * used during boot in the kernel
++ *
++ * TODO:
++ * - add ipa pass to identify not explicitly marked candidate functions
++ * - mix in more program state (function arguments/return values, loop variables, etc)
++ * - more instrumentation control via attribute parameters
++ *
++ * BUGS:
++ * - LTO needs -flto-partition=none for now
++ */
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++//#include "expr.h" where are you...
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "rtl.h"
++#include "emit-rtl.h"
++#include "tree-flow.h"
++#include "langhooks.h"
++
++#if BUILDING_GCC_VERSION >= 4008
++#define TODO_dump_func 0
++#endif
++
++int plugin_is_GPL_compatible;
++
++static tree latent_entropy_decl;
++
++static struct plugin_info latent_entropy_plugin_info = {
++      .version        = "201303102320",
++      .help           = NULL
++};
++
++static unsigned int execute_latent_entropy(void);
++static bool gate_latent_entropy(void);
++
++static struct gimple_opt_pass latent_entropy_pass = {
++      .pass = {
++              .type                   = GIMPLE_PASS,
++              .name                   = "latent_entropy",
++#if BUILDING_GCC_VERSION >= 4008
++              .optinfo_flags          = OPTGROUP_NONE,
++#endif
++              .gate                   = gate_latent_entropy,
++              .execute                = execute_latent_entropy,
++              .sub                    = NULL,
++              .next                   = NULL,
++              .static_pass_number     = 0,
++              .tv_id                  = TV_NONE,
++              .properties_required    = PROP_gimple_leh | PROP_cfg,
++              .properties_provided    = 0,
++              .properties_destroyed   = 0,
++              .todo_flags_start       = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
++              .todo_flags_finish      = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
++      }
++};
++
++static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
++{
++      if (TREE_CODE(*node) != FUNCTION_DECL) {
++              *no_add_attrs = true;
++              error("%qE attribute only applies to functions", name);
++      }
++      return NULL_TREE;
++}
++
++static struct attribute_spec latent_entropy_attr = {
++      .name                           = "latent_entropy",
++      .min_length                     = 0,
++      .max_length                     = 0,
++      .decl_required                  = true,
++      .type_required                  = false,
++      .function_type_required         = false,
++      .handler                        = handle_latent_entropy_attribute,
++#if BUILDING_GCC_VERSION >= 4007
++      .affects_type_identity          = false
++#endif
++};
++
++static void register_attributes(void *event_data, void *data)
++{
++      register_attribute(&latent_entropy_attr);
++}
++
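++// only instrument functions explicitly marked with the latent_entropy attribute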
++static bool gate_latent_entropy(void)
++{
++      tree latent_entropy_attr;
++
++      latent_entropy_attr = lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl));
++      return latent_entropy_attr != NULL_TREE;
++}
++
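++// compile-time pseudo-random constants from a 64-bit Galois LFSR, seeded per translation unit in start_unit_callback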
++static unsigned HOST_WIDE_INT seed;
++static unsigned HOST_WIDE_INT get_random_const(void)
++{
++      seed = (seed >> 1U) ^ (-(seed & 1ULL) & 0xD800000000000000ULL);
++      return seed;
++}
++
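++// cycle the mixing operation (xor -> add -> rol); when a rhs is requested, also return a random constant (reduced to a valid rotate count for rol)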
++static enum tree_code get_op(tree *rhs)
++{
++      static enum tree_code op;
++      unsigned HOST_WIDE_INT random_const;
++
++      random_const = get_random_const();
++
++      switch (op) {
++      case BIT_XOR_EXPR:
++              op = PLUS_EXPR;
++              break;
++
++      case PLUS_EXPR:
++              if (rhs) {
++                      op = LROTATE_EXPR;
++                      random_const &= HOST_BITS_PER_WIDE_INT - 1;
++                      break;
++              }
++
++      case LROTATE_EXPR:
++      default:
++              op = BIT_XOR_EXPR;
++              break;
++      }
++      if (rhs)
++              *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
++      return op;
++}
++
++static void perturb_local_entropy(basic_block bb, tree local_entropy)
++{
++      gimple_stmt_iterator gsi;
++      gimple assign;
++      tree addxorrol, rhs;
++      enum tree_code op;
++
++      op = get_op(&rhs);
++      addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
++      assign = gimple_build_assign(local_entropy, addxorrol);
++#if BUILDING_GCC_VERSION <= 4007
++      find_referenced_vars_in(assign);
++#endif
++//debug_bb(bb);
++      gsi = gsi_after_labels(bb);
++      gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
++      update_stmt(assign);
++}
++
++static void perturb_latent_entropy(basic_block bb, tree rhs)
++{
++      gimple_stmt_iterator gsi;
++      gimple assign;
++      tree addxorrol, temp;
++
++      // 1. create temporary copy of latent_entropy
++      temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
++#if BUILDING_GCC_VERSION <= 4007
++      add_referenced_var(temp);
++      mark_sym_for_renaming(temp);
++#endif
++
++      // 2. read...
++      assign = gimple_build_assign(temp, latent_entropy_decl);
++#if BUILDING_GCC_VERSION <= 4007
++      find_referenced_vars_in(assign);
++#endif
++      gsi = gsi_after_labels(bb);
++      gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++      update_stmt(assign);
++
++      // 3. ...modify...
++      addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
++      assign = gimple_build_assign(temp, addxorrol);
++#if BUILDING_GCC_VERSION <= 4007
++      find_referenced_vars_in(assign);
++#endif
++      gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++      update_stmt(assign);
++
++      // 4. ...write latent_entropy
++      assign = gimple_build_assign(latent_entropy_decl, temp);
++#if BUILDING_GCC_VERSION <= 4007
++      find_referenced_vars_in(assign);
++#endif
++      gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++      update_stmt(assign);
++}
++
++static unsigned int execute_latent_entropy(void)
++{
++      basic_block bb;
++      gimple assign;
++      gimple_stmt_iterator gsi;
++      tree local_entropy;
++
++      if (!latent_entropy_decl) {
++              struct varpool_node *node;
++
++#if BUILDING_GCC_VERSION <= 4007
++              for (node = varpool_nodes; node; node = node->next) {
++                      tree var = node->decl;
++#else
++              FOR_EACH_VARIABLE(node) {
++                      tree var = node->symbol.decl;
++#endif
++                      if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
++                              continue;
++                      latent_entropy_decl = var;
++//                    debug_tree(var);
++                      break;
++              }
++              if (!latent_entropy_decl) {
++//                    debug_tree(current_function_decl);
++                      return 0;
++              }
++      }
++
++//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
++
++      // 1. create local entropy variable
++      local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy");
++#if BUILDING_GCC_VERSION <= 4007
++      add_referenced_var(local_entropy);
++      mark_sym_for_renaming(local_entropy);
++#endif
++
++      // 2. initialize local entropy variable
++      bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
++      if (dom_info_available_p(CDI_DOMINATORS))
++              set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
++      gsi = gsi_start_bb(bb);
++
++      assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
++//    gimple_set_location(assign, loc);
++#if BUILDING_GCC_VERSION <= 4007
++      find_referenced_vars_in(assign);
++#endif
++      gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++      update_stmt(assign);
++      bb = bb->next_bb;
++
++      // 3. instrument each BB with an operation on the local entropy variable
++      while (bb != EXIT_BLOCK_PTR) {
++              perturb_local_entropy(bb, local_entropy);
++              bb = bb->next_bb;
++      };
++
++      // 4. mix local entropy into the global entropy variable
++      perturb_latent_entropy(EXIT_BLOCK_PTR->prev_bb, local_entropy);
++      return 0;
++}
++
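++// per translation unit: seed the PRNG, then (except under LTO) declare the kernel's extern volatile u64 latent_entropy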
++static void start_unit_callback(void *gcc_data, void *user_data)
++{
++      tree latent_entropy_type;
++
++#if BUILDING_GCC_VERSION >= 4007
++      seed = get_random_seed(false);
++#else
++      sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed);
++      seed *= seed;
++#endif
++
++      if (in_lto_p)
++              return;
++
++      // extern volatile u64 latent_entropy
++      gcc_assert(TYPE_PRECISION(long_long_unsigned_type_node) == 64);
++      latent_entropy_type = build_qualified_type(long_long_unsigned_type_node, TYPE_QUALS(long_long_unsigned_type_node) | TYPE_QUAL_VOLATILE);
++      latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), latent_entropy_type);
++
++      TREE_STATIC(latent_entropy_decl) = 1;
++      TREE_PUBLIC(latent_entropy_decl) = 1;
++      TREE_USED(latent_entropy_decl) = 1;
++      TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
++      DECL_EXTERNAL(latent_entropy_decl) = 1;
++      DECL_ARTIFICIAL(latent_entropy_decl) = 1;
++      DECL_INITIAL(latent_entropy_decl) = NULL;
++      lang_hooks.decls.pushdecl(latent_entropy_decl);
++//    DECL_ASSEMBLER_NAME(latent_entropy_decl);
++//    varpool_finalize_decl(latent_entropy_decl);
++//    varpool_mark_needed_node(latent_entropy_decl);
++}
++
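++// plugin entry point: register the plugin info, the start_unit callback, the latent_entropy pass (before "optimized") and the attribute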
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++      const char * const plugin_name = plugin_info->base_name;
++      struct register_pass_info latent_entropy_pass_info = {
++              .pass                           = &latent_entropy_pass.pass,
++              .reference_pass_name            = "optimized",
++              .ref_pass_instance_number       = 1,
++              .pos_op                         = PASS_POS_INSERT_BEFORE
++      };
++
++      if (!plugin_default_version_check(version, &gcc_version)) {
++              error(G_("incompatible gcc/plugin versions"));
++              return 1;
++      }
++
++      register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
++      register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
++      register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
++      register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
++
++      return 0;
++}
+diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
+new file mode 100644
+index 0000000..b04803b
+--- /dev/null
++++ b/tools/gcc/size_overflow_hash.data
+@@ -0,0 +1,6350 @@
++intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
++batadv_orig_node_del_if_4 batadv_orig_node_del_if 2 4 NULL
++storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
++compat_sock_setsockopt_23 compat_sock_setsockopt 5 23 NULL
++carl9170_alloc_27 carl9170_alloc 1 27 NULL
++sel_read_policyvers_55 sel_read_policyvers 3 55 NULL nohasharray
++padzero_55 padzero 1 55 &sel_read_policyvers_55
++cfg80211_disconnected_57 cfg80211_disconnected 4 57 NULL
++__skb_to_sgvec_72 __skb_to_sgvec 0 72 NULL
++crypto_authenc_setkey_80 crypto_authenc_setkey 3 80 NULL
++snd_korg1212_copy_to_92 snd_korg1212_copy_to 6 92 NULL
++load_msg_95 load_msg 2 95 NULL
++device_flush_iotlb_115 device_flush_iotlb 2-3 115 NULL
++init_q_132 init_q 4 132 NULL
++memstick_alloc_host_142 memstick_alloc_host 1 142 NULL
++hva_to_gfn_memslot_149 hva_to_gfn_memslot 0-1 149 NULL
++tracing_trace_options_write_153 tracing_trace_options_write 3 153 NULL
++nvme_create_queue_170 nvme_create_queue 3 170 NULL
++xfs_buf_item_get_format_189 xfs_buf_item_get_format 2 189 NULL
++iscsi_session_setup_196 iscsi_session_setup 4-5 196 NULL
++virtblk_add_req_197 virtblk_add_req 2-3 197 NULL
++proc_scsi_write_proc_267 proc_scsi_write_proc 3 267 NULL
++br_port_info_size_268 br_port_info_size 0 268 NULL
++generic_file_direct_write_291 generic_file_direct_write 0 291 NULL
++read_file_war_stats_292 read_file_war_stats 3 292 NULL
++SYSC_connect_304 SYSC_connect 3 304 NULL
++syslog_print_307 syslog_print 2 307 NULL
++platform_device_add_data_310 platform_device_add_data 3 310 NULL
++dn_setsockopt_314 dn_setsockopt 5 314 NULL
++next_node_allowed_318 next_node_allowed 1 318 NULL
++compat_sys_ioctl_333 compat_sys_ioctl 3 333 NULL
++btmrvl_txdnldready_read_413 btmrvl_txdnldready_read 3 413 NULL
++lbs_rdmac_read_418 lbs_rdmac_read 3 418 NULL
++snd_ca0106_ptr_read_467 snd_ca0106_ptr_read 0 467 NULL
++_alloc_get_attr_desc_470 _alloc_get_attr_desc 2 470 NULL
++dccp_manip_pkt_476 dccp_manip_pkt 4 476 NULL
++nvme_trans_modesel_data_488 nvme_trans_modesel_data 4 488 NULL
++pidlist_resize_496 pidlist_resize 2 496 NULL
++read_vbt_r0_503 read_vbt_r0 1 503 NULL
++rx_rx_defrag_end_read_505 rx_rx_defrag_end_read 3 505 NULL
++ocfs2_validate_meta_ecc_bhs_527 ocfs2_validate_meta_ecc_bhs 0 527 NULL
++zlib_deflate_workspacesize_537 zlib_deflate_workspacesize 0 537 NULL
++iwl_dbgfs_wowlan_sram_read_540 iwl_dbgfs_wowlan_sram_read 3 540 NULL
++dle_count_543 dle_count 0 543 NULL
++devres_alloc_551 devres_alloc 2 551 NULL
++snd_aw2_saa7146_get_hw_ptr_playback_558 snd_aw2_saa7146_get_hw_ptr_playback 0 558 NULL
++dev_hard_header_565 dev_hard_header 0 565 NULL nohasharray
++start_isoc_chain_565 start_isoc_chain 2 565 &dev_hard_header_565
++compat_sys_preadv_583 compat_sys_preadv 3 583 NULL
++smk_write_load_self2_591 smk_write_load_self2 3 591 NULL
++ni_gpct_device_construct_610 ni_gpct_device_construct 5 610 NULL
++fuse_request_alloc_nofs_617 fuse_request_alloc_nofs 1 617 NULL
++compat_sys_shmat_620 compat_sys_shmat 3 620 NULL
++isp1760_register_628 isp1760_register 1-2 628 NULL
++clone_split_bio_633 clone_split_bio 6 633 NULL
++ceph_osdc_new_request_635 ceph_osdc_new_request 6 635 NULL
++remap_to_cache_640 remap_to_cache 3 640 NULL
++drbd_bm_find_next_643 drbd_bm_find_next 2 643 NULL
++unlink_queued_645 unlink_queued 3-4 645 NULL
++dtim_interval_read_654 dtim_interval_read 3 654 NULL
++mem_rx_free_mem_blks_read_675 mem_rx_free_mem_blks_read 3 675 NULL
++rtl8169_try_rx_copy_705 rtl8169_try_rx_copy 3 705 NULL
++persistent_ram_vmap_709 persistent_ram_vmap 1-2 709 NULL
++ipath_resize_cq_712 ipath_resize_cq 2 712 NULL
++disk_max_parts_719 disk_max_parts 0 719 NULL
++sctp_setsockopt_peer_addr_params_734 sctp_setsockopt_peer_addr_params 3 734 NULL
++dvb_video_write_754 dvb_video_write 3 754 NULL
++if_writecmd_815 if_writecmd 2 815 NULL
++aac_change_queue_depth_825 aac_change_queue_depth 2 825 NULL
++read_fifo_826 read_fifo 3 826 NULL
++um_idi_read_850 um_idi_read 3 850 NULL
++ieee80211_if_fmt_rc_rateidx_mcs_mask_5ghz_856 ieee80211_if_fmt_rc_rateidx_mcs_mask_5ghz 3 856 NULL
++o2net_send_message_vec_879 o2net_send_message_vec 4 879 NULL nohasharray
++iwl_dbgfs_fh_reg_read_879 iwl_dbgfs_fh_reg_read 3 879 &o2net_send_message_vec_879
++snd_pcm_action_single_905 snd_pcm_action_single 0 905 NULL
++btmrvl_hsstate_read_920 btmrvl_hsstate_read 3 920 NULL
++carl9170_cmd_buf_950 carl9170_cmd_buf 3 950 NULL
++get_ramdisk_size_954 get_ramdisk_size 0 954 NULL
++__nodes_weight_956 __nodes_weight 2-0 956 NULL
++sys_msgrcv_959 sys_msgrcv 3 959 NULL
++pte_prefetch_gfn_to_pfn_997 pte_prefetch_gfn_to_pfn 2 997 NULL nohasharray
++hdlcdev_rx_997 hdlcdev_rx 3 997 &pte_prefetch_gfn_to_pfn_997
++dm_cache_set_dirty_1016 dm_cache_set_dirty 2 1016 NULL
++_do_truncate_1019 _do_truncate 2 1019 NULL
++smk_write_cipso2_1021 smk_write_cipso2 3 1021 NULL
++gigaset_initdriver_1060 gigaset_initdriver 2 1060 NULL
++Read_hfc16_1070 Read_hfc16 0 1070 NULL
++mce_request_packet_1073 mce_request_packet 3 1073 NULL
++agp_create_memory_1075 agp_create_memory 1 1075 NULL
++_scsih_adjust_queue_depth_1083 _scsih_adjust_queue_depth 2 1083 NULL
++nfs_pgarray_set_1085 nfs_pgarray_set 2 1085 NULL
++llcp_sock_sendmsg_1092 llcp_sock_sendmsg 4 1092 NULL
++nfs4_init_nonuniform_client_string_1097 nfs4_init_nonuniform_client_string 3 1097 NULL
++sys_mremap_1107 sys_mremap 5-1-2 1107 NULL
++cfg80211_report_obss_beacon_1133 cfg80211_report_obss_beacon 3 1133 NULL
++vmalloc_32_1135 vmalloc_32 1 1135 NULL
++dec_zcache_eph_zpages_1138 dec_zcache_eph_zpages 1 1138 NULL
++i2400m_rx_ctl_1157 i2400m_rx_ctl 4 1157 NULL
++ipc_alloc_1192 ipc_alloc 1 1192 NULL
++ib_create_send_mad_1196 ib_create_send_mad 5 1196 NULL
++i2400m_rx_ctl_ack_1199 i2400m_rx_ctl_ack 3 1199 NULL
++dgrp_dpa_read_1204 dgrp_dpa_read 3 1204 NULL
++i2cdev_read_1206 i2cdev_read 3 1206 NULL
++ipw_packet_received_skb_1230 ipw_packet_received_skb 2 1230 NULL
++acpi_battery_write_alarm_1240 acpi_battery_write_alarm 3 1240 NULL
++nested_get_page_1252 nested_get_page 2 1252 NULL
++ocfs2_extend_file_1266 ocfs2_extend_file 3 1266 NULL
++qla4xxx_change_queue_depth_1268 qla4xxx_change_queue_depth 2 1268 NULL
++ioctl_private_iw_point_1273 ioctl_private_iw_point 7 1273 NULL
++batadv_tt_prepare_packet_buff_1280 batadv_tt_prepare_packet_buff 4 1280 NULL
++tx_frag_in_process_called_read_1290 tx_frag_in_process_called_read 3 1290 NULL
++wm_adsp_buf_alloc_1317 wm_adsp_buf_alloc 2 1317 NULL
++compat_put_u64_1319 compat_put_u64 1 1319 NULL
++ffs_1322 ffs 0 1322 NULL
++qlcnic_pci_sriov_configure_1327 qlcnic_pci_sriov_configure 2 1327 NULL
++carl9170_rx_stream_1334 carl9170_rx_stream 3 1334 NULL
++btrfs_submit_compressed_write_1347 btrfs_submit_compressed_write 5 1347 NULL
++gen_pool_best_fit_1348 gen_pool_best_fit 2-3-4 1348 NULL
++io_mapping_create_wc_1354 io_mapping_create_wc 1-2 1354 NULL
++snd_pcm_lib_write1_1358 snd_pcm_lib_write1 0-3 1358 NULL
++ipx_sendmsg_1362 ipx_sendmsg 4 1362 NULL
++iov_num_pages_1364 iov_num_pages 0 1364 NULL
++fw_stats_raw_read_1369 fw_stats_raw_read 3 1369 NULL
++ocfs2_prepare_inode_for_write_1372 ocfs2_prepare_inode_for_write 3 1372 NULL
++sctp_setsockopt_initmsg_1383 sctp_setsockopt_initmsg 3 1383 NULL
++do_msgsnd_1387 do_msgsnd 4 1387 NULL
++zone_page_state_1393 zone_page_state 0 1393 NULL
++file_read_actor_1401 file_read_actor 4 1401 NULL
++vb2_dc_get_user_pages_1442 vb2_dc_get_user_pages 1-3 1442 NULL
++stack_max_size_read_1445 stack_max_size_read 3 1445 NULL
++tx_queue_len_read_1463 tx_queue_len_read 3 1463 NULL
++xprt_alloc_1475 xprt_alloc 2 1475 NULL
++SYSC_syslog_1477 SYSC_syslog 3 1477 NULL
++sta_num_ps_buf_frames_read_1488 sta_num_ps_buf_frames_read 3 1488 NULL
++posix_acl_permission_1495 posix_acl_permission 0 1495 NULL
++tomoyo_round2_1518 tomoyo_round2 0 1518 NULL
++__vfio_dma_map_1523 __vfio_dma_map 3 1523 NULL
++alloc_perm_bits_1532 alloc_perm_bits 2 1532 NULL
++ath6kl_init_get_fwcaps_1557 ath6kl_init_get_fwcaps 3 1557 NULL
++ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime_1589 ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime 3 1589 NULL
++fc_frame_alloc_1596 fc_frame_alloc 2 1596 NULL
++packet_buffer_init_1607 packet_buffer_init 2 1607 NULL
++btmrvl_hscmd_read_1614 btmrvl_hscmd_read 3 1614 NULL
++v9fs_fid_xattr_get_1618 v9fs_fid_xattr_get 0 1618 NULL
++btmrvl_hsmode_read_1647 btmrvl_hsmode_read 3 1647 NULL
++ikconfig_read_current_1658 ikconfig_read_current 3 1658 NULL
++mei_cl_recv_1665 mei_cl_recv 3 1665 NULL
++netdev_feature_string_1667 netdev_feature_string 0 1667 NULL
++compat_x25_ioctl_1674 compat_x25_ioctl 3 1674 NULL
++rmap_add_1677 rmap_add 3 1677 NULL
++configfs_read_file_1683 configfs_read_file 3 1683 NULL
++coda_psdev_write_1711 coda_psdev_write 3 1711 NULL
++btrfs_dir_data_len_1714 btrfs_dir_data_len 0 1714 NULL
++dma_memcpy_pg_to_iovec_1725 dma_memcpy_pg_to_iovec 6 1725 NULL
++tx_frag_called_read_1748 tx_frag_called_read 3 1748 NULL
++compat_cdrom_generic_command_1756 compat_cdrom_generic_command 4 1756 NULL
++ieee80211_new_mesh_header_1761 ieee80211_new_mesh_header 0 1761 NULL
++ebt_size_mwt_1768 ebt_size_mwt 0 1768 NULL
++cosa_write_1774 cosa_write 3 1774 NULL
++update_macheader_1775 update_macheader 7 1775 NULL
++dec_zcache_pers_zbytes_1779 dec_zcache_pers_zbytes 1 1779 NULL
++fcoe_ctlr_device_add_1793 fcoe_ctlr_device_add 3 1793 NULL
++__nodelist_scnprintf_1815 __nodelist_scnprintf 0-2-4 1815 NULL
++alloc_pages_exact_1892 alloc_pages_exact 1 1892 NULL
++rx_defrag_called_read_1897 rx_defrag_called_read 3 1897 NULL
++nfs_parse_server_name_1899 nfs_parse_server_name 2 1899 NULL
++SyS_add_key_1900 SyS_add_key 4 1900 NULL
++isku_sysfs_write_keys_media_1910 isku_sysfs_write_keys_media 6 1910 NULL
++tx_tx_retry_data_read_1926 tx_tx_retry_data_read 3 1926 NULL
++memblock_alloc_base_1938 memblock_alloc_base 1-2 1938 NULL
++cyttsp_probe_1940 cyttsp_probe 4 1940 NULL
++ieee80211_if_fmt_dot11MeshConfirmTimeout_1945 ieee80211_if_fmt_dot11MeshConfirmTimeout 3 1945 NULL
++read_swap_header_1957 read_swap_header 0 1957 NULL
++ivtv_v4l2_read_1964 ivtv_v4l2_read 3 1964 NULL
++sel_read_avc_hash_stats_1984 sel_read_avc_hash_stats 3 1984 NULL
++__alloc_bootmem_node_1992 __alloc_bootmem_node 2-3 1992 NULL
++atomic_read_unchecked_1995 atomic_read_unchecked 0 1995 NULL
++batadv_tt_commit_changes_2008 batadv_tt_commit_changes 4 2008 NULL
++sep_prepare_input_dma_table_2009 sep_prepare_input_dma_table 2-3 2009 NULL
++rx_rx_defrag_read_2010 rx_rx_defrag_read 3 2010 NULL
++ocfs2_global_qinit_alloc_2018 ocfs2_global_qinit_alloc 0 2018 NULL
++write_flush_pipefs_2021 write_flush_pipefs 3 2021 NULL
++BcmCopySection_2035 BcmCopySection 5 2035 NULL
++devm_ioremap_nocache_2036 devm_ioremap_nocache 2-3 2036 NULL
++ath6kl_fwlog_mask_read_2050 ath6kl_fwlog_mask_read 3 2050 NULL
++ocfs2_expand_inline_dir_2063 ocfs2_expand_inline_dir 3 2063 NULL
++subbuf_read_actor_2071 subbuf_read_actor 3 2071 NULL
++iwl_dbgfs_current_sleep_command_read_2081 iwl_dbgfs_current_sleep_command_read 3 2081 NULL
++idetape_chrdev_read_2097 idetape_chrdev_read 3 2097 NULL
++audit_expand_2098 audit_expand 2 2098 NULL
++num_pages_spanned_2105 num_pages_spanned 0 2105 NULL
++iwl_dbgfs_log_event_read_2107 iwl_dbgfs_log_event_read 3 2107 NULL
++ecryptfs_encrypt_and_encode_filename_2109 ecryptfs_encrypt_and_encode_filename 6 2109 NULL
++__find_xattr_2117 __find_xattr 6 2117 NULL nohasharray
++enable_read_2117 enable_read 3 2117 &__find_xattr_2117
++pcf50633_write_block_2124 pcf50633_write_block 2-3 2124 NULL
++check_load_and_stores_2143 check_load_and_stores 2 2143 NULL
++lp_gpio_irq_map_2149 lp_gpio_irq_map 2 2149 NULL
++mlx4_init_icm_table_2151 mlx4_init_icm_table 5-4 2151 NULL
++iov_iter_count_2152 iov_iter_count 0 2152 NULL
++_ore_get_io_state_2166 _ore_get_io_state 3-4-5 2166 NULL
++bio_integrity_alloc_2194 bio_integrity_alloc 3 2194 NULL
++ssb_bus_ssbbus_register_2217 ssb_bus_ssbbus_register 2 2217 NULL
++mei_dbgfs_read_meclients_2219 mei_dbgfs_read_meclients 3 2219 NULL nohasharray
++u32_array_read_2219 u32_array_read 3 2219 &mei_dbgfs_read_meclients_2219
++vhci_write_2224 vhci_write 3 2224 NULL
++efx_tsoh_page_count_2225 efx_tsoh_page_count 0 2225 NULL
++lowpan_get_mac_header_length_2231 lowpan_get_mac_header_length 0 2231 NULL
++ieee80211_if_read_dot11MeshHWMPRannInterval_2249 ieee80211_if_read_dot11MeshHWMPRannInterval 3 2249 NULL
++netlbl_secattr_catmap_walk_2255 netlbl_secattr_catmap_walk 0-2 2255 NULL
++sel_write_avc_cache_threshold_2256 sel_write_avc_cache_threshold 3 2256 NULL
++do_update_counters_2259 do_update_counters 4 2259 NULL
++ath6kl_wmi_bssinfo_event_rx_2275 ath6kl_wmi_bssinfo_event_rx 3 2275 NULL
++debug_debug5_read_2291 debug_debug5_read 3 2291 NULL
++kvm_clear_guest_page_2308 kvm_clear_guest_page 4-2 2308 NULL
++intel_sdvo_set_value_2311 intel_sdvo_set_value 4 2311 NULL
++picolcd_fb_write_2318 picolcd_fb_write 3 2318 NULL
++gart_map_page_2325 gart_map_page 3-4 2325 NULL
++__erst_read_to_erange_2341 __erst_read_to_erange 0 2341 NULL
++zr364xx_read_2354 zr364xx_read 3 2354 NULL
++viafb_iga2_odev_proc_write_2363 viafb_iga2_odev_proc_write 3 2363 NULL
++SyS_mremap_2367 SyS_mremap 1-2-5 2367 NULL
++xfs_buf_map_from_irec_2368 xfs_buf_map_from_irec 5 2368 NULL
++il_dbgfs_sensitivity_read_2370 il_dbgfs_sensitivity_read 3 2370 NULL
++rtl_port_map_2385 rtl_port_map 1-2 2385 NULL
++rxpipe_rx_prep_beacon_drop_read_2403 rxpipe_rx_prep_beacon_drop_read 3 2403 NULL
++SYSC_mlock_2415 SYSC_mlock 1 2415 NULL
++isdn_v110_open_2418 isdn_v110_open 3 2418 NULL
++raid1_size_2419 raid1_size 0-2 2419 NULL
++roccat_common2_send_2422 roccat_common2_send 4 2422 NULL
++hfcpci_empty_fifo_2427 hfcpci_empty_fifo 4 2427 NULL
++ioremap_nocache_2439 ioremap_nocache 1-2 2439 NULL
++tty_buffer_find_2443 tty_buffer_find 2 2443 NULL
++ath6kl_usb_bmi_write_2454 ath6kl_usb_bmi_write 3 2454 NULL
++b43legacy_debugfs_read_2473 b43legacy_debugfs_read 3 2473 NULL
++update_pmkid_2481 update_pmkid 4 2481 NULL
++wiphy_new_2482 wiphy_new 2 2482 NULL
++bio_alloc_bioset_2484 bio_alloc_bioset 2 2484 NULL
++lookup_cache_entry_2494 lookup_cache_entry 2 2494 NULL
++squashfs_read_fragment_index_table_2506 squashfs_read_fragment_index_table 4 2506 NULL
++dm_write_2513 dm_write 3 2513 NULL
++v9fs_cached_file_read_2514 v9fs_cached_file_read 3 2514 NULL
++ext4_get_inode_loc_2516 ext4_get_inode_loc 0 2516 NULL
++gspca_dev_probe_2570 gspca_dev_probe 4 2570 NULL
++i915_next_seqno_write_2572 i915_next_seqno_write 3 2572 NULL
++pcm_sanity_check_2574 pcm_sanity_check 0 2574 NULL
++slot_bytes_2609 slot_bytes 0 2609 NULL
++smk_write_logging_2618 smk_write_logging 3 2618 NULL
++kvm_gfn_to_hva_cache_init_2636 kvm_gfn_to_hva_cache_init 3 2636 NULL
++lro_gen_skb_2644 lro_gen_skb 6 2644 NULL
++nfc_llcp_send_ui_frame_2702 nfc_llcp_send_ui_frame 5 2702 NULL
++memcpy_fromiovecend_2707 memcpy_fromiovecend 3-4 2707 NULL
++__xip_file_write_2733 __xip_file_write 4-3 2733 NULL
++hid_report_raw_event_2762 hid_report_raw_event 4 2762 NULL
++mon_bin_ioctl_2771 mon_bin_ioctl 3 2771 NULL nohasharray
++bictcp_update_2771 bictcp_update 2 2771 &mon_bin_ioctl_2771
++__next_cpu_2782 __next_cpu 1 2782 NULL
++set_msr_hyperv_pw_2785 set_msr_hyperv_pw 3 2785 NULL
++sel_read_enforce_2828 sel_read_enforce 3 2828 NULL
++vb2_dc_get_userptr_2829 vb2_dc_get_userptr 2-3 2829 NULL
++wait_for_avail_2847 wait_for_avail 0 2847 NULL
++ufs_free_fragments_2857 ufs_free_fragments 2 2857 NULL
++sfq_alloc_2861 sfq_alloc 1 2861 NULL
++move_addr_to_user_2868 move_addr_to_user 2 2868 NULL
++mq_map_2871 mq_map 2 2871 NULL
++nla_padlen_2883 nla_padlen 1 2883 NULL
++cmm_write_2896 cmm_write 3 2896 NULL
++alloc_page_cgroup_2919 alloc_page_cgroup 1 2919 NULL
++xfs_trans_get_buf_map_2927 xfs_trans_get_buf_map 4 2927 NULL
++nes_read_indexed_2946 nes_read_indexed 0 2946 NULL
++tm6000_i2c_recv_regs16_2949 tm6000_i2c_recv_regs16 5 2949 NULL
++set_fast_connectable_2952 set_fast_connectable 4 2952 NULL
++ppp_cp_event_2965 ppp_cp_event 6 2965 NULL
++do_strnlen_user_2976 do_strnlen_user 0-2 2976 NULL
++p9_nr_pages_2992 p9_nr_pages 0-2 2992 NULL
++do_dmabuf_dirty_sou_3017 do_dmabuf_dirty_sou 7 3017 NULL
++depth_write_3021 depth_write 3 3021 NULL
++snd_azf3328_codec_inl_3022 snd_azf3328_codec_inl 0 3022 NULL
++xfrm_dst_alloc_copy_3034 xfrm_dst_alloc_copy 3 3034 NULL
++iwl_dbgfs_sleep_level_override_read_3038 iwl_dbgfs_sleep_level_override_read 3 3038 NULL
++nr_free_buffer_pages_3044 nr_free_buffer_pages 0 3044 NULL
++il3945_ucode_rx_stats_read_3048 il3945_ucode_rx_stats_read 3 3048 NULL
++qp_alloc_ppn_set_3068 qp_alloc_ppn_set 2-4 3068 NULL
++__blk_end_bidi_request_3070 __blk_end_bidi_request 3-4 3070 NULL
++dac960_user_command_proc_write_3071 dac960_user_command_proc_write 3 3071 NULL
++free_coherent_3082 free_coherent 4-2 3082 NULL
++clone_bio_3100 clone_bio 6 3100 NULL nohasharray
++ttusb2_msg_3100 ttusb2_msg 4 3100 &clone_bio_3100
++rb_alloc_3102 rb_alloc 1 3102 NULL
++simple_write_to_buffer_3122 simple_write_to_buffer 5-2 3122 NULL
++print_time_3132 print_time 0 3132 NULL
++fill_write_buffer_3142 fill_write_buffer 3 3142 NULL
++CIFSSMBSetPosixACL_3154 CIFSSMBSetPosixACL 5 3154 NULL
++compat_sys_migrate_pages_3157 compat_sys_migrate_pages 2 3157 NULL
++uv_num_possible_blades_3177 uv_num_possible_blades 0 3177 NULL
++uvc_video_stats_dump_3181 uvc_video_stats_dump 3 3181 NULL
++compat_do_ip6t_set_ctl_3184 compat_do_ip6t_set_ctl 4 3184 NULL
++mempool_create_node_3191 mempool_create_node 1 3191 NULL
++alloc_context_3194 alloc_context 1 3194 NULL
++shmem_pread_slow_3198 shmem_pread_slow 3 3198 NULL
++SyS_sendto_3219 SyS_sendto 6 3219 NULL
++kimage_crash_alloc_3233 kimage_crash_alloc 3 3233 NULL
++do_read_log_to_user_3236 do_read_log_to_user 4 3236 NULL
++ext3_xattr_find_entry_3237 ext3_xattr_find_entry 0 3237 NULL
++key_key_read_3241 key_key_read 3 3241 NULL
++number_3243 number 0 3243 NULL
++check_vendor_extension_3254 check_vendor_extension 1 3254 NULL
++__ilog2_u64_3284 __ilog2_u64 0 3284 NULL
++arvo_sysfs_write_3311 arvo_sysfs_write 6 3311 NULL
++dbDiscardAG_3322 dbDiscardAG 3 3322 NULL
++compat_sys_setsockopt_3326 compat_sys_setsockopt 5 3326 NULL
++aac_rkt_ioremap_3333 aac_rkt_ioremap 2 3333 NULL
++read_from_oldmem_3337 read_from_oldmem 2 3337 NULL
++tty_port_register_device_attr_3341 tty_port_register_device_attr 3 3341 NULL
++il_dbgfs_interrupt_read_3351 il_dbgfs_interrupt_read 3 3351 NULL
++gsm_control_rls_3353 gsm_control_rls 3 3353 NULL
++scnprintf_3360 scnprintf 0-2 3360 NULL
++x86_emulate_instruction_3389 x86_emulate_instruction 2 3389 NULL
++mtdchar_writeoob_3393 mtdchar_writeoob 4 3393 NULL
++send_stream_3397 send_stream 4 3397 NULL
++isdn_readbchan_3401 isdn_readbchan 0-5 3401 NULL
++msix_map_region_3411 msix_map_region 3 3411 NULL
++mei_io_cb_alloc_resp_buf_3414 mei_io_cb_alloc_resp_buf 2 3414 NULL
++pci_add_cap_save_buffer_3426 pci_add_cap_save_buffer 3 3426 NULL
++crystalhd_create_dio_pool_3427 crystalhd_create_dio_pool 2 3427 NULL
++SyS_msgsnd_3436 SyS_msgsnd 3 3436 NULL
++pipe_iov_copy_to_user_3447 pipe_iov_copy_to_user 3 3447 NULL
++percpu_modalloc_3448 percpu_modalloc 2-3 3448 NULL
++map_single_3449 map_single 0-2 3449 NULL
++jffs2_acl_setxattr_3464 jffs2_acl_setxattr 4 3464 NULL nohasharray
++snd_pcm_lib_readv_transfer_3464 snd_pcm_lib_readv_transfer 4-2-5 3464 &jffs2_acl_setxattr_3464
++alloc_skb_fclone_3467 alloc_skb_fclone 1 3467 NULL
++security_context_to_sid_default_3492 security_context_to_sid_default 2 3492 NULL
++xfrm_migrate_msgsize_3496 xfrm_migrate_msgsize 1 3496 NULL
++kvm_handle_bad_page_3503 kvm_handle_bad_page 2 3503 NULL
++mem_tx_free_mem_blks_read_3521 mem_tx_free_mem_blks_read 3 3521 NULL nohasharray
++ieee80211_wx_set_gen_ie_rsl_3521 ieee80211_wx_set_gen_ie_rsl 3 3521 &mem_tx_free_mem_blks_read_3521
++SyS_readv_3539 SyS_readv 3 3539 NULL
++btrfs_dir_name_len_3549 btrfs_dir_name_len 0 3549 NULL
++b43legacy_read16_3561 b43legacy_read16 0 3561 NULL
++alloc_smp_resp_3566 alloc_smp_resp 1 3566 NULL
++evtchn_read_3569 evtchn_read 3 3569 NULL
++vc_resize_3585 vc_resize 2-3 3585 NULL
++compat_sys_semtimedop_3606 compat_sys_semtimedop 3 3606 NULL
++sctp_getsockopt_events_3607 sctp_getsockopt_events 2 3607 NULL
++edac_mc_alloc_3611 edac_mc_alloc 4 3611 NULL
++tx_tx_starts_read_3617 tx_tx_starts_read 3 3617 NULL
++aligned_kmalloc_3628 aligned_kmalloc 1 3628 NULL
++x86_swiotlb_alloc_coherent_3649 x86_swiotlb_alloc_coherent 2 3649 NULL nohasharray
++cm_copy_private_data_3649 cm_copy_private_data 2 3649 &x86_swiotlb_alloc_coherent_3649
++ath6kl_disconnect_timeout_read_3650 ath6kl_disconnect_timeout_read 3 3650 NULL
++i915_compat_ioctl_3656 i915_compat_ioctl 2 3656 NULL
++snd_m3_assp_read_3703 snd_m3_assp_read 0 3703 NULL nohasharray
++create_irq_3703 create_irq 0 3703 &snd_m3_assp_read_3703 nohasharray
++btmrvl_psmode_write_3703 btmrvl_psmode_write 3 3703 &create_irq_3703
++videobuf_pages_to_sg_3708 videobuf_pages_to_sg 2 3708 NULL
++ci_ll_write_3740 ci_ll_write 4 3740 NULL nohasharray
++ath6kl_mgmt_tx_3740 ath6kl_mgmt_tx 7 3740 &ci_ll_write_3740
++sctp_setsockopt_auth_key_3793 sctp_setsockopt_auth_key 3 3793 NULL
++ncp_file_write_3813 ncp_file_write 3 3813 NULL
++read_file_tx_chainmask_3829 read_file_tx_chainmask 3 3829 NULL
++stringify_nodemap_3842 stringify_nodemap 2 3842 NULL
++ubi_eba_read_leb_3847 ubi_eba_read_leb 0 3847 NULL
++create_one_cdev_3852 create_one_cdev 2 3852 NULL
++smk_read_onlycap_3855 smk_read_onlycap 3 3855 NULL
++get_fd_set_3866 get_fd_set 1 3866 NULL
++garp_attr_create_3883 garp_attr_create 3 3883 NULL
++uea_send_modem_cmd_3888 uea_send_modem_cmd 3 3888 NULL
++efivarfs_file_read_3893 efivarfs_file_read 3 3893 NULL
++nvram_write_3894 nvram_write 3 3894 NULL
++pipeline_pre_proc_swi_read_3898 pipeline_pre_proc_swi_read 3 3898 NULL
++comedi_buf_read_n_available_3899 comedi_buf_read_n_available 0 3899 NULL
++vcs_write_3910 vcs_write 3 3910 NULL
++brcmf_debugfs_fws_stats_read_3947 brcmf_debugfs_fws_stats_read 3 3947 NULL
++atalk_compat_ioctl_3991 atalk_compat_ioctl 3 3991 NULL
++do_add_counters_3992 do_add_counters 3 3992 NULL
++userspace_status_4004 userspace_status 4 4004 NULL
++mei_write_4005 mei_write 3 4005 NULL nohasharray
++xfs_check_block_4005 xfs_check_block 4 4005 &mei_write_4005
++snd_hdsp_capture_copy_4011 snd_hdsp_capture_copy 5 4011 NULL
++mm_populate_4016 mm_populate 1 4016 NULL
++blk_end_request_4024 blk_end_request 3 4024 NULL
++ext4_xattr_find_entry_4025 ext4_xattr_find_entry 0 4025 NULL
++usbnet_write_cmd_async_4035 usbnet_write_cmd_async 7 4035 NULL
++read_file_queues_4078 read_file_queues 3 4078 NULL
++fbcon_do_set_font_4079 fbcon_do_set_font 2-3 4079 NULL
++da9052_free_irq_4090 da9052_free_irq 2 4090 NULL
++C_SYSC_rt_sigpending_4114 C_SYSC_rt_sigpending 2 4114 NULL
++ntb_netdev_change_mtu_4147 ntb_netdev_change_mtu 2 4147 NULL
++tm6000_read_4151 tm6000_read 3 4151 NULL
++mpt_raid_phys_disk_get_num_paths_4155 mpt_raid_phys_disk_get_num_paths 0 4155 NULL
++msg_bits_4158 msg_bits 0-3-4 4158 NULL
++get_alua_req_4166 get_alua_req 3 4166 NULL
++blk_dropped_read_4168 blk_dropped_read 3 4168 NULL
++read_file_bool_4180 read_file_bool 3 4180 NULL
++f1x_determine_channel_4202 f1x_determine_channel 2 4202 NULL
++_osd_req_list_objects_4204 _osd_req_list_objects 6 4204 NULL
++__snd_gf1_read_addr_4210 __snd_gf1_read_addr 0 4210 NULL
++goldfish_audio_write_4284 goldfish_audio_write 3 4284 NULL
++paging32_page_fault_4288 paging32_page_fault 2 4288 NULL
++xt_compat_add_offset_4289 xt_compat_add_offset 0 4289 NULL
++__usbnet_read_cmd_4299 __usbnet_read_cmd 7 4299 NULL
++dvb_ringbuffer_pkt_read_user_4303 dvb_ringbuffer_pkt_read_user 3-2-5 4303 NULL
++nouveau_fifo_create__4327 nouveau_fifo_create_ 5-6 4327 NULL
++snd_rawmidi_kernel_read_4328 snd_rawmidi_kernel_read 3 4328 NULL
++__copy_from_user_inatomic_4365 __copy_from_user_inatomic 3 4365 NULL
++sys_setdomainname_4373 sys_setdomainname 2 4373 NULL
++irda_sendmsg_4388 irda_sendmsg 4 4388 NULL
++access_process_vm_4412 access_process_vm 0-2-4 4412 NULL nohasharray
++cxacru_cm_get_array_4412 cxacru_cm_get_array 4 4412 &access_process_vm_4412
++libfc_vport_create_4415 libfc_vport_create 2 4415 NULL
++do_pages_stat_4437 do_pages_stat 2 4437 NULL
++memparse_4444 memparse 0 4444 NULL
++at76_set_card_command_4471 at76_set_card_command 4 4471 NULL
++snd_seq_expand_var_event_4481 snd_seq_expand_var_event 0-5 4481 NULL
++sys_semtimedop_4486 sys_semtimedop 3 4486 NULL
++vmbus_establish_gpadl_4495 vmbus_establish_gpadl 3 4495 NULL
++set_link_security_4502 set_link_security 4 4502 NULL
++dm_cache_remove_mapping_4513 dm_cache_remove_mapping 2 4513 NULL
++__gfn_to_pfn_memslot_4530 __gfn_to_pfn_memslot 2 4530 NULL
++sys_llistxattr_4532 sys_llistxattr 3 4532 NULL
++da9052_group_write_4534 da9052_group_write 2-3 4534 NULL
++tty_register_device_4544 tty_register_device 2 4544 NULL
++videobuf_vmalloc_to_sg_4548 videobuf_vmalloc_to_sg 2 4548 NULL
++btrfs_file_extent_inline_item_len_4575 btrfs_file_extent_inline_item_len 0 4575 NULL
++xfs_buf_get_maps_4581 xfs_buf_get_maps 2 4581 NULL
++bch_alloc_4593 bch_alloc 1 4593 NULL
++__wb_force_remove_mapping_4622 __wb_force_remove_mapping 2 4622 NULL
++iwl_dbgfs_tx_queue_read_4635 iwl_dbgfs_tx_queue_read 3 4635 NULL
++skb_add_data_nocache_4682 skb_add_data_nocache 4 4682 NULL
++cx18_read_pos_4683 cx18_read_pos 3 4683 NULL
++short_retry_limit_read_4687 short_retry_limit_read 3 4687 NULL
++kone_receive_4690 kone_receive 4 4690 NULL
++round_pipe_size_4701 round_pipe_size 0 4701 NULL
++cxgbi_alloc_big_mem_4707 cxgbi_alloc_big_mem 1 4707 NULL
++konepure_sysfs_read_4709 konepure_sysfs_read 6 4709 NULL
++btmrvl_gpiogap_read_4718 btmrvl_gpiogap_read 3 4718 NULL
++ati_create_gatt_pages_4722 ati_create_gatt_pages 1 4722 NULL nohasharray
++show_header_4722 show_header 3 4722 &ati_create_gatt_pages_4722
++__find_free_cblock_4741 __find_free_cblock 2 4741 NULL
++memblock_find_in_range_4759 memblock_find_in_range 3-4 4759 NULL
++pwr_rcvd_bcns_cnt_read_4774 pwr_rcvd_bcns_cnt_read 3 4774 NULL
++create_subvol_4791 create_subvol 4 4791 NULL
++ncp__vol2io_4804 ncp__vol2io 5 4804 NULL
++repair_io_failure_4815 repair_io_failure 4 4815 NULL
++gigaset_if_receive_4861 gigaset_if_receive 3 4861 NULL
++key_tx_spec_read_4862 key_tx_spec_read 3 4862 NULL
++ocfs2_defrag_extent_4873 ocfs2_defrag_extent 3 4873 NULL
++hid_register_field_4874 hid_register_field 2-3 4874 NULL
++vga_arb_read_4886 vga_arb_read 3 4886 NULL
++sys_ipc_4889 sys_ipc 3 4889 NULL
++sys_process_vm_writev_4928 sys_process_vm_writev 3-5 4928 NULL
++ntfs_rl_insert_4931 ntfs_rl_insert 2-4 4931 NULL
++ieee80211_if_fmt_ave_beacon_4941 ieee80211_if_fmt_ave_beacon 3 4941 NULL
++devm_kzalloc_4966 devm_kzalloc 2 4966 NULL
++compat_rawv6_setsockopt_4967 compat_rawv6_setsockopt 5 4967 NULL
++skb_network_header_len_4971 skb_network_header_len 0 4971 NULL
++ieee80211_if_fmt_dot11MeshHWMPconfirmationInterval_4976 ieee80211_if_fmt_dot11MeshHWMPconfirmationInterval 3 4976 NULL
++vmw_surface_define_size_4993 vmw_surface_define_size 0 4993 NULL
++compat_SyS_ipc_5000 compat_SyS_ipc 3-4-5-6 5000 NULL
++qla82xx_pci_mem_write_direct_5008 qla82xx_pci_mem_write_direct 2 5008 NULL
++do_mincore_5018 do_mincore 0-1 5018 NULL
++mtd_device_parse_register_5024 mtd_device_parse_register 5 5024 NULL
++ocfs2_check_range_for_holes_5066 ocfs2_check_range_for_holes 2-3 5066 NULL
++snd_mixart_BA1_read_5082 snd_mixart_BA1_read 5 5082 NULL
++snd_emu10k1_ptr20_read_5087 snd_emu10k1_ptr20_read 0 5087 NULL
++get_random_bytes_5091 get_random_bytes 2 5091 NULL nohasharray
++kfifo_copy_from_user_5091 kfifo_copy_from_user 3 5091 &get_random_bytes_5091 nohasharray
++blk_rq_sectors_5091 blk_rq_sectors 0 5091 &kfifo_copy_from_user_5091
++mpol_to_str_5093 mpol_to_str 2 5093 NULL
++sound_write_5102 sound_write 3 5102 NULL
++clear_dirty_5105 clear_dirty 3 5105 NULL
++ufs_add_fragments_5144 ufs_add_fragments 2 5144 NULL
++compat_ptr_5159 compat_ptr 0-1 5159 NULL
++__uwb_addr_print_5161 __uwb_addr_print 2 5161 NULL
++iwl_dbgfs_status_read_5171 iwl_dbgfs_status_read 3 5171 NULL
++acpi_pcc_get_sqty_5176 acpi_pcc_get_sqty 0 5176 NULL
++sfi_map_memory_5183 sfi_map_memory 1-2 5183 NULL
++skb_network_header_5203 skb_network_header 0 5203 NULL
++pipe_set_size_5204 pipe_set_size 2 5204 NULL
++ppp_cp_parse_cr_5214 ppp_cp_parse_cr 4 5214 NULL
++dwc2_hcd_urb_alloc_5217 dwc2_hcd_urb_alloc 2 5217 NULL
++ath6kl_debug_roam_tbl_event_5224 ath6kl_debug_roam_tbl_event 3 5224 NULL
++ssb_ioremap_5228 ssb_ioremap 2 5228 NULL nohasharray
++konepure_sysfs_write_5228 konepure_sysfs_write 6 5228 &ssb_ioremap_5228
++isdn_ppp_skb_push_5236 isdn_ppp_skb_push 2 5236 NULL
++do_atmif_sioc_5247 do_atmif_sioc 3 5247 NULL
++gfn_to_hva_memslot_5265 gfn_to_hva_memslot 2 5265 NULL
++alloc_cache_blocks_with_hash_5285 alloc_cache_blocks_with_hash 2 5285 NULL
++__gfn_to_hva_memslot_5304 __gfn_to_hva_memslot 0-2 5304 NULL
++sbc_get_write_same_sectors_5317 sbc_get_write_same_sectors 0 5317 NULL
++pwr_elp_enter_read_5324 pwr_elp_enter_read 3 5324 NULL
++allocate_cnodes_5329 allocate_cnodes 1 5329 NULL
++ps_pspoll_utilization_read_5361 ps_pspoll_utilization_read 3 5361 NULL
++cciss_allocate_sg_chain_blocks_5368 cciss_allocate_sg_chain_blocks 3-2 5368 NULL
++kvm_pin_pages_5369 kvm_pin_pages 2 5369 NULL
++bitmap_fold_5396 bitmap_fold 4 5396 NULL
++nilfs_palloc_entries_per_group_5418 nilfs_palloc_entries_per_group 0 5418 NULL
++sfi_map_table_5462 sfi_map_table 1 5462 NULL
++xfs_efd_init_5463 xfs_efd_init 3 5463 NULL
++xfs_efi_init_5476 xfs_efi_init 2 5476 NULL
++ubi_leb_write_5478 ubi_leb_write 4-5 5478 NULL
++cifs_security_flags_proc_write_5484 cifs_security_flags_proc_write 3 5484 NULL
++tty_write_5494 tty_write 3 5494 NULL
++tomoyo_update_domain_5498 tomoyo_update_domain 2 5498 NULL nohasharray
++ieee80211_if_fmt_last_beacon_5498 ieee80211_if_fmt_last_beacon 3 5498 &tomoyo_update_domain_5498
++__max_nr_grant_frames_5505 __max_nr_grant_frames 0 5505 NULL
++spidev_message_5518 spidev_message 3 5518 NULL
++ieee80211_if_fmt_auto_open_plinks_5534 ieee80211_if_fmt_auto_open_plinks 3 5534 NULL
++get_entry_msg_len_5552 get_entry_msg_len 0 5552 NULL
++brcmu_pkt_buf_get_skb_5556 brcmu_pkt_buf_get_skb 1 5556 NULL
++le_readq_5557 le_readq 0 5557 NULL
++inw_5558 inw 0 5558 NULL
++fir16_create_5574 fir16_create 3 5574 NULL
++bioset_create_5580 bioset_create 1 5580 NULL
++oz_ep_alloc_5587 oz_ep_alloc 2 5587 NULL
++usb_dump_device_descriptor_5599 usb_dump_device_descriptor 0 5599 NULL
++ldm_frag_add_5611 ldm_frag_add 2 5611 NULL
++compat_copy_entries_5617 compat_copy_entries 0 5617 NULL
++SYSC_fsetxattr_5639 SYSC_fsetxattr 4 5639 NULL
++ext4_xattr_get_5661 ext4_xattr_get 0 5661 NULL
++posix_clock_register_5662 posix_clock_register 2 5662 NULL
++mthca_map_reg_5664 mthca_map_reg 2-3 5664 NULL
++__videobuf_alloc_vb_5665 __videobuf_alloc_vb 1 5665 NULL
++wb_clear_dirty_5684 wb_clear_dirty 2 5684 NULL
++get_arg_5694 get_arg 3 5694 NULL
++subbuf_read_actor_5708 subbuf_read_actor 3 5708 NULL
++vmw_kms_readback_5727 vmw_kms_readback 6 5727 NULL
++reexecute_instruction_5733 reexecute_instruction 2 5733 NULL
++rts51x_transfer_data_partial_5735 rts51x_transfer_data_partial 6 5735 NULL
++ubi_cdev_compat_ioctl_5746 ubi_cdev_compat_ioctl 3 5746 NULL
++sctp_setsockopt_autoclose_5775 sctp_setsockopt_autoclose 3 5775 NULL nohasharray
++qlcnic_83xx_sysfs_flash_read_handler_5775 qlcnic_83xx_sysfs_flash_read_handler 6 5775 &sctp_setsockopt_autoclose_5775
++compat_sys_writev_5784 compat_sys_writev 3 5784 NULL
++__vxge_hw_blockpool_malloc_5786 __vxge_hw_blockpool_malloc 2 5786 NULL
++skb_copy_datagram_iovec_5806 skb_copy_datagram_iovec 2-4 5806 NULL
++nv50_disp_pioc_create__5812 nv50_disp_pioc_create_ 5 5812 NULL
++ceph_x_encrypt_buflen_5829 ceph_x_encrypt_buflen 0-1 5829 NULL
++autofs4_root_compat_ioctl_5838 autofs4_root_compat_ioctl 3 5838 NULL
++ceph_msg_new_5846 ceph_msg_new 2 5846 NULL
++ixgb_check_copybreak_5847 ixgb_check_copybreak 3 5847 NULL
++setup_req_5848 setup_req 3 5848 NULL
++rx_filter_max_arp_queue_dep_read_5851 rx_filter_max_arp_queue_dep_read 3 5851 NULL
++compat_sys_move_pages_5861 compat_sys_move_pages 2 5861 NULL nohasharray
++uinput_compat_ioctl_5861 uinput_compat_ioctl 3 5861 &compat_sys_move_pages_5861
++paging64_walk_addr_5887 paging64_walk_addr 3 5887 NULL
++port_show_regs_5904 port_show_regs 3 5904 NULL
++rbd_segment_length_5907 rbd_segment_length 0-3-2 5907 NULL
++uhci_debug_read_5911 uhci_debug_read 3 5911 NULL
++qla82xx_pci_mem_read_2M_5912 qla82xx_pci_mem_read_2M 2 5912 NULL
++ttm_bo_kmap_ttm_5922 ttm_bo_kmap_ttm 3 5922 NULL
++lbs_highsnr_read_5931 lbs_highsnr_read 3 5931 NULL
++ps_poll_ps_poll_timeouts_read_5934 ps_poll_ps_poll_timeouts_read 3 5934 NULL
++edac_device_alloc_ctl_info_5941 edac_device_alloc_ctl_info 1 5941 NULL
++tipc_subseq_alloc_5957 tipc_subseq_alloc 1 5957 NULL
++__apu_get_register_5967 __apu_get_register 0 5967 NULL
++ieee80211_if_fmt_rc_rateidx_mask_5ghz_5971 ieee80211_if_fmt_rc_rateidx_mask_5ghz 3 5971 NULL
++native_pte_val_5978 native_pte_val 0 5978 NULL
++SyS_semop_5980 SyS_semop 3 5980 NULL
++ntfs_rl_append_6037 ntfs_rl_append 2-4 6037 NULL
++da9052_request_irq_6058 da9052_request_irq 2 6058 NULL
++sctp_setsockopt_connectx_6073 sctp_setsockopt_connectx 3 6073 NULL
++rts51x_ms_rw_multi_sector_6076 rts51x_ms_rw_multi_sector 3-4 6076 NULL
++md_trim_bio_6078 md_trim_bio 2 6078 NULL
++ipmi_addr_length_6110 ipmi_addr_length 0 6110 NULL
++dfs_global_file_write_6112 dfs_global_file_write 3 6112 NULL
++matrix_keypad_build_keymap_6129 matrix_keypad_build_keymap 3 6129 NULL
++nouveau_parent_create__6131 nouveau_parent_create_ 7 6131 NULL
++ieee80211_if_fmt_beacon_timeout_6153 ieee80211_if_fmt_beacon_timeout 3 6153 NULL
++ivtv_copy_buf_to_user_6159 ivtv_copy_buf_to_user 4 6159 NULL
++vdma_mem_alloc_6171 vdma_mem_alloc 1 6171 NULL
++wl1251_cmd_template_set_6172 wl1251_cmd_template_set 4 6172 NULL
++paging64_walk_addr_generic_6180 paging64_walk_addr_generic 4 6180 NULL
++qp_host_get_user_memory_6189 qp_host_get_user_memory 1-2 6189 NULL
++mxt_show_instance_6207 mxt_show_instance 2-0 6207 NULL
++v4l2_ctrl_new_std_menu_6221 v4l2_ctrl_new_std_menu 4 6221 NULL
++mqueue_read_file_6228 mqueue_read_file 3 6228 NULL
++f_hidg_read_6238 f_hidg_read 3 6238 NULL
++fbcon_prepare_logo_6246 fbcon_prepare_logo 5 6246 NULL
++pcpu_next_pop_6277 pcpu_next_pop 4 6277 NULL
++tx_tx_start_null_frame_read_6281 tx_tx_start_null_frame_read 3 6281 NULL
++snd_hda_override_conn_list_6282 snd_hda_override_conn_list 3 6282 NULL nohasharray
++xenbus_file_write_6282 xenbus_file_write 3 6282 &snd_hda_override_conn_list_6282
++posix_acl_fix_xattr_to_user_6283 posix_acl_fix_xattr_to_user 2 6283 NULL
++paging64_gva_to_gpa_nested_6287 paging64_gva_to_gpa_nested 2 6287 NULL
++nf_nat_ipv6_manip_pkt_6289 nf_nat_ipv6_manip_pkt 2 6289 NULL
++nf_nat_sack_adjust_6297 nf_nat_sack_adjust 2 6297 NULL
++mid_get_vbt_data_r10_6308 mid_get_vbt_data_r10 2 6308 NULL
++qlcnic_sriov_alloc_bc_msg_6309 qlcnic_sriov_alloc_bc_msg 2 6309 NULL
++SyS_mincore_6329 SyS_mincore 1 6329 NULL
++fuse_get_req_for_background_6337 fuse_get_req_for_background 2 6337 NULL
++ucs2_strnlen_6342 ucs2_strnlen 0 6342 NULL
++mei_dbgfs_read_devstate_6352 mei_dbgfs_read_devstate 3 6352 NULL
++_proc_do_string_6376 _proc_do_string 2 6376 NULL
++osd_req_read_sg_kern_6378 osd_req_read_sg_kern 5 6378 NULL
++isku_sysfs_write_light_6406 isku_sysfs_write_light 6 6406 NULL
++posix_acl_fix_xattr_userns_6420 posix_acl_fix_xattr_userns 4 6420 NULL
++ipr_change_queue_depth_6431 ipr_change_queue_depth 2 6431 NULL
++__alloc_bootmem_node_nopanic_6432 __alloc_bootmem_node_nopanic 2-3 6432 NULL
++paging32_gva_to_gpa_nested_6442 paging32_gva_to_gpa_nested 2 6442 NULL
++mlx4_ib_reg_user_mr_6471 mlx4_ib_reg_user_mr 2-3 6471 NULL nohasharray
++ext4_compat_ioctl_6471 ext4_compat_ioctl 3 6471 &mlx4_ib_reg_user_mr_6471
++ieee80211_if_fmt_dot11MeshMaxRetries_6476 ieee80211_if_fmt_dot11MeshMaxRetries 3 6476 NULL
++qp_memcpy_from_queue_6479 qp_memcpy_from_queue 4-5 6479 NULL
++cipso_v4_map_lvl_hton_6490 cipso_v4_map_lvl_hton 0 6490 NULL
++dbg_intr_buf_6501 dbg_intr_buf 2 6501 NULL
++mei_read_6507 mei_read 3 6507 NULL
++read_file_disable_ani_6536 read_file_disable_ani 3 6536 NULL
++rndis_set_oid_6547 rndis_set_oid 4 6547 NULL
++wdm_read_6549 wdm_read 3 6549 NULL
++isku_sysfs_write_keys_easyzone_6553 isku_sysfs_write_keys_easyzone 6 6553 NULL
++fb_alloc_cmap_6554 fb_alloc_cmap 2 6554 NULL
++SyS_semtimedop_6563 SyS_semtimedop 3 6563 NULL
++SyS_fcntl64_6582 SyS_fcntl64 3 6582 NULL
++snmp_mib_init_6604 snmp_mib_init 2-3 6604 NULL
++ecryptfs_filldir_6622 ecryptfs_filldir 3 6622 NULL
++compat_SyS_shmat_6642 compat_SyS_shmat 2 6642 NULL
++virtscsi_alloc_tgt_6643 virtscsi_alloc_tgt 2 6643 NULL
++aac_srcv_ioremap_6659 aac_srcv_ioremap 2 6659 NULL
++process_rcvd_data_6679 process_rcvd_data 3 6679 NULL
++ql_process_mac_rx_skb_6689 ql_process_mac_rx_skb 4 6689 NULL
++btrfs_lookup_csums_range_6696 btrfs_lookup_csums_range 2 6696 NULL
++ps_pspoll_max_apturn_read_6699 ps_pspoll_max_apturn_read 3 6699 NULL
++bnad_debugfs_write_regrd_6706 bnad_debugfs_write_regrd 3 6706 NULL
++mpeg_read_6708 mpeg_read 3 6708 NULL
++set_orig_insn_6712 set_orig_insn 3 6712 NULL
++video_proc_write_6724 video_proc_write 3 6724 NULL
++posix_acl_xattr_count_6725 posix_acl_xattr_count 0-1 6725 NULL
++rds_rdma_pages_6735 rds_rdma_pages 0 6735 NULL
++sfi_check_table_6772 sfi_check_table 1 6772 NULL
++iwl_dbgfs_channels_read_6784 iwl_dbgfs_channels_read 3 6784 NULL
++ieee80211_if_read_6785 ieee80211_if_read 3 6785 NULL
++hdlcdrv_register_6792 hdlcdrv_register 2 6792 NULL
++tx_tx_done_data_read_6799 tx_tx_done_data_read 3 6799 NULL
++make_8259A_irq_6828 make_8259A_irq 1 6828 NULL
++calc_pages_for_6838 calc_pages_for 0-1-2 6838 NULL
++mon_bin_read_6841 mon_bin_read 3 6841 NULL
++snd_cs4281_BA0_read_6847 snd_cs4281_BA0_read 5 6847 NULL
++ieee80211_if_fmt_path_refresh_time_6888 ieee80211_if_fmt_path_refresh_time 3 6888 NULL nohasharray
++raw_seticmpfilter_6888 raw_seticmpfilter 3 6888 &ieee80211_if_fmt_path_refresh_time_6888
++dlmfs_file_write_6892 dlmfs_file_write 3 6892 NULL
++spi_show_regs_6911 spi_show_regs 3 6911 NULL nohasharray
++proc_sessionid_read_6911 proc_sessionid_read 3 6911 &spi_show_regs_6911 nohasharray
++acm_alloc_minor_6911 acm_alloc_minor 0 6911 &proc_sessionid_read_6911
++__kfifo_dma_in_finish_r_6913 __kfifo_dma_in_finish_r 2-3 6913 NULL
++do_msgrcv_6921 do_msgrcv 3 6921 NULL
++cache_do_downcall_6926 cache_do_downcall 3 6926 NULL
++qsfp_cks_6945 qsfp_cks 0-2 6945 NULL
++pch_uart_hal_read_6961 pch_uart_hal_read 0 6961 NULL
++videobuf_dma_init_kernel_6963 videobuf_dma_init_kernel 3 6963 NULL
++rsa_extract_mpi_6973 rsa_extract_mpi 5 6973 NULL
++crypto_authenc_esn_setkey_6985 crypto_authenc_esn_setkey 3 6985 NULL
++request_key_async_6990 request_key_async 4 6990 NULL
++tpl_write_6998 tpl_write 3 6998 NULL
++r871x_set_wpa_ie_7000 r871x_set_wpa_ie 3 7000 NULL
++cipso_v4_gentag_enum_7006 cipso_v4_gentag_enum 0 7006 NULL
++tracing_cpumask_read_7010 tracing_cpumask_read 3 7010 NULL
++wimax_msg_7030 wimax_msg 4 7030 NULL
++ipath_get_base_info_7043 ipath_get_base_info 3 7043 NULL
++snd_pcm_oss_bytes_7051 snd_pcm_oss_bytes 2 7051 NULL
++event_enable_read_7074 event_enable_read 3 7074 NULL
++beacon_interval_read_7091 beacon_interval_read 3 7091 NULL
++lp_compat_ioctl_7098 lp_compat_ioctl 3 7098 NULL
++pipeline_enc_rx_stat_fifo_int_read_7107 pipeline_enc_rx_stat_fifo_int_read 3 7107 NULL
++check_header_7108 check_header 0 7108 NULL
++qlcnic_enable_msix_7144 qlcnic_enable_msix 2 7144 NULL
++__alloc_objio_seg_7203 __alloc_objio_seg 1 7203 NULL
++batadv_check_unicast_ttvn_7206 batadv_check_unicast_ttvn 3 7206 NULL
++sys32_ipc_7238 sys32_ipc 3-5-6 7238 NULL
++get_param_h_7247 get_param_h 0 7247 NULL
++af_alg_make_sg_7254 af_alg_make_sg 3 7254 NULL
++vm_mmap_pgoff_7259 vm_mmap_pgoff 0 7259 NULL
++dma_ops_alloc_addresses_7272 dma_ops_alloc_addresses 3-4-5 7272 NULL
++rx_rate_rx_frames_per_rates_read_7282 rx_rate_rx_frames_per_rates_read 3 7282 NULL
++isku_sysfs_write_macro_7293 isku_sysfs_write_macro 6 7293 NULL
++wb_remove_mapping_7307 wb_remove_mapping 2 7307 NULL
++mgmt_control_7349 mgmt_control 3 7349 NULL
++ext3_free_blocks_7362 ext3_free_blocks 3-4 7362 NULL
++ieee80211_if_read_dot11MeshHWMPactivePathTimeout_7368 ieee80211_if_read_dot11MeshHWMPactivePathTimeout 3 7368 NULL
++hweight_long_7388 hweight_long 0-1 7388 NULL
++vhost_scsi_compat_ioctl_7393 vhost_scsi_compat_ioctl 3 7393 NULL
++sl_change_mtu_7396 sl_change_mtu 2 7396 NULL
++readb_7401 readb 0 7401 NULL
++drm_property_create_blob_7414 drm_property_create_blob 2 7414 NULL
++ip_options_get_alloc_7448 ip_options_get_alloc 1 7448 NULL
++SYSC_setgroups_7454 SYSC_setgroups 1 7454 NULL
++numa_emulation_7466 numa_emulation 2 7466 NULL
++__mutex_lock_common_7469 __mutex_lock_common 0 7469 NULL
++garp_request_join_7471 garp_request_join 4 7471 NULL
++compat_sys_msgrcv_7482 compat_sys_msgrcv 2 7482 NULL
++snd_pcm_lib_read1_7491 snd_pcm_lib_read1 0-3 7491 NULL
++sdhci_alloc_host_7509 sdhci_alloc_host 2 7509 NULL nohasharray
++ahash_instance_headroom_7509 ahash_instance_headroom 0 7509 &sdhci_alloc_host_7509
++array_zalloc_7519 array_zalloc 1-2 7519 NULL
++goal_in_my_reservation_7553 goal_in_my_reservation 3 7553 NULL
++smk_read_mapped_7562 smk_read_mapped 3 7562 NULL
++btrfs_block_rsv_add_7579 btrfs_block_rsv_add 3 7579 NULL
++ext3_try_to_allocate_7590 ext3_try_to_allocate 5-3 7590 NULL
++groups_alloc_7614 groups_alloc 1 7614 NULL
++sg_virt_7616 sg_virt 0 7616 NULL
++skb_copy_expand_7685 skb_copy_expand 2-3 7685 NULL nohasharray
++acpi_ex_allocate_name_string_7685 acpi_ex_allocate_name_string 2-1 7685 &skb_copy_expand_7685
++acpi_ns_get_pathname_length_7699 acpi_ns_get_pathname_length 0 7699 NULL
++dev_write_7708 dev_write 3 7708 NULL
++unmap_region_7709 unmap_region 1 7709 NULL
++brcmf_sdcard_send_buf_7713 brcmf_sdcard_send_buf 6 7713 NULL
++set_bypass_pwup_pfs_7742 set_bypass_pwup_pfs 3 7742 NULL
++vxge_device_register_7752 vxge_device_register 4 7752 NULL
++osdv2_attr_list_elem_size_7763 osdv2_attr_list_elem_size 0-1 7763 NULL
++ubi_io_read_vid_hdr_7766 ubi_io_read_vid_hdr 0 7766 NULL
++ioread32be_7773 ioread32be 0 7773 NULL
++alloc_candev_7776 alloc_candev 1-2 7776 NULL
++dfs_global_file_read_7787 dfs_global_file_read 3 7787 NULL
++bnx2_nvram_write_7790 bnx2_nvram_write 4-2 7790 NULL
++diva_os_copy_from_user_7792 diva_os_copy_from_user 4 7792 NULL
++ubifs_leb_read_7828 ubifs_leb_read 0 7828 NULL
++dvb_dmxdev_read_sec_7892 dvb_dmxdev_read_sec 4 7892 NULL
++xfs_trans_get_efi_7898 xfs_trans_get_efi 2 7898 NULL
++gfs2_tune_get_i_7903 gfs2_tune_get_i 0 7903 NULL
++ext3_group_extend_7911 ext3_group_extend 3 7911 NULL
++libfc_host_alloc_7917 libfc_host_alloc 2 7917 NULL
++f_hidg_write_7932 f_hidg_write 3 7932 NULL
++io_apic_setup_irq_pin_once_7934 io_apic_setup_irq_pin_once 1 7934 NULL
++hash_netiface6_expire_7944 hash_netiface6_expire 3 7944 NULL
++integrity_digsig_verify_7956 integrity_digsig_verify 3 7956 NULL
++smk_write_load_self_7958 smk_write_load_self 3 7958 NULL
++sys_mbind_7990 sys_mbind 5 7990 NULL
++tt3650_ci_msg_locked_8013 tt3650_ci_msg_locked 4 8013 NULL
++vcs_read_8017 vcs_read 3 8017 NULL
++normalize_up_8037 normalize_up 0-1-2 8037 NULL
++vhost_add_used_and_signal_n_8038 vhost_add_used_and_signal_n 4 8038 NULL
++ms_read_multiple_pages_8052 ms_read_multiple_pages 5-4 8052 NULL
++dgrp_mon_read_8065 dgrp_mon_read 3 8065 NULL
++leb_read_lock_8070 leb_read_lock 0 8070 NULL
++alloc_targets_8074 alloc_targets 2 8074 NULL nohasharray
++qla4xxx_post_ping_evt_work_8074 qla4xxx_post_ping_evt_work 4 8074 &alloc_targets_8074
++venus_lookup_8121 venus_lookup 4 8121 NULL
++ieee80211_if_fmt_num_buffered_multicast_8127 ieee80211_if_fmt_num_buffered_multicast 3 8127 NULL
++dma_map_area_8178 dma_map_area 5-2-3 8178 NULL
++ore_truncate_8181 ore_truncate 3 8181 NULL
++__sk_mem_schedule_8185 __sk_mem_schedule 2 8185 NULL
++ieee80211_if_fmt_dot11MeshHoldingTimeout_8187 ieee80211_if_fmt_dot11MeshHoldingTimeout 3 8187 NULL
++recent_mt_proc_write_8206 recent_mt_proc_write 3 8206 NULL
++rt2x00debug_write_bbp_8212 rt2x00debug_write_bbp 3 8212 NULL
++ad7879_spi_multi_read_8218 ad7879_spi_multi_read 3 8218 NULL
++play_iframe_8219 play_iframe 3 8219 NULL
++create_log_8225 create_log 2 8225 NULL nohasharray
++kvm_mmu_page_set_gfn_8225 kvm_mmu_page_set_gfn 2 8225 &create_log_8225
++sctp_ssnmap_size_8228 sctp_ssnmap_size 0-1-2 8228 NULL
++ceph_sync_write_8233 ceph_sync_write 4 8233 NULL
++bnx2x_iov_get_max_queue_count_8235 bnx2x_iov_get_max_queue_count 0 8235 NULL
++check_xattr_ref_inode_8244 check_xattr_ref_inode 0 8244 NULL
++add_rx_skb_8257 add_rx_skb 3 8257 NULL
++t3_init_l2t_8261 t3_init_l2t 1 8261 NULL
++init_cdev_8274 init_cdev 1 8274 NULL
++rproc_recovery_write_8281 rproc_recovery_write 3 8281 NULL
++qib_decode_7220_err_8315 qib_decode_7220_err 3 8315 NULL
++construct_key_and_link_8321 construct_key_and_link 4 8321 NULL
++ipwireless_send_packet_8328 ipwireless_send_packet 4 8328 NULL
++tracing_entries_read_8345 tracing_entries_read 3 8345 NULL
++ieee80211_if_fmt_ht_opmode_8347 ieee80211_if_fmt_ht_opmode 3 8347 NULL
++isku_sysfs_write_talk_8360 isku_sysfs_write_talk 6 8360 NULL nohasharray
++ping_getfrag_8360 ping_getfrag 4-3 8360 &isku_sysfs_write_talk_8360
++uvc_v4l2_compat_ioctl32_8375 uvc_v4l2_compat_ioctl32 3 8375 NULL
++xdi_copy_from_user_8395 xdi_copy_from_user 4 8395 NULL
++zd_rf_scnprint_id_8406 zd_rf_scnprint_id 0-3 8406 NULL
++smk_write_change_rule_8411 smk_write_change_rule 3 8411 NULL nohasharray
++uvc_v4l2_ioctl_8411 uvc_v4l2_ioctl 2 8411 &smk_write_change_rule_8411
++pca953x_gpio_to_irq_8424 pca953x_gpio_to_irq 2 8424 NULL
++snd_usb_ctl_msg_8436 snd_usb_ctl_msg 8 8436 NULL
++irq_create_mapping_8437 irq_create_mapping 2 8437 NULL
++afs_cell_lookup_8482 afs_cell_lookup 2 8482 NULL
++batadv_tt_len_8502 batadv_tt_len 0-1 8502 NULL
++dev_config_8506 dev_config 3 8506 NULL
++ACL_to_cifs_posix_8509 ACL_to_cifs_posix 3 8509 NULL
++opticon_process_data_packet_8524 opticon_process_data_packet 3 8524 NULL
++pnp_resource_len_8532 pnp_resource_len 0 8532 NULL
++alloc_pg_vec_8533 alloc_pg_vec 2 8533 NULL
++ocfs2_read_virt_blocks_8538 ocfs2_read_virt_blocks 2-3 8538 NULL
++user_on_off_8552 user_on_off 2 8552 NULL
++profile_remove_8556 profile_remove 3 8556 NULL
++cache_slow_downcall_8570 cache_slow_downcall 2 8570 NULL
++mga_ioremap_8571 mga_ioremap 1-2 8571 NULL
++isr_dma0_done_read_8574 isr_dma0_done_read 3 8574 NULL
++tower_write_8580 tower_write 3 8580 NULL
++rtllib_MFIE_rate_len_8606 rtllib_MFIE_rate_len 0 8606 NULL
++shash_setkey_unaligned_8620 shash_setkey_unaligned 3 8620 NULL
++it821x_firmware_command_8628 it821x_firmware_command 3 8628 NULL
++scsi_dma_map_8632 scsi_dma_map 0 8632 NULL
++fuse_send_write_pages_8636 fuse_send_write_pages 0-5 8636 NULL
++generic_acl_set_8658 generic_acl_set 4 8658 NULL
++dio_bio_alloc_8677 dio_bio_alloc 5 8677 NULL
++lbs_bcnmiss_read_8678 lbs_bcnmiss_read 3 8678 NULL
++tc3589x_gpio_irq_unmap_8680 tc3589x_gpio_irq_unmap 2 8680 NULL
++rproc_trace_read_8686 rproc_trace_read 3 8686 NULL
++skb_frag_size_8695 skb_frag_size 0 8695 NULL
++arcfb_write_8702 arcfb_write 3 8702 NULL
++i_size_read_8703 i_size_read 0 8703 NULL nohasharray
++init_header_8703 init_header 0 8703 &i_size_read_8703
++ctrl_out_8712 ctrl_out 3-5 8712 NULL
++jffs2_acl_count_8729 jffs2_acl_count 0-1 8729 NULL
++f_dupfd_8730 f_dupfd 1 8730 NULL
++__create_irqs_8733 __create_irqs 2-1 8733 NULL
++pca953x_gpio_irq_map_8737 pca953x_gpio_irq_map 2 8737 NULL
++tx_tx_exch_expiry_read_8749 tx_tx_exch_expiry_read 3 8749 NULL
++joydev_compat_ioctl_8765 joydev_compat_ioctl 2 8765 NULL
++sys_prctl_8766 sys_prctl 4 8766 NULL
++x32_arch_ptrace_8767 x32_arch_ptrace 3-4 8767 NULL
++paging32_prefetch_gpte_8783 paging32_prefetch_gpte 4 8783 NULL
++ext4_try_to_write_inline_data_8785 ext4_try_to_write_inline_data 3-4 8785 NULL
++__bitmap_weight_8796 __bitmap_weight 0-2 8796 NULL
++cpuset_common_file_read_8800 cpuset_common_file_read 5 8800 NULL
++metronomefb_write_8823 metronomefb_write 3 8823 NULL
++icmpv6_manip_pkt_8833 icmpv6_manip_pkt 4 8833 NULL nohasharray
++get_queue_depth_8833 get_queue_depth 0 8833 &icmpv6_manip_pkt_8833
++dvb_ringbuffer_pkt_next_8834 dvb_ringbuffer_pkt_next 0-2 8834 NULL
++usb_ep_queue_8839 usb_ep_queue 0 8839 NULL
++clear_bitset_8840 clear_bitset 2 8840 NULL
++debug_debug1_read_8856 debug_debug1_read 3 8856 NULL
++wa_nep_queue_8858 wa_nep_queue 2 8858 NULL
++compressed_bio_size_8887 compressed_bio_size 0-2 8887 NULL
++tracing_max_lat_read_8890 tracing_max_lat_read 3 8890 NULL
++sdio_max_byte_size_8907 sdio_max_byte_size 0 8907 NULL
++layout_commit_8926 layout_commit 3 8926 NULL
++adjust_priv_size_8935 adjust_priv_size 0-1 8935 NULL
++driver_stats_read_8944 driver_stats_read 3 8944 NULL
++read_file_tgt_stats_8959 read_file_tgt_stats 3 8959 NULL
++seq_bitmap_list_8963 seq_bitmap_list 3 8963 NULL
++usb_allocate_stream_buffers_8964 usb_allocate_stream_buffers 3 8964 NULL
++qib_qsfp_dump_8966 qib_qsfp_dump 0-3 8966 NULL
++venus_mkdir_8967 venus_mkdir 4 8967 NULL
++vol_cdev_read_8968 vol_cdev_read 3 8968 NULL nohasharray
++seq_open_net_8968 seq_open_net 4 8968 &vol_cdev_read_8968
++bio_integrity_get_tag_8974 bio_integrity_get_tag 3 8974 NULL
++btrfs_alloc_free_block_8986 btrfs_alloc_free_block 3 8986 NULL
++snd_emu10k1_ptr_read_9026 snd_emu10k1_ptr_read 0-2 9026 NULL
++__pskb_copy_9038 __pskb_copy 2 9038 NULL
++nla_put_9042 nla_put 3 9042 NULL
++snd_emu10k1_synth_copy_from_user_9061 snd_emu10k1_synth_copy_from_user 3-5 9061 NULL
++snd_gus_dram_peek_9062 snd_gus_dram_peek 4 9062 NULL
++fib_info_hash_alloc_9075 fib_info_hash_alloc 1 9075 NULL
++string_9080 string 0 9080 NULL
++create_queues_9088 create_queues 2-3 9088 NULL
++ftdi_prepare_write_buffer_9093 ftdi_prepare_write_buffer 3 9093 NULL
++caif_stream_sendmsg_9110 caif_stream_sendmsg 4 9110 NULL nohasharray
++gfn_to_rmap_9110 gfn_to_rmap 2-3 9110 &caif_stream_sendmsg_9110
++pmcraid_change_queue_depth_9116 pmcraid_change_queue_depth 2 9116 NULL
++isku_sysfs_write_keys_macro_9120 isku_sysfs_write_keys_macro 6 9120 NULL
++mq_remove_mapping_9124 mq_remove_mapping 2 9124 NULL
++mlx4_alloc_resize_umem_9132 mlx4_alloc_resize_umem 3 9132 NULL
++ext4_list_backups_9138 ext4_list_backups 0 9138 NULL
++dbg_command_buf_9165 dbg_command_buf 2 9165 NULL
++isr_irqs_read_9181 isr_irqs_read 3 9181 NULL
++count_leading_zeros_9183 count_leading_zeros 0 9183 NULL
++alloc_group_attrs_9194 alloc_group_attrs 2 9194 NULL nohasharray
++altera_swap_ir_9194 altera_swap_ir 2 9194 &alloc_group_attrs_9194
++gx1_gx_base_9198 gx1_gx_base 0 9198 NULL
++snd_m3_get_pointer_9206 snd_m3_get_pointer 0 9206 NULL
++get_pfn_9207 get_pfn 1 9207 NULL
++virtqueue_add_9217 virtqueue_add 5-4 9217 NULL
++tx_tx_prepared_descs_read_9221 tx_tx_prepared_descs_read 3 9221 NULL
++sctp_getsockopt_delayed_ack_9232 sctp_getsockopt_delayed_ack 2 9232 NULL
++ocfs2_clear_ext_refcount_9256 ocfs2_clear_ext_refcount 4 9256 NULL
++tcf_csum_ipv4_icmp_9258 tcf_csum_ipv4_icmp 3 9258 NULL
++sparse_early_usemaps_alloc_node_9269 sparse_early_usemaps_alloc_node 4 9269 NULL
++hdpvr_read_9273 hdpvr_read 3 9273 NULL
++flakey_status_9274 flakey_status 5 9274 NULL
++qla82xx_pci_set_window_9303 qla82xx_pci_set_window 0-2 9303 NULL
++iwl_dbgfs_stations_read_9309 iwl_dbgfs_stations_read 3 9309 NULL
++ceph_sync_setxattr_9310 ceph_sync_setxattr 4 9310 NULL
++memblock_find_in_range_node_9328 memblock_find_in_range_node 0-3-4 9328 NULL
++ieee80211_if_fmt_txpower_9334 ieee80211_if_fmt_txpower 3 9334 NULL
++nvme_trans_fmt_get_parm_header_9340 nvme_trans_fmt_get_parm_header 2 9340 NULL
++ocfs2_orphan_for_truncate_9342 ocfs2_orphan_for_truncate 4 9342 NULL
++sta_beacon_loss_count_read_9370 sta_beacon_loss_count_read 3 9370 NULL
++virtqueue_add_outbuf_9395 virtqueue_add_outbuf 3 9395 NULL
++read_9397 read 3 9397 NULL
++nf_nat_sip_expect_9418 nf_nat_sip_expect 8 9418 NULL
++bm_realloc_pages_9431 bm_realloc_pages 2 9431 NULL
++ffs_ep0_write_9438 ffs_ep0_write 3 9438 NULL
++kmalloc_array_9444 kmalloc_array 1-2 9444 NULL
++ieee80211_if_fmt_fwded_unicast_9454 ieee80211_if_fmt_fwded_unicast 3 9454 NULL
++mcs_unwrap_mir_9455 mcs_unwrap_mir 3 9455 NULL
++ext3_xattr_set_acl_9467 ext3_xattr_set_acl 4 9467 NULL
++agp_generic_alloc_user_9470 agp_generic_alloc_user 1 9470 NULL
++__alloc_preds_9492 __alloc_preds 2 9492 NULL nohasharray
++crypt_status_9492 crypt_status 5 9492 &__alloc_preds_9492
++lp_write_9511 lp_write 3 9511 NULL
++xen_remap_exchanged_ptes_9513 xen_remap_exchanged_ptes 1 9513 NULL
++scsi_tgt_kspace_exec_9522 scsi_tgt_kspace_exec 8 9522 NULL
++read_file_dma_9530 read_file_dma 3 9530 NULL
++ext3_alloc_branch_9534 ext3_alloc_branch 5 9534 NULL
++audit_log_n_untrustedstring_9548 audit_log_n_untrustedstring 3 9548 NULL
++fw_node_create_9559 fw_node_create 2 9559 NULL
++ipath_get_user_pages_9561 ipath_get_user_pages 1-2 9561 NULL
++kobj_map_9566 kobj_map 2-3 9566 NULL
++f2fs_read_data_pages_9574 f2fs_read_data_pages 4 9574 NULL
++biovec_create_pools_9575 biovec_create_pools 2 9575 NULL
++ieee80211_tdls_mgmt_9581 ieee80211_tdls_mgmt 8 9581 NULL
++use_block_rsv_9597 use_block_rsv 3 9597 NULL
++do_sync_9604 do_sync 1 9604 NULL
++snd_emu10k1_fx8010_read_9605 snd_emu10k1_fx8010_read 5-6 9605 NULL
++saa7164_buffer_alloc_user_9627 saa7164_buffer_alloc_user 2 9627 NULL
++ceph_copy_user_to_page_vector_9635 ceph_copy_user_to_page_vector 4-3 9635 NULL
++compat_sys_keyctl_9639 compat_sys_keyctl 4-2-3 9639 NULL
++ocfs2_xattr_get_rec_9652 ocfs2_xattr_get_rec 0 9652 NULL
++uvc_alloc_buffers_9656 uvc_alloc_buffers 2-3 9656 NULL
++queue_received_packet_9657 queue_received_packet 5 9657 NULL
++snd_opl4_mem_proc_write_9670 snd_opl4_mem_proc_write 5 9670 NULL
++ks8842_read16_9676 ks8842_read16 0 9676 NULL nohasharray
++dns_query_9676 dns_query 3 9676 &ks8842_read16_9676
++qib_7322_handle_hwerrors_9678 qib_7322_handle_hwerrors 3 9678 NULL
++__erst_read_from_storage_9690 __erst_read_from_storage 0 9690 NULL
++is_hole_9694 is_hole 2 9694 NULL nohasharray
++x25_asy_compat_ioctl_9694 x25_asy_compat_ioctl 4 9694 &is_hole_9694
++fnb_9703 fnb 2-3 9703 NULL
++fuse_iter_npages_9705 fuse_iter_npages 0 9705 NULL nohasharray
++ieee80211_if_read_aid_9705 ieee80211_if_read_aid 3 9705 &fuse_iter_npages_9705
++nla_get_u8_9736 nla_get_u8 0 9736 NULL
++ieee80211_if_fmt_num_mcast_sta_9738 ieee80211_if_fmt_num_mcast_sta 3 9738 NULL
++ddb_input_read_9743 ddb_input_read 3 9743 NULL
++sta_last_ack_signal_read_9751 sta_last_ack_signal_read 3 9751 NULL
++btrfs_super_root_9763 btrfs_super_root 0 9763 NULL
++__alloc_percpu_9764 __alloc_percpu 1-2 9764 NULL
++__blk_queue_init_tags_9778 __blk_queue_init_tags 2 9778 NULL
++snd_mem_proc_write_9786 snd_mem_proc_write 3 9786 NULL
++ttm_bo_fbdev_io_9805 ttm_bo_fbdev_io 4 9805 NULL
++ieee80211_if_read_state_9813 ieee80211_if_read_state 3 9813 NULL
++pnp_mem_start_9817 pnp_mem_start 0 9817 NULL
++kernel_physical_mapping_init_9818 kernel_physical_mapping_init 0-2-1 9818 NULL
++dvb_dvr_set_buffer_size_9840 dvb_dvr_set_buffer_size 2 9840 NULL
++cfg80211_send_deauth_9862 cfg80211_send_deauth 3 9862 NULL
++pmcraid_alloc_sglist_9864 pmcraid_alloc_sglist 1 9864 NULL
++btrfs_free_reserved_extent_9867 btrfs_free_reserved_extent 2 9867 NULL
++f1x_translate_sysaddr_to_cs_9868 f1x_translate_sysaddr_to_cs 2 9868 NULL
++mlx4_bitmap_alloc_range_9876 mlx4_bitmap_alloc_range 2-3 9876 NULL
++wil_read_file_ioblob_9878 wil_read_file_ioblob 3 9878 NULL
++bm_register_write_9893 bm_register_write 3 9893 NULL nohasharray
++snd_midi_event_new_9893 snd_midi_event_new 1 9893 &bm_register_write_9893
++snd_gf1_pcm_playback_copy_9895 snd_gf1_pcm_playback_copy 3-5 9895 NULL
++nonpaging_page_fault_9908 nonpaging_page_fault 2 9908 NULL
++gen6_get_total_gtt_size_9913 gen6_get_total_gtt_size 0-1 9913 NULL
++pstore_ftrace_knob_read_9947 pstore_ftrace_knob_read 3 9947 NULL
++read_file_misc_9948 read_file_misc 3 9948 NULL
++set_rxd_buffer_pointer_9950 set_rxd_buffer_pointer 8 9950 NULL
++ext2_new_blocks_9954 ext2_new_blocks 2 9954 NULL
++csum_partial_copy_fromiovecend_9957 csum_partial_copy_fromiovecend 3-4 9957 NULL
++get_free_serial_index_9969 get_free_serial_index 0 9969 NULL
++btrfs_add_link_9973 btrfs_add_link 5 9973 NULL
++ath6kl_usb_submit_ctrl_out_9978 ath6kl_usb_submit_ctrl_out 6 9978 NULL
++SYSC_move_pages_9986 SYSC_move_pages 2 9986 NULL
++aat2870_dump_reg_10019 aat2870_dump_reg 0 10019 NULL
++handle_request_10024 handle_request 9 10024 NULL
++batadv_orig_hash_add_if_10033 batadv_orig_hash_add_if 2 10033 NULL
++ieee80211_probereq_get_10040 ieee80211_probereq_get 4-5 10040 NULL
++xen_destroy_contiguous_region_10054 xen_destroy_contiguous_region 1 10054 NULL
++vfio_pci_write_10063 vfio_pci_write 3 10063 NULL
++ieee80211_set_probe_resp_10077 ieee80211_set_probe_resp 3 10077 NULL
++ufs_bitmap_search_10105 ufs_bitmap_search 0-3 10105 NULL
++get_elem_size_10110 get_elem_size 0-2 10110 NULL nohasharray
++dynamic_ps_timeout_read_10110 dynamic_ps_timeout_read 3 10110 &get_elem_size_10110
++jbd_alloc_10112 jbd_alloc 0 10112 NULL nohasharray
++gfs2_meta_read_10112 gfs2_meta_read 0 10112 &jbd_alloc_10112
++offset_to_bit_10134 offset_to_bit 0 10134 NULL
++aes_decrypt_packets_read_10155 aes_decrypt_packets_read 3 10155 NULL
++rx_out_of_mem_read_10157 rx_out_of_mem_read 3 10157 NULL
++hidg_alloc_ep_req_10159 hidg_alloc_ep_req 2 10159 NULL nohasharray
++ol_chunk_entries_10159 ol_chunk_entries 0 10159 &hidg_alloc_ep_req_10159
++stmpe_irq_unmap_10164 stmpe_irq_unmap 2 10164 NULL
++asd_store_update_bios_10165 asd_store_update_bios 4 10165 NULL
++proc_pid_attr_read_10173 proc_pid_attr_read 3 10173 NULL
++jffs2_user_setxattr_10182 jffs2_user_setxattr 4 10182 NULL
++do_ioctl_trans_10194 do_ioctl_trans 3 10194 NULL
++cciss_proc_write_10259 cciss_proc_write 3 10259 NULL
++__qlcnic_pci_sriov_enable_10281 __qlcnic_pci_sriov_enable 2 10281 NULL
++snd_rme9652_capture_copy_10287 snd_rme9652_capture_copy 5 10287 NULL
++ubi_leb_change_10289 ubi_leb_change 4 10289 NULL
++read_emulate_10310 read_emulate 2-4 10310 NULL
++read_file_spectral_count_10320 read_file_spectral_count 3 10320 NULL
++ttm_object_device_init_10321 ttm_object_device_init 2 10321 NULL
++ubi_leb_read_10328 ubi_leb_read 0 10328 NULL
++tun_sendmsg_10337 tun_sendmsg 4 10337 NULL
++get_dump_page_10338 get_dump_page 1 10338 NULL
++ufx_alloc_urb_list_10349 ufx_alloc_urb_list 3 10349 NULL
++dbAllocAny_10354 dbAllocAny 0 10354 NULL
++ath6kl_listen_int_read_10355 ath6kl_listen_int_read 3 10355 NULL
++ms_write_multiple_pages_10362 ms_write_multiple_pages 6-5 10362 NULL
++sta_ht_capa_read_10366 sta_ht_capa_read 3 10366 NULL
++ecryptfs_decode_and_decrypt_filename_10379 ecryptfs_decode_and_decrypt_filename 5 10379 NULL
++do_compat_pselect_10398 do_compat_pselect 1 10398 NULL
++fwtty_rx_10434 fwtty_rx 3 10434 NULL
++event_phy_transmit_error_read_10471 event_phy_transmit_error_read 3 10471 NULL
++ca91cx42_alloc_resource_10502 ca91cx42_alloc_resource 2 10502 NULL
++qib_alloc_fast_reg_page_list_10507 qib_alloc_fast_reg_page_list 2 10507 NULL
++sel_write_disable_10511 sel_write_disable 3 10511 NULL
++osd_req_write_sg_kern_10514 osd_req_write_sg_kern 5 10514 NULL
++rds_message_alloc_10517 rds_message_alloc 1 10517 NULL
++qlcnic_pci_sriov_enable_10519 qlcnic_pci_sriov_enable 2 10519 NULL nohasharray
++hash_netiface4_expire_10519 hash_netiface4_expire 3 10519 &qlcnic_pci_sriov_enable_10519
++ocfs2_add_refcounted_extent_10526 ocfs2_add_refcounted_extent 6 10526 NULL
++get_vm_area_caller_10527 get_vm_area_caller 1 10527 NULL
++snd_pcm_lib_read_10536 snd_pcm_lib_read 0-3 10536 NULL
++ieee80211_send_probe_req_10539 ieee80211_send_probe_req 4-6 10539 NULL
++ext4_write_begin_10576 ext4_write_begin 3-4 10576 NULL
++scrub_remap_extent_10588 scrub_remap_extent 2 10588 NULL
++otp_read_10594 otp_read 2-4-5 10594 NULL
++supply_map_read_file_10608 supply_map_read_file 3 10608 NULL
++ima_show_htable_violations_10619 ima_show_htable_violations 3 10619 NULL
++alloc_coherent_10632 alloc_coherent 2 10632 NULL
++nfs_idmap_lookup_id_10660 nfs_idmap_lookup_id 2 10660 NULL
++dtf_read_device_10663 dtf_read_device 3 10663 NULL
++parport_write_10669 parport_write 0 10669 NULL
++inl_10708 inl 0 10708 NULL nohasharray
++selinux_inode_setxattr_10708 selinux_inode_setxattr 4 10708 &inl_10708
++pvr2_ioread_read_10720 pvr2_ioread_read 3 10720 NULL nohasharray
++shash_async_setkey_10720 shash_async_setkey 3 10720 &pvr2_ioread_read_10720
++spi_sync_10731 spi_sync 0 10731 NULL
++sctp_getsockopt_maxseg_10737 sctp_getsockopt_maxseg 2 10737 NULL nohasharray
++apu_get_register_10737 apu_get_register 0 10737 &sctp_getsockopt_maxseg_10737
++compat_sys_msgsnd_10738 compat_sys_msgsnd 2 10738 NULL
++sys_syslog_10746 sys_syslog 3 10746 NULL
++alloc_one_pg_vec_page_10747 alloc_one_pg_vec_page 1 10747 NULL
++vhost_add_used_n_10760 vhost_add_used_n 3 10760 NULL
++kvm_read_guest_atomic_10765 kvm_read_guest_atomic 4-2 10765 NULL
++wb_set_dirty_10778 wb_set_dirty 2 10778 NULL
++__qp_memcpy_to_queue_10779 __qp_memcpy_to_queue 2-4 10779 NULL
++sys_bind_10799 sys_bind 3 10799 NULL
++compat_put_int_10828 compat_put_int 1 10828 NULL
++lbs_sleepparams_read_10840 lbs_sleepparams_read 3 10840 NULL
++ida_get_new_above_10853 ida_get_new_above 2 10853 NULL
++fuse_conn_max_background_read_10855 fuse_conn_max_background_read 3 10855 NULL
++ol_chunk_blocks_10864 ol_chunk_blocks 0 10864 NULL
++snd_pcm_oss_write1_10872 snd_pcm_oss_write1 3 10872 NULL
++mid_get_vbt_data_r0_10876 mid_get_vbt_data_r0 2 10876 NULL
++bl_mark_for_commit_10879 bl_mark_for_commit 2-3 10879 NULL
++get_scq_10897 get_scq 2 10897 NULL
++cgroup_write_string_10900 cgroup_write_string 5 10900 NULL
++tifm_alloc_adapter_10903 tifm_alloc_adapter 1 10903 NULL
++__copy_from_user_10918 __copy_from_user 3 10918 NULL
++da9052_map_irq_10952 da9052_map_irq 2 10952 NULL
++ci_port_test_write_10962 ci_port_test_write 3 10962 NULL
++bm_entry_read_10976 bm_entry_read 3 10976 NULL
++i915_min_freq_write_10981 i915_min_freq_write 3 10981 NULL
++sched_autogroup_write_10984 sched_autogroup_write 3 10984 NULL
++__hci_num_ctrl_10985 __hci_num_ctrl 0 10985 NULL
++xfrm_hash_alloc_10997 xfrm_hash_alloc 1 10997 NULL
++rx_filter_accum_arp_pend_requests_read_11003 rx_filter_accum_arp_pend_requests_read 3 11003 NULL
++SetLineNumber_11023 SetLineNumber 0 11023 NULL
++mb_find_next_bit_11037 mb_find_next_bit 2-3-0 11037 NULL
++tda10048_writeregbulk_11050 tda10048_writeregbulk 4 11050 NULL
++tcp_send_mss_11079 tcp_send_mss 0 11079 NULL
++count_argc_11083 count_argc 0 11083 NULL
++kvm_write_guest_cached_11106 kvm_write_guest_cached 4 11106 NULL
++stmpe_gpio_to_irq_11110 stmpe_gpio_to_irq 2 11110 NULL
++tw_change_queue_depth_11116 tw_change_queue_depth 2 11116 NULL
++page_offset_11120 page_offset 0 11120 NULL
++tracing_buffers_read_11124 tracing_buffers_read 3 11124 NULL nohasharray
++cea_db_payload_len_11124 cea_db_payload_len 0 11124 &tracing_buffers_read_11124
++alloc_alien_cache_11127 alloc_alien_cache 2 11127 NULL
++acpi_os_map_memory_11161 acpi_os_map_memory 1-2 11161 NULL
++ioat2_alloc_ring_11172 ioat2_alloc_ring 2 11172 NULL nohasharray
++snd_gf1_pcm_playback_silence_11172 snd_gf1_pcm_playback_silence 3-4 11172 &ioat2_alloc_ring_11172
++__swab16p_11220 __swab16p 0 11220 NULL
++il_dbgfs_rx_queue_read_11221 il_dbgfs_rx_queue_read 3 11221 NULL
++mmap_region_11247 mmap_region 0-2 11247 NULL
++ubifs_write_node_11258 ubifs_write_node 5-3 11258 NULL
++dm_cache_discard_bitset_resize_11262 dm_cache_discard_bitset_resize 3 11262 NULL
++hugetlbfs_read_11268 hugetlbfs_read 3 11268 NULL
++cru_detect_11272 cru_detect 1 11272 NULL
++ext4_xattr_check_names_11314 ext4_xattr_check_names 0 11314 NULL
++tcp_send_rcvq_11316 tcp_send_rcvq 3 11316 NULL
++construct_key_11329 construct_key 3 11329 NULL nohasharray
++__kfifo_out_peek_11329 __kfifo_out_peek 0-3 11329 &construct_key_11329
++next_segment_11330 next_segment 0-2-1 11330 NULL
++persistent_ram_buffer_map_11332 persistent_ram_buffer_map 1-2 11332 NULL
++ext4_get_inline_size_11349 ext4_get_inline_size 0 11349 NULL
++i915_max_freq_write_11350 i915_max_freq_write 3 11350 NULL
++sel_write_create_11353 sel_write_create 3 11353 NULL
++handle_unit_11355 handle_unit 0-1 11355 NULL
++batadv_skb_head_push_11360 batadv_skb_head_push 2 11360 NULL
++drm_vblank_init_11362 drm_vblank_init 2 11362 NULL
++qib_get_base_info_11369 qib_get_base_info 3 11369 NULL
++isku_sysfs_read_keys_capslock_11392 isku_sysfs_read_keys_capslock 6 11392 NULL
++dev_irnet_write_11398 dev_irnet_write 3 11398 NULL
++___alloc_bootmem_11410 ___alloc_bootmem 1-2 11410 NULL
++str_to_user_11411 str_to_user 2 11411 NULL
++mem_fw_gen_free_mem_blks_read_11413 mem_fw_gen_free_mem_blks_read 3 11413 NULL
++ath6kl_wmi_test_rx_11414 ath6kl_wmi_test_rx 3 11414 NULL
++adis16480_show_firmware_revision_11417 adis16480_show_firmware_revision 3 11417 NULL
++trace_options_read_11419 trace_options_read 3 11419 NULL
++xd_read_multiple_pages_11422 xd_read_multiple_pages 5-4 11422 NULL
++prepare_image_11424 prepare_image 0 11424 NULL
++vring_size_11426 vring_size 0-1-2 11426 NULL
++bttv_read_11432 bttv_read 3 11432 NULL
++create_zero_mask_11453 create_zero_mask 0-1 11453 NULL
++swp_offset_11475 swp_offset 0 11475 NULL
++sca3000_read_first_n_hw_rb_11479 sca3000_read_first_n_hw_rb 2 11479 NULL
++xfs_file_buffered_aio_write_11492 xfs_file_buffered_aio_write 4 11492 NULL
++sd_do_mode_sense_11507 sd_do_mode_sense 5 11507 NULL
++kmem_zalloc_11510 kmem_zalloc 1 11510 NULL
++twl_direction_in_11527 twl_direction_in 2 11527 NULL
++setup_IO_APIC_irq_extra_11537 setup_IO_APIC_irq_extra 1 11537 NULL
++skb_cow_data_11565 skb_cow_data 0-2 11565 NULL
++mlx4_init_cmpt_table_11569 mlx4_init_cmpt_table 3 11569 NULL
++oprofilefs_ulong_to_user_11582 oprofilefs_ulong_to_user 3 11582 NULL
++snd_pcm_action_11589 snd_pcm_action 0 11589 NULL
++fw_device_op_ioctl_11595 fw_device_op_ioctl 2 11595 NULL
++SYSC_mq_timedsend_11607 SYSC_mq_timedsend 3 11607 NULL
++add_new_bitmap_11644 add_new_bitmap 3 11644 NULL
++sisusb_send_bridge_packet_11649 sisusb_send_bridge_packet 2 11649 NULL
++nla_total_size_11658 nla_total_size 0-1 11658 NULL
++ide_queue_pc_tail_11673 ide_queue_pc_tail 5 11673 NULL
++compat_SyS_msgsnd_11675 compat_SyS_msgsnd 2-3 11675 NULL
++btrfs_alloc_delayed_item_11678 btrfs_alloc_delayed_item 1 11678 NULL
++dsp_buffer_alloc_11684 dsp_buffer_alloc 2 11684 NULL
++sctp_setsockopt_hmac_ident_11687 sctp_setsockopt_hmac_ident 3 11687 NULL
++split_11691 split 2 11691 NULL
++snd_ctl_elem_user_tlv_11695 snd_ctl_elem_user_tlv 3 11695 NULL
++blk_rq_cur_bytes_11723 blk_rq_cur_bytes 0 11723 NULL
++tcf_csum_ipv6_icmp_11738 tcf_csum_ipv6_icmp 4 11738 NULL
++nfsd4_get_drc_mem_11748 nfsd4_get_drc_mem 0-1-2 11748 NULL
++dm_bio_prison_create_11749 dm_bio_prison_create 1 11749 NULL
++iwl_dbgfs_qos_read_11753 iwl_dbgfs_qos_read 3 11753 NULL
++ps_pspoll_timeouts_read_11776 ps_pspoll_timeouts_read 3 11776 NULL
++ebt_buf_add_11779 ebt_buf_add 0 11779 NULL
++btrfs_key_blockptr_11786 btrfs_key_blockptr 0 11786 NULL
++pcpu_fc_alloc_11818 pcpu_fc_alloc 2-3 11818 NULL
++zerocopy_sg_from_iovec_11828 zerocopy_sg_from_iovec 3 11828 NULL
++sctp_setsockopt_maxseg_11829 sctp_setsockopt_maxseg 3 11829 NULL
++rts51x_read_status_11830 rts51x_read_status 4 11830 NULL
++unix_stream_connect_11844 unix_stream_connect 3 11844 NULL
++nf_nat_sdp_media_11863 nf_nat_sdp_media 9 11863 NULL
++ecryptfs_copy_filename_11868 ecryptfs_copy_filename 4 11868 NULL
++ieee80211_rx_bss_info_11887 ieee80211_rx_bss_info 3 11887 NULL
++kmalloc_slab_11917 kmalloc_slab 1 11917 NULL
++fs_devrw_entry_11924 fs_devrw_entry 3 11924 NULL
++bitmap_remap_11929 bitmap_remap 5 11929 NULL
++atomic_sub_return_11939 atomic_sub_return 0-1 11939 NULL
++dccp_feat_clone_sp_val_11942 dccp_feat_clone_sp_val 3 11942 NULL
++f1x_swap_interleaved_region_11970 f1x_swap_interleaved_region 0-2 11970 NULL
++atmel_read16_11981 atmel_read16 0 11981 NULL
++read_and_add_raw_conns_11987 read_and_add_raw_conns 0 11987 NULL
++ftdi_elan_total_command_size_12045 ftdi_elan_total_command_size 0 12045 NULL
++ieee80211_if_read_user_power_level_12050 ieee80211_if_read_user_power_level 3 12050 NULL
++il4965_ucode_tx_stats_read_12064 il4965_ucode_tx_stats_read 3 12064 NULL
++ptc_proc_write_12076 ptc_proc_write 3 12076 NULL
++ubifs_recover_log_leb_12079 ubifs_recover_log_leb 3 12079 NULL
++pse36_gfn_delta_12087 pse36_gfn_delta 0-1 12087 NULL
++alloc_bulk_urbs_generic_12127 alloc_bulk_urbs_generic 5 12127 NULL
++set_powered_12129 set_powered 4 12129 NULL
++nfs_writedata_alloc_12133 nfs_writedata_alloc 2 12133 NULL
++ramoops_init_prz_12134 ramoops_init_prz 5 12134 NULL
++xfs_handle_to_dentry_12135 xfs_handle_to_dentry 3 12135 NULL
++batadv_add_packet_12136 batadv_add_packet 3 12136 NULL
++rawv6_seticmpfilter_12137 rawv6_seticmpfilter 5 12137 NULL
++vmw_fifo_reserve_12141 vmw_fifo_reserve 2 12141 NULL
++get_idx_gc_leb_12148 get_idx_gc_leb 0 12148 NULL
++btmrvl_sdio_host_to_card_12152 btmrvl_sdio_host_to_card 3 12152 NULL
++vmbus_open_12154 vmbus_open 2-3 12154 NULL
++wil_rxdesc_phy_length_12165 wil_rxdesc_phy_length 0 12165 NULL
++dma_memcpy_to_iovec_12173 dma_memcpy_to_iovec 5 12173 NULL
++ddp_make_gl_12179 ddp_make_gl 1 12179 NULL
++compat_do_arpt_set_ctl_12184 compat_do_arpt_set_ctl 4 12184 NULL
++ip_generic_getfrag_12187 ip_generic_getfrag 3-4 12187 NULL
++bl_is_sector_init_12199 bl_is_sector_init 2 12199 NULL
++scaled_div_12201 scaled_div 1-2 12201 NULL
++free_initrd_mem_12203 free_initrd_mem 1 12203 NULL
++receive_copy_12216 receive_copy 3 12216 NULL
++snd_pcm_kernel_ioctl_12219 snd_pcm_kernel_ioctl 0 12219 NULL
++fuse_get_req_12221 fuse_get_req 2 12221 NULL nohasharray
++aat2870_reg_read_file_12221 aat2870_reg_read_file 3 12221 &fuse_get_req_12221
++__alloc_bootmem_low_nopanic_12235 __alloc_bootmem_low_nopanic 1-2 12235 NULL
++ib_uverbs_unmarshall_recv_12251 ib_uverbs_unmarshall_recv 5 12251 NULL
++ath_descdma_setup_12257 ath_descdma_setup 5 12257 NULL
++shash_compat_setkey_12267 shash_compat_setkey 3 12267 NULL
++add_sctp_bind_addr_12269 add_sctp_bind_addr 3 12269 NULL
++note_last_dentry_12285 note_last_dentry 3 12285 NULL
++roundup_to_multiple_of_64_12288 roundup_to_multiple_of_64 0-1 12288 NULL nohasharray
++il_dbgfs_nvm_read_12288 il_dbgfs_nvm_read 3 12288 &roundup_to_multiple_of_64_12288
++vxge_get_num_vfs_12302 vxge_get_num_vfs 0 12302 NULL
++wrap_min_12303 wrap_min 0-1-2 12303 NULL
++tipc_msg_build_12326 tipc_msg_build 4 12326 NULL
++pcbit_writecmd_12332 pcbit_writecmd 2 12332 NULL
++mptctl_ioctl_12355 mptctl_ioctl 2 12355 NULL
++paging32_walk_addr_12359 paging32_walk_addr 3 12359 NULL
++__nf_ct_ext_add_length_12364 __nf_ct_ext_add_length 3 12364 NULL
++xfs_iext_inline_to_direct_12384 xfs_iext_inline_to_direct 2 12384 NULL
++btrfs_file_extent_ram_bytes_12391 btrfs_file_extent_ram_bytes 0 12391 NULL
++hbucket_elem_add_12416 hbucket_elem_add 3 12416 NULL
++ieee80211_if_read_num_mcast_sta_12419 ieee80211_if_read_num_mcast_sta 3 12419 NULL
++skb_do_copy_data_nocache_12465 skb_do_copy_data_nocache 5 12465 NULL
++qla4_82xx_pci_mem_write_direct_12479 qla4_82xx_pci_mem_write_direct 2 12479 NULL
++x25_sendmsg_12487 x25_sendmsg 4 12487 NULL
++rtllib_auth_challenge_12493 rtllib_auth_challenge 3 12493 NULL
++fnic_trace_ctrl_read_12497 fnic_trace_ctrl_read 3 12497 NULL
++nfs_readdir_make_qstr_12509 nfs_readdir_make_qstr 3 12509 NULL
++qib_alloc_fast_reg_mr_12526 qib_alloc_fast_reg_mr 2 12526 NULL
++kvm_setup_async_pf_12555 kvm_setup_async_pf 3 12555 NULL
++ib_umem_get_12557 ib_umem_get 2-3 12557 NULL
++hvc_alloc_12579 hvc_alloc 4 12579 NULL
++snd_pcm_plugin_alloc_12580 snd_pcm_plugin_alloc 2 12580 NULL
++macvtap_compat_ioctl_12587 macvtap_compat_ioctl 3 12587 NULL
++pcpu_extend_area_map_12589 pcpu_extend_area_map 2 12589 NULL
++ipv6_get_l4proto_12600 ipv6_get_l4proto 2 12600 NULL
++vhci_put_user_12604 vhci_put_user 4 12604 NULL
++fc_fcp_frame_alloc_12624 fc_fcp_frame_alloc 2 12624 NULL
++pwr_rcvd_awake_bcns_cnt_read_12632 pwr_rcvd_awake_bcns_cnt_read 3 12632 NULL
++ctrl_cdev_compat_ioctl_12634 ctrl_cdev_compat_ioctl 3 12634 NULL
++pn_sendmsg_12640 pn_sendmsg 4 12640 NULL
++dwc3_link_state_write_12641 dwc3_link_state_write 3 12641 NULL
++wb_create_12651 wb_create 1 12651 NULL
++ocfs2_read_block_12659 ocfs2_read_block 0 12659 NULL
++sel_read_class_12669 sel_read_class 3 12669 NULL nohasharray
++sparse_mem_maps_populate_node_12669 sparse_mem_maps_populate_node 4 12669 &sel_read_class_12669
++ieee80211_if_read_num_buffered_multicast_12716 ieee80211_if_read_num_buffered_multicast 3 12716 NULL
++ivtv_write_12721 ivtv_write 3 12721 NULL
++key_rx_spec_read_12736 key_rx_spec_read 3 12736 NULL
++__mei_cl_async_send_12737 __mei_cl_async_send 3 12737 NULL
++__videobuf_alloc_cached_12740 __videobuf_alloc_cached 1 12740 NULL
++ieee80211_if_read_dot11MeshMaxRetries_12756 ieee80211_if_read_dot11MeshMaxRetries 3 12756 NULL
++listxattr_12769 listxattr 3 12769 NULL
++sctp_ssnmap_init_12772 sctp_ssnmap_init 2-3 12772 NULL
++ieee80211_rx_mgmt_beacon_12780 ieee80211_rx_mgmt_beacon 3 12780 NULL
++platform_create_bundle_12785 platform_create_bundle 4-6 12785 NULL
++btrfs_remove_free_space_12793 btrfs_remove_free_space 2 12793 NULL
++scsi_adjust_queue_depth_12802 scsi_adjust_queue_depth 3 12802 NULL
++xfs_inumbers_fmt_12817 xfs_inumbers_fmt 3 12817 NULL
++readq_12825 readq 0 12825 NULL
++TSS_authhmac_12839 TSS_authhmac 3 12839 NULL
++spidev_sync_12842 spidev_sync 0 12842 NULL nohasharray
++ath6kl_wmi_add_wow_pattern_cmd_12842 ath6kl_wmi_add_wow_pattern_cmd 4 12842 &spidev_sync_12842
++spidev_ioctl_12846 spidev_ioctl 2 12846 NULL
++get_leb_cnt_12892 get_leb_cnt 0-2 12892 NULL
++ocfs2_hamming_encode_block_12904 ocfs2_hamming_encode_block 2 12904 NULL
++get_virtual_node_size_12908 get_virtual_node_size 0 12908 NULL
++rds_pages_in_vec_12922 rds_pages_in_vec 0 12922 NULL
++ci_ll_init_12930 ci_ll_init 3 12930 NULL
++do_inode_permission_12946 do_inode_permission 0 12946 NULL
++bm_status_write_12964 bm_status_write 3 12964 NULL
++_drbd_md_first_sector_12984 _drbd_md_first_sector 0 12984 NULL
++raid56_parity_recover_12987 raid56_parity_recover 5 12987 NULL
++acpi_tb_install_table_12988 acpi_tb_install_table 1 12988 NULL
++TransmitTcb_12989 TransmitTcb 4 12989 NULL
++sk_peek_offset_12991 sk_peek_offset 0 12991 NULL
++subsystem_filter_write_13022 subsystem_filter_write 3 13022 NULL
++generic_segment_checks_13041 generic_segment_checks 0 13041 NULL
++ocfs2_write_begin_13045 ocfs2_write_begin 3-4 13045 NULL
++__dn_setsockopt_13060 __dn_setsockopt 5 13060 NULL
++biovec_create_pool_13079 biovec_create_pool 2 13079 NULL
++irq_set_chip_and_handler_13088 irq_set_chip_and_handler 1 13088 NULL
++xattr_getsecurity_13090 xattr_getsecurity 0 13090 NULL
++blk_rq_map_sg_13092 blk_rq_map_sg 0 13092 NULL
++mb_find_next_zero_bit_13100 mb_find_next_zero_bit 2-3 13100 NULL
++ubifs_compat_ioctl_13108 ubifs_compat_ioctl 3 13108 NULL
++snd_rme96_playback_copy_13111 snd_rme96_playback_copy 5 13111 NULL
++xen_allocate_irq_dynamic_13116 xen_allocate_irq_dynamic 0 13116 NULL
++bfad_debugfs_read_13119 bfad_debugfs_read 3 13119 NULL
++blk_update_request_13146 blk_update_request 3 13146 NULL
++caif_stream_recvmsg_13173 caif_stream_recvmsg 4 13173 NULL
++pwr_disable_ps_read_13176 pwr_disable_ps_read 3 13176 NULL
++ucs2_strlen_13178 ucs2_strlen 0 13178 NULL
++dgrp_net_ioctl_13183 dgrp_net_ioctl 2 13183 NULL
++create_trace_uprobe_13184 create_trace_uprobe 1 13184 NULL
++compat_put_ulong_13186 compat_put_ulong 1 13186 NULL
++__cmpxchg64_13187 __cmpxchg64 0 13187 NULL
++comedi_read_13199 comedi_read 3 13199 NULL
++mmc_ext_csd_read_13205 mmc_ext_csd_read 3 13205 NULL
++__nodes_fold_13215 __nodes_fold 4 13215 NULL
++svm_msrpm_offset_13220 svm_msrpm_offset 0-1 13220 NULL
++fnic_trace_ctrl_write_13229 fnic_trace_ctrl_write 3 13229 NULL
++asix_read_cmd_13245 asix_read_cmd 5 13245 NULL
++fw_download_code_13249 fw_download_code 3 13249 NULL nohasharray
++kvm_lapic_enable_pv_eoi_13249 kvm_lapic_enable_pv_eoi 2 13249 &fw_download_code_13249
++init_tid_tabs_13252 init_tid_tabs 2-3-4 13252 NULL
++hostap_80211_get_hdrlen_13255 hostap_80211_get_hdrlen 0 13255 NULL
++bio_integrity_trim_13259 bio_integrity_trim 3 13259 NULL
++c4iw_reg_user_mr_13269 c4iw_reg_user_mr 2-3 13269 NULL
++carl9170_rx_13272 carl9170_rx 3 13272 NULL
++pmcraid_notify_aen_13274 pmcraid_notify_aen 3 13274 NULL
++il4965_stats_flag_13281 il4965_stats_flag 0-3 13281 NULL
++lpfc_idiag_mbxacc_get_setup_13282 lpfc_idiag_mbxacc_get_setup 0 13282 NULL
++platform_device_add_resources_13289 platform_device_add_resources 3 13289 NULL
++i915_drop_caches_write_13308 i915_drop_caches_write 3 13308 NULL
++reexecute_instruction_13321 reexecute_instruction 2 13321 NULL
++us122l_ctl_msg_13330 us122l_ctl_msg 8 13330 NULL
++__clone_and_map_data_bio_13334 __clone_and_map_data_bio 4-8 13334 NULL
++kvm_read_nested_guest_page_13337 kvm_read_nested_guest_page 5-2 13337 NULL
++cache_ctr_13364 cache_ctr 2 13364 NULL
++mthca_alloc_mtt_range_13371 mthca_alloc_mtt_range 2 13371 NULL
++iso_sched_alloc_13377 iso_sched_alloc 1 13377 NULL nohasharray
++wep_key_not_found_read_13377 wep_key_not_found_read 3 13377 &iso_sched_alloc_13377
++dis_bypass_write_13388 dis_bypass_write 3 13388 NULL
++carl9170_rx_untie_data_13405 carl9170_rx_untie_data 3 13405 NULL
++sky2_receive_13407 sky2_receive 2 13407 NULL
++netxen_alloc_sds_rings_13417 netxen_alloc_sds_rings 2 13417 NULL
++keyring_read_13438 keyring_read 3 13438 NULL
++sctp_setsockopt_peer_primary_addr_13440 sctp_setsockopt_peer_primary_addr 3 13440 NULL nohasharray
++set_tap_pwup_pfs_13440 set_tap_pwup_pfs 3 13440 &sctp_setsockopt_peer_primary_addr_13440
++ath6kl_cfg80211_connect_event_13443 ath6kl_cfg80211_connect_event 8-9-7 13443 NULL
++mthca_buddy_alloc_13454 mthca_buddy_alloc 2 13454 NULL
++ocfs2_align_bytes_to_blocks_13512 ocfs2_align_bytes_to_blocks 2 13512 NULL
++core_status_13515 core_status 4 13515 NULL
++smk_write_mapped_13519 smk_write_mapped 3 13519 NULL
++bm_init_13529 bm_init 2 13529 NULL
++non_atomic_pte_lookup_13540 non_atomic_pte_lookup 2 13540 NULL nohasharray
++SYSC_remap_file_pages_13540 SYSC_remap_file_pages 1 13540 &non_atomic_pte_lookup_13540
++ieee80211_if_read_ap_power_level_13558 ieee80211_if_read_ap_power_level 3 13558 NULL
++ubifs_get_idx_gc_leb_13566 ubifs_get_idx_gc_leb 0 13566 NULL
++sys_madvise_13569 sys_madvise 1 13569 NULL
++read_file_antenna_13574 read_file_antenna 3 13574 NULL
++cache_write_13589 cache_write 3 13589 NULL
++mpt_lan_receive_post_turbo_13592 mpt_lan_receive_post_turbo 2 13592 NULL
++aac_sa_ioremap_13596 aac_sa_ioremap 2 13596 NULL nohasharray
++irias_new_octseq_value_13596 irias_new_octseq_value 2 13596 &aac_sa_ioremap_13596
++usb_dump_interface_descriptor_13603 usb_dump_interface_descriptor 0 13603 NULL
++swap_cgroup_swapon_13614 swap_cgroup_swapon 2 13614 NULL
++wm8994_bulk_write_13615 wm8994_bulk_write 2-3 13615 NULL
++pmcraid_get_minor_13619 pmcraid_get_minor 0 13619 NULL
++iio_device_add_event_sysfs_13627 iio_device_add_event_sysfs 0 13627 NULL
++packet_snd_13634 packet_snd 3 13634 NULL
++blk_msg_write_13655 blk_msg_write 3 13655 NULL
++cache_downcall_13666 cache_downcall 3 13666 NULL
++fw_iso_buffer_alloc_13704 fw_iso_buffer_alloc 2 13704 NULL
++audit_unpack_string_13748 audit_unpack_string 3 13748 NULL
++ufs_dtog_13750 ufs_dtog 0-2 13750 NULL
++ieee802154_alloc_device_13767 ieee802154_alloc_device 1 13767 NULL
++fb_sys_read_13778 fb_sys_read 3 13778 NULL
++ath6kl_mgmt_powersave_ap_13791 ath6kl_mgmt_powersave_ap 6 13791 NULL
++random_read_13815 random_read 3 13815 NULL
++hsi_register_board_info_13820 hsi_register_board_info 2 13820 NULL
++___mei_cl_send_13821 ___mei_cl_send 3 13821 NULL
++evdev_ioctl_compat_13851 evdev_ioctl_compat 2-3 13851 NULL
++compat_ip_setsockopt_13870 compat_ip_setsockopt 5 13870 NULL nohasharray
++alloc_trace_uprobe_13870 alloc_trace_uprobe 3 13870 &compat_ip_setsockopt_13870
++qp_memcpy_to_queue_13886 qp_memcpy_to_queue 2-5 13886 NULL
++snd_pcm_aio_read_13900 snd_pcm_aio_read 3 13900 NULL
++ext3_xattr_block_get_13936 ext3_xattr_block_get 0 13936 NULL
++ieee80211_if_read_dot11MeshForwarding_13940 ieee80211_if_read_dot11MeshForwarding 3 13940 NULL nohasharray
++ocfs2_xa_value_truncate_13940 ocfs2_xa_value_truncate 2 13940 &ieee80211_if_read_dot11MeshForwarding_13940
++iwl_dbgfs_protection_mode_read_13943 iwl_dbgfs_protection_mode_read 3 13943 NULL
++compat_chaninfo_13945 compat_chaninfo 2 13945 NULL
++ieee80211_if_read_min_discovery_timeout_13946 ieee80211_if_read_min_discovery_timeout 3 13946 NULL
++lpfc_idiag_queacc_read_13950 lpfc_idiag_queacc_read 3 13950 NULL
++snd_pcm_plug_slave_size_13967 snd_pcm_plug_slave_size 0-2 13967 NULL
++com90xx_found_13974 com90xx_found 3 13974 NULL
++qcam_read_13977 qcam_read 3 13977 NULL
++dsp_read_13980 dsp_read 2 13980 NULL
++bm_block_bits_13981 bm_block_bits 0 13981 NULL nohasharray
++dvb_demux_read_13981 dvb_demux_read 3 13981 &bm_block_bits_13981
++btrfs_get_blocks_direct_14016 btrfs_get_blocks_direct 2 14016 NULL
++dmi_format_ids_14018 dmi_format_ids 2 14018 NULL
++_rtl92s_firmware_downloadcode_14021 _rtl92s_firmware_downloadcode 3 14021 NULL
++iscsi_create_flashnode_conn_14022 iscsi_create_flashnode_conn 4 14022 NULL
++dvb_usercopy_14036 dvb_usercopy 2 14036 NULL
++read_def_modal_eeprom_14041 read_def_modal_eeprom 3 14041 NULL
++ieee80211_if_fmt_aid_14055 ieee80211_if_fmt_aid 3 14055 NULL
++sta_agg_status_read_14058 sta_agg_status_read 3 14058 NULL
++i915_drop_caches_read_14060 i915_drop_caches_read 3 14060 NULL
++do_proc_readlink_14096 do_proc_readlink 3 14096 NULL
++compat_sys_pselect6_14105 compat_sys_pselect6 1 14105 NULL
++nlmsg_len_14115 nlmsg_len 0 14115 NULL
++vfio_fops_compat_ioctl_14130 vfio_fops_compat_ioctl 3 14130 NULL
++ntfs_rl_replace_14136 ntfs_rl_replace 2-4 14136 NULL
++isku_sysfs_read_light_14140 isku_sysfs_read_light 6 14140 NULL
++em_canid_change_14150 em_canid_change 3 14150 NULL
++gsm_dlci_data_14155 gsm_dlci_data 3 14155 NULL
++print_input_mask_14168 print_input_mask 3-0 14168 NULL
++ocfs2_xattr_value_truncate_14183 ocfs2_xattr_value_truncate 3 14183 NULL
++alloc_async_14208 alloc_async 1 14208 NULL
++sys_kexec_load_14222 sys_kexec_load 2 14222 NULL
++ieee80211_if_write_uapsd_max_sp_len_14233 ieee80211_if_write_uapsd_max_sp_len 3 14233 NULL
++dma_declare_coherent_memory_14244 dma_declare_coherent_memory 4-2 14244 NULL
++snd_soc_hw_bulk_write_raw_14245 snd_soc_hw_bulk_write_raw 2-4 14245 NULL
++reiserfs_compat_ioctl_14265 reiserfs_compat_ioctl 3 14265 NULL
++ath6kl_connect_event_14267 ath6kl_connect_event 8-9-7 14267 NULL
++add_numbered_child_14273 add_numbered_child 5 14273 NULL
++OS_mem_token_alloc_14276 OS_mem_token_alloc 1 14276 NULL
++snd_seq_oss_readq_new_14283 snd_seq_oss_readq_new 2 14283 NULL
++rr_status_14293 rr_status 5 14293 NULL
++read_default_ldt_14302 read_default_ldt 2 14302 NULL
++oo_objects_14319 oo_objects 0 14319 NULL
++p9_client_zc_rpc_14345 p9_client_zc_rpc 7 14345 NULL
++scsi2int_14358 scsi2int 0 14358 NULL
++snd_pcm_lib_readv_14363 snd_pcm_lib_readv 0-3 14363 NULL
++acpi_get_override_irq_14381 acpi_get_override_irq 1 14381 NULL
++ath6kl_regdump_read_14393 ath6kl_regdump_read 3 14393 NULL
++smk_write_onlycap_14400 smk_write_onlycap 3 14400 NULL
++mtd_concat_create_14416 mtd_concat_create 2 14416 NULL
++get_kcore_size_14425 get_kcore_size 0 14425 NULL
++check_lpt_crc_14442 check_lpt_crc 0 14442 NULL
++block_size_14443 block_size 0 14443 NULL
++ci13xxx_add_device_14456 ci13xxx_add_device 3 14456 NULL
++snd_emu10k1_proc_spdif_status_14457 snd_emu10k1_proc_spdif_status 4-5 14457 NULL
++udplite_getfrag_14479 udplite_getfrag 3-4 14479 NULL
++ieee80211_if_read_dot11MeshGateAnnouncementProtocol_14486 ieee80211_if_read_dot11MeshGateAnnouncementProtocol 3 14486 NULL
++ocfs2_debug_read_14507 ocfs2_debug_read 3 14507 NULL
++dataflash_read_user_otp_14536 dataflash_read_user_otp 2-3 14536 NULL nohasharray
++ep0_write_14536 ep0_write 3 14536 &dataflash_read_user_otp_14536
++picolcd_debug_eeprom_read_14549 picolcd_debug_eeprom_read 3 14549 NULL
++drm_vmalloc_dma_14550 drm_vmalloc_dma 1 14550 NULL
++usb_dump_desc_14553 usb_dump_desc 0 14553 NULL
++qp_host_alloc_queue_14566 qp_host_alloc_queue 1 14566 NULL
++SyS_setdomainname_14569 SyS_setdomainname 2 14569 NULL
++remap_to_origin_then_cache_14583 remap_to_origin_then_cache 3 14583 NULL
++idmap_pipe_downcall_14591 idmap_pipe_downcall 3 14591 NULL
++ceph_osdc_alloc_request_14597 ceph_osdc_alloc_request 3 14597 NULL
++ocfs2_trim_group_14641 ocfs2_trim_group 4-3 14641 NULL
++dbJoin_14644 dbJoin 0 14644 NULL
++profile_replace_14652 profile_replace 3 14652 NULL
++pipeline_enc_tx_stat_fifo_int_read_14680 pipeline_enc_tx_stat_fifo_int_read 3 14680 NULL
++ieee80211_if_fmt_rc_rateidx_mask_2ghz_14683 ieee80211_if_fmt_rc_rateidx_mask_2ghz 3 14683 NULL
++tsi148_master_set_14685 tsi148_master_set 4 14685 NULL
++SyS_fsetxattr_14702 SyS_fsetxattr 4 14702 NULL
++persistent_ram_ecc_string_14704 persistent_ram_ecc_string 0 14704 NULL
++u_audio_playback_14709 u_audio_playback 3 14709 NULL
++get_bio_block_14714 get_bio_block 0 14714 NULL
++vfd_write_14717 vfd_write 3 14717 NULL
++__blk_end_request_14729 __blk_end_request 3 14729 NULL
++raid1_resize_14740 raid1_resize 2 14740 NULL
++btrfs_inode_extref_name_len_14752 btrfs_inode_extref_name_len 0 14752 NULL
++rx_rx_cmplt_read_14753 rx_rx_cmplt_read 3 14753 NULL
++qla82xx_pci_mem_write_2M_14765 qla82xx_pci_mem_write_2M 2 14765 NULL
++regmap_range_read_file_14775 regmap_range_read_file 3 14775 NULL
++sta_dev_read_14782 sta_dev_read 3 14782 NULL
++ext4_kvmalloc_14796 ext4_kvmalloc 1 14796 NULL
++hpet_readl_14801 hpet_readl 0 14801 NULL nohasharray
++snd_als300_gcr_read_14801 snd_als300_gcr_read 0 14801 &hpet_readl_14801
++bcma_scan_read32_14802 bcma_scan_read32 0 14802 NULL
++do_tune_cpucache_14828 do_tune_cpucache 2 14828 NULL
++__mutex_fastpath_lock_retval_14844 __mutex_fastpath_lock_retval 0 14844 NULL
++mrp_attr_create_14853 mrp_attr_create 3 14853 NULL
++lcd_write_14857 lcd_write 3 14857 NULL nohasharray
++__krealloc_14857 __krealloc 2 14857 &lcd_write_14857
++get_user_cpu_mask_14861 get_user_cpu_mask 2 14861 NULL
++sriov_enable_migration_14889 sriov_enable_migration 2 14889 NULL
++acpi_os_allocate_14892 acpi_os_allocate 1 14892 NULL
++unifi_read_14899 unifi_read 3 14899 NULL
++SYSC_readv_14901 SYSC_readv 3 14901 NULL
++krealloc_14908 krealloc 2 14908 NULL
++regmap_irq_get_virq_14910 regmap_irq_get_virq 2 14910 NULL
++__arch_hweight64_14923 __arch_hweight64 0 14923 NULL nohasharray
++qp_memcpy_to_queue_iov_14923 qp_memcpy_to_queue_iov 2-5 14923 &__arch_hweight64_14923
++ocfs2_expand_nonsparse_inode_14936 ocfs2_expand_nonsparse_inode 3-4 14936 NULL
++queue_cnt_14951 queue_cnt 0 14951 NULL
++videobuf_read_stream_14956 videobuf_read_stream 3 14956 NULL
++mce_flush_rx_buffer_14976 mce_flush_rx_buffer 2 14976 NULL
++setkey_14987 setkey 3 14987 NULL nohasharray
++gpio_twl4030_write_14987 gpio_twl4030_write 1 14987 &setkey_14987
++xfs_dinode_size_14996 xfs_dinode_size 0 14996 NULL
++vmap_15025 vmap 2 15025 NULL
++blk_integrity_tuple_size_15027 blk_integrity_tuple_size 0 15027 NULL
++irq_get_next_irq_15053 irq_get_next_irq 1 15053 NULL
++cld_pipe_downcall_15058 cld_pipe_downcall 3 15058 NULL
++ieee80211_if_read_uapsd_max_sp_len_15067 ieee80211_if_read_uapsd_max_sp_len 3 15067 NULL
++nfs4_write_cached_acl_15070 nfs4_write_cached_acl 4 15070 NULL
++ntfs_copy_from_user_15072 ntfs_copy_from_user 3-5 15072 NULL
++compat_SyS_preadv_15105 compat_SyS_preadv 3 15105 NULL
++hex_dump_to_buffer_15121 hex_dump_to_buffer 6 15121 NULL
++start_port_15124 start_port 0 15124 NULL
++memchr_15126 memchr 0 15126 NULL
++ipwireless_ppp_mru_15153 ipwireless_ppp_mru 0 15153 NULL
++self_check_not_bad_15175 self_check_not_bad 0 15175 NULL
++SYSC_setdomainname_15180 SYSC_setdomainname 2 15180 NULL
++iscsi_create_endpoint_15193 iscsi_create_endpoint 1 15193 NULL
++reserve_resources_15194 reserve_resources 3 15194 NULL
++bfad_debugfs_write_regrd_15218 bfad_debugfs_write_regrd 3 15218 NULL
++il_dbgfs_rx_stats_read_15243 il_dbgfs_rx_stats_read 3 15243 NULL
++div64_u64_15263 div64_u64 0-1-2 15263 NULL
++compat_raw_ioctl_15290 compat_raw_ioctl 3 15290 NULL
++sys_connect_15291 sys_connect 3 15291 NULL nohasharray
++xlate_dev_mem_ptr_15291 xlate_dev_mem_ptr 1 15291 &sys_connect_15291
++arch_enable_uv_irq_15294 arch_enable_uv_irq 2 15294 NULL
++acpi_ev_create_gpe_block_15297 acpi_ev_create_gpe_block 5 15297 NULL
++tpm_tis_init_15304 tpm_tis_init 2-3 15304 NULL
++fcoe_ctlr_send_keep_alive_15308 fcoe_ctlr_send_keep_alive 3 15308 NULL
++__ocfs2_remove_xattr_range_15330 __ocfs2_remove_xattr_range 4-5-3 15330 NULL
++kovaplus_sysfs_read_15337 kovaplus_sysfs_read 6 15337 NULL
++ioread16_15342 ioread16 0 15342 NULL
++alloc_ring_15345 alloc_ring 2-4 15345 NULL
++acpi_ut_create_string_object_15360 acpi_ut_create_string_object 1 15360 NULL
++graph_depth_read_15371 graph_depth_read 3 15371 NULL
++compat_sys_process_vm_readv_15374 compat_sys_process_vm_readv 3-5 15374 NULL
++fq_codel_zalloc_15378 fq_codel_zalloc 1 15378 NULL
++domain_flush_pages_15379 domain_flush_pages 2-3 15379 NULL
++alloc_fddidev_15382 alloc_fddidev 1 15382 NULL
++btrfs_level_size_15392 btrfs_level_size 0 15392 NULL
++pipeline_csum_to_rx_xfer_swi_read_15403 pipeline_csum_to_rx_xfer_swi_read 3 15403 NULL
++get_modalias_15406 get_modalias 2 15406 NULL
++dm_cache_resize_15422 dm_cache_resize 2 15422 NULL
++__videobuf_copy_to_user_15423 __videobuf_copy_to_user 4 15423 NULL
++tcp_mtu_to_mss_15438 tcp_mtu_to_mss 2 15438 NULL
++hpsa_change_queue_depth_15449 hpsa_change_queue_depth 2 15449 NULL
++memweight_15450 memweight 2 15450 NULL
++vmalloc_15464 vmalloc 1 15464 NULL
++zd_chip_is_zd1211b_15518 zd_chip_is_zd1211b 0 15518 NULL
++da9052_bat_irq_15533 da9052_bat_irq 1 15533 NULL
++p9_check_zc_errors_15534 p9_check_zc_errors 4 15534 NULL
++ql_process_mac_rx_page_15543 ql_process_mac_rx_page 4 15543 NULL
++ieee80211_amsdu_to_8023s_15561 ieee80211_amsdu_to_8023s 5 15561 NULL
++persistent_status_15574 persistent_status 4 15574 NULL
++bnx2fc_process_unsol_compl_15576 bnx2fc_process_unsol_compl 2 15576 NULL
++vme_user_write_15587 vme_user_write 3 15587 NULL
++ocfs2_truncate_rec_15595 ocfs2_truncate_rec 7 15595 NULL
++sx150x_install_irq_chip_15609 sx150x_install_irq_chip 3 15609 NULL
++iommu_device_max_index_15620 iommu_device_max_index 0-3-2-1 15620 NULL nohasharray
++compat_fillonedir_15620 compat_fillonedir 3 15620 &iommu_device_max_index_15620
++set_dis_tap_pfs_15621 set_dis_tap_pfs 3 15621 NULL
++proc_loginuid_read_15631 proc_loginuid_read 3 15631 NULL
++tomoyo_scan_bprm_15642 tomoyo_scan_bprm 2-4 15642 NULL nohasharray
++sk_memory_allocated_add_15642 sk_memory_allocated_add 2 15642 &tomoyo_scan_bprm_15642 nohasharray
++pipeline_hs_tx_stat_fifo_int_read_15642 pipeline_hs_tx_stat_fifo_int_read 3 15642 &sk_memory_allocated_add_15642
++fs_path_add_15648 fs_path_add 3 15648 NULL
++xsd_read_15653 xsd_read 3 15653 NULL
++compat_sys_fcntl_15654 compat_sys_fcntl 3 15654 NULL
++unix_bind_15668 unix_bind 3 15668 NULL
++dm_read_15674 dm_read 3 15674 NULL
++pstore_mkfile_15675 pstore_mkfile 6 15675 NULL
++uf_sme_queue_message_15697 uf_sme_queue_message 3 15697 NULL
++ocfs2_split_tree_15716 ocfs2_split_tree 5 15716 NULL
++HiSax_readstatus_15752 HiSax_readstatus 2 15752 NULL
++bitmap_search_next_usable_block_15762 bitmap_search_next_usable_block 3-1 15762 NULL
++do_test_15766 do_test 1 15766 NULL
++set_std_nic_pfs_15792 set_std_nic_pfs 3 15792 NULL
++smk_read_direct_15803 smk_read_direct 3 15803 NULL
++snd_pcm_ioctl_compat_15804 snd_pcm_ioctl_compat 3 15804 NULL
++gx1_read_conf_reg_15817 gx1_read_conf_reg 0 15817 NULL nohasharray
++nameseq_list_15817 nameseq_list 3 15817 &gx1_read_conf_reg_15817 nohasharray
++gnttab_expand_15817 gnttab_expand 1 15817 &nameseq_list_15817
++afs_proc_rootcell_write_15822 afs_proc_rootcell_write 3 15822 NULL nohasharray
++firmware_upload_15822 firmware_upload 3 15822 &afs_proc_rootcell_write_15822
++brcmf_sdbrcm_died_dump_15841 brcmf_sdbrcm_died_dump 3 15841 NULL
++table_size_15851 table_size 0-1-2 15851 NULL
++ubi_io_write_15870 ubi_io_write 5-4 15870 NULL nohasharray
++media_entity_init_15870 media_entity_init 2-4 15870 &ubi_io_write_15870
++__mptctl_ioctl_15875 __mptctl_ioctl 2 15875 NULL
++nfs_map_group_to_gid_15892 nfs_map_group_to_gid 3 15892 NULL
++native_read_msr_15905 native_read_msr 0 15905 NULL
++parse_audio_stream_data_15937 parse_audio_stream_data 3 15937 NULL
++power_read_15939 power_read 3 15939 NULL
++lpfc_idiag_drbacc_read_15948 lpfc_idiag_drbacc_read 3 15948 NULL
++snd_pcm_lib_read_transfer_15952 snd_pcm_lib_read_transfer 4-2-5 15952 NULL
++remap_pci_mem_15966 remap_pci_mem 1-2 15966 NULL
++tfrc_calc_x_15975 tfrc_calc_x 1-2 15975 NULL
++frame_alloc_15981 frame_alloc 4 15981 NULL
++alloc_vm_area_15989 alloc_vm_area 1 15989 NULL
++hdpvr_register_videodev_16010 hdpvr_register_videodev 3 16010 NULL
++viafb_vt1636_proc_write_16018 viafb_vt1636_proc_write 3 16018 NULL
++got_frame_16028 got_frame 2 16028 NULL
++read_file_spectral_period_16057 read_file_spectral_period 3 16057 NULL
++isr_tx_exch_complete_read_16103 isr_tx_exch_complete_read 3 16103 NULL
++dma_tx_requested_read_16110 dma_tx_requested_read 3 16110 NULL nohasharray
++isr_hw_pm_mode_changes_read_16110 isr_hw_pm_mode_changes_read 3 16110 &dma_tx_requested_read_16110
++irq_set_chip_and_handler_name_16111 irq_set_chip_and_handler_name 1 16111 NULL
++snd_dma_pointer_16126 snd_dma_pointer 0-2 16126 NULL
++compat_sys_select_16131 compat_sys_select 1 16131 NULL
++fsm_init_16134 fsm_init 2 16134 NULL
++hysdn_rx_netpkt_16136 hysdn_rx_netpkt 3 16136 NULL
++ext4_xattr_block_get_16148 ext4_xattr_block_get 0 16148 NULL
++bnx2i_get_cid_num_16166 bnx2i_get_cid_num 0 16166 NULL
++mapping_level_16188 mapping_level 2 16188 NULL
++cipso_v4_map_cat_rng_hton_16203 cipso_v4_map_cat_rng_hton 0 16203 NULL
++SyS_pselect6_16210 SyS_pselect6 1 16210 NULL
++create_table_16213 create_table 2 16213 NULL
++atomic_read_file_16227 atomic_read_file 3 16227 NULL
++BcmGetSectionValStartOffset_16235 BcmGetSectionValStartOffset 0 16235 NULL
++swiotlb_sync_single_for_device_16247 swiotlb_sync_single_for_device 2 16247 NULL nohasharray
++btrfs_dev_extent_chunk_offset_16247 btrfs_dev_extent_chunk_offset 0 16247 &swiotlb_sync_single_for_device_16247
++mark_written_sectors_16262 mark_written_sectors 2 16262 NULL
++reiserfs_acl_count_16265 reiserfs_acl_count 0-1 16265 NULL
++set_disc_pfs_16270 set_disc_pfs 3 16270 NULL
++mq_force_mapping_16277 mq_force_mapping 2 16277 NULL
++ocfs2_xattr_bucket_value_truncate_16279 ocfs2_xattr_bucket_value_truncate 4 16279 NULL
++drbd_setsockopt_16280 drbd_setsockopt 5 16280 NULL nohasharray
++nand_bch_init_16280 nand_bch_init 3-2 16280 &drbd_setsockopt_16280
++account_16283 account 0-2-4 16283 NULL nohasharray
++mirror_status_16283 mirror_status 5 16283 &account_16283
++retry_instruction_16285 retry_instruction 2 16285 NULL
++stk_allocate_buffers_16291 stk_allocate_buffers 2 16291 NULL
++rbd_segment_offset_16293 rbd_segment_offset 0-2 16293 NULL
++tfrc_invert_loss_event_rate_16295 tfrc_invert_loss_event_rate 1 16295 NULL
++rsc_mgr_init_16299 rsc_mgr_init 3 16299 NULL
++wb_map_16301 wb_map 2 16301 NULL
++ext4_blocks_count_16320 ext4_blocks_count 0 16320 NULL
++vmw_cursor_update_image_16332 vmw_cursor_update_image 3-4 16332 NULL
++total_ps_buffered_read_16365 total_ps_buffered_read 3 16365 NULL
++iscsi_tcp_conn_setup_16376 iscsi_tcp_conn_setup 2 16376 NULL
++nl80211_send_unprot_deauth_16378 nl80211_send_unprot_deauth 4 16378 NULL
++diva_os_malloc_16406 diva_os_malloc 2 16406 NULL
++ieee80211_if_read_tsf_16420 ieee80211_if_read_tsf 3 16420 NULL
++rxrpc_server_keyring_16431 rxrpc_server_keyring 3 16431 NULL
++netlink_change_ngroups_16457 netlink_change_ngroups 2 16457 NULL
++tracing_readme_read_16493 tracing_readme_read 3 16493 NULL
++snd_interval_max_16529 snd_interval_max 0 16529 NULL
++raid10_resize_16537 raid10_resize 2 16537 NULL
++tcp_manip_pkt_16563 tcp_manip_pkt 4 16563 NULL
++lpfc_debugfs_read_16566 lpfc_debugfs_read 3 16566 NULL
++agp_allocate_memory_wrap_16576 agp_allocate_memory_wrap 1 16576 NULL
++virt_to_scatterlist_16582 virt_to_scatterlist 2 16582 NULL
++palmas_irq_get_virq_16613 palmas_irq_get_virq 2 16613 NULL
++btrfs_get_token_32_16651 btrfs_get_token_32 0 16651 NULL
++mfd_add_devices_16668 mfd_add_devices 4 16668 NULL
++ax88179_write_cmd_async_16671 ax88179_write_cmd_async 5 16671 NULL
++arcmsr_adjust_disk_queue_depth_16756 arcmsr_adjust_disk_queue_depth 2 16756 NULL
++compat_blkdev_driver_ioctl_16769 compat_blkdev_driver_ioctl 4 16769 NULL
++blk_rq_map_user_iov_16772 blk_rq_map_user_iov 5 16772 NULL
++i2o_parm_issue_16790 i2o_parm_issue 0 16790 NULL
++get_server_iovec_16804 get_server_iovec 2 16804 NULL
++tipc_send2name_16809 tipc_send2name 6 16809 NULL
++dm_vcalloc_16814 dm_vcalloc 1-2 16814 NULL
++drm_malloc_ab_16831 drm_malloc_ab 1-2 16831 NULL
++scsi_mode_sense_16835 scsi_mode_sense 5 16835 NULL
++hfsplus_min_io_size_16859 hfsplus_min_io_size 0 16859 NULL
++vfio_pci_rw_16861 vfio_pci_rw 3 16861 NULL
++alloc_idx_lebs_16872 alloc_idx_lebs 2 16872 NULL
++carl9170_debugfs_ampdu_state_read_16873 carl9170_debugfs_ampdu_state_read 3 16873 NULL
++st_write_16874 st_write 3 16874 NULL
++__kfifo_peek_n_16877 __kfifo_peek_n 0 16877 NULL
++idx_to_pfn_16919 idx_to_pfn 0 16919 NULL
++psb_unlocked_ioctl_16926 psb_unlocked_ioctl 2 16926 NULL nohasharray
++snd_gf1_mem_proc_dump_16926 snd_gf1_mem_proc_dump 5 16926 &psb_unlocked_ioctl_16926
++_sp2d_alloc_16944 _sp2d_alloc 1-2-3 16944 NULL
++squashfs_read_table_16945 squashfs_read_table 3 16945 NULL
++cfg80211_send_unprot_disassoc_16951 cfg80211_send_unprot_disassoc 3 16951 NULL
++keyctl_instantiate_key_iov_16969 keyctl_instantiate_key_iov 3 16969 NULL
++ceph_read_dir_17005 ceph_read_dir 3 17005 NULL
++copy_counters_to_user_17027 copy_counters_to_user 5 17027 NULL
++jffs2_trusted_setxattr_17048 jffs2_trusted_setxattr 4 17048 NULL
++__arch_hweight32_17060 __arch_hweight32 0 17060 NULL
++dvb_dvr_read_17073 dvb_dvr_read 3 17073 NULL
++simple_transaction_read_17076 simple_transaction_read 3 17076 NULL
++__kmalloc_reserve_17080 __kmalloc_reserve 1 17080 NULL
++carl9170_debugfs_mem_usage_read_17084 carl9170_debugfs_mem_usage_read 3 17084 NULL
++mac_address_string_17091 mac_address_string 0 17091 NULL
++entry_length_17093 entry_length 0 17093 NULL
++sys_preadv_17100 sys_preadv 3 17100 NULL
++pvr2_hdw_state_report_17121 pvr2_hdw_state_report 3 17121 NULL
++mwifiex_get_common_rates_17131 mwifiex_get_common_rates 3 17131 NULL
++nouveau_instobj_create__17144 nouveau_instobj_create_ 4 17144 NULL
++sep_read_17161 sep_read 3 17161 NULL
++befs_nls2utf_17163 befs_nls2utf 3 17163 NULL
++tx_tx_start_templates_read_17164 tx_tx_start_templates_read 3 17164 NULL
++UniStrnlen_17169 UniStrnlen 0 17169 NULL
++access_remote_vm_17189 access_remote_vm 0-2-4 17189 NULL
++driver_state_read_17194 driver_state_read 3 17194 NULL nohasharray
++iscsit_find_cmd_from_itt_or_dump_17194 iscsit_find_cmd_from_itt_or_dump 3 17194 &driver_state_read_17194
++dn_recvmsg_17213 dn_recvmsg 4 17213 NULL
++to_oblock_17254 to_oblock 0-1 17254 NULL
++unpack_value_17259 unpack_value 1 17259 NULL
++__be16_to_cpup_17261 __be16_to_cpup 0 17261 NULL
++error_error_frame_cts_nul_flid_read_17262 error_error_frame_cts_nul_flid_read 3 17262 NULL
++alloc_ep_17269 alloc_ep 1 17269 NULL
++pg_read_17276 pg_read 3 17276 NULL
++raw_recvmsg_17277 raw_recvmsg 4 17277 NULL
++hmac_sha256_17278 hmac_sha256 2 17278 NULL
++neigh_hash_grow_17283 neigh_hash_grow 2 17283 NULL
++minstrel_stats_read_17290 minstrel_stats_read 3 17290 NULL
++install_breakpoint_17292 install_breakpoint 4 17292 NULL
++ieee80211_if_fmt_dot11MeshForwarding_17301 ieee80211_if_fmt_dot11MeshForwarding 3 17301 NULL
++skb_pad_17302 skb_pad 2 17302 NULL
++mb_cache_create_17307 mb_cache_create 2 17307 NULL
++gnttab_map_frames_v2_17314 gnttab_map_frames_v2 2 17314 NULL
++ata_host_alloc_pinfo_17325 ata_host_alloc_pinfo 3 17325 NULL
++ieee80211_if_read_dot11MeshHWMPperrMinInterval_17346 ieee80211_if_read_dot11MeshHWMPperrMinInterval 3 17346 NULL
++ath6kl_wmi_send_mgmt_cmd_17347 ath6kl_wmi_send_mgmt_cmd 7 17347 NULL
++_fd_dma_mem_free_17406 _fd_dma_mem_free 1 17406 NULL
++lpfc_debugfs_dif_err_write_17424 lpfc_debugfs_dif_err_write 3 17424 NULL
++sta_connected_time_read_17435 sta_connected_time_read 3 17435 NULL
++SYSC_fcntl_17441 SYSC_fcntl 3 17441 NULL
++nla_get_u32_17455 nla_get_u32 0 17455 NULL
++__ref_totlen_17461 __ref_totlen 0 17461 NULL
++compat_cmd_17465 compat_cmd 2 17465 NULL
++probe_bios_17467 probe_bios 1 17467 NULL
++probe_kernel_write_17481 probe_kernel_write 3 17481 NULL
++__alloc_session_17485 __alloc_session 2-1 17485 NULL
++TSS_rawhmac_17486 TSS_rawhmac 3 17486 NULL
++bitmap_pos_to_ord_17503 bitmap_pos_to_ord 3 17503 NULL
++__copy_to_user_17551 __copy_to_user 0-3 17551 NULL
++copy_from_user_17559 copy_from_user 3 17559 NULL
++acpi_ut_create_package_object_17594 acpi_ut_create_package_object 1 17594 NULL
++neigh_hash_alloc_17595 neigh_hash_alloc 1 17595 NULL
++rts51x_write_mem_17598 rts51x_write_mem 4 17598 NULL
++iwl_dump_nic_event_log_17601 iwl_dump_nic_event_log 0 17601 NULL
++wm8994_gpio_to_irq_17604 wm8994_gpio_to_irq 2 17604 NULL
++osst_execute_17607 osst_execute 7-6 17607 NULL
++ocfs2_mark_extent_written_17615 ocfs2_mark_extent_written 6 17615 NULL
++ieee80211_if_read_dot11MeshHWMPactivePathToRootTimeout_17618 ieee80211_if_read_dot11MeshHWMPactivePathToRootTimeout 3 17618 NULL
++twl4030_set_gpio_direction_17645 twl4030_set_gpio_direction 1 17645 NULL
++SYSC_migrate_pages_17657 SYSC_migrate_pages 2 17657 NULL
++packet_setsockopt_17662 packet_setsockopt 5 17662 NULL nohasharray
++ubi_io_read_data_17662 ubi_io_read_data 0 17662 &packet_setsockopt_17662
++pwr_enable_ps_read_17686 pwr_enable_ps_read 3 17686 NULL
++gfn_to_pfn_memslot_17693 gfn_to_pfn_memslot 2 17693 NULL
++__einj_error_trigger_17707 __einj_error_trigger 1 17707 NULL nohasharray
++venus_rename_17707 venus_rename 5-4 17707 &__einj_error_trigger_17707
++isku_sysfs_write_keys_function_17726 isku_sysfs_write_keys_function 6 17726 NULL
++exofs_read_lookup_dev_table_17733 exofs_read_lookup_dev_table 3 17733 NULL
++sctpprobe_read_17741 sctpprobe_read 3 17741 NULL
++mark_unsafe_pages_17759 mark_unsafe_pages 0 17759 NULL
++brcmf_usb_attach_17766 brcmf_usb_attach 2-3 17766 NULL
++dtf_read_run_17768 dtf_read_run 3 17768 NULL
++brcmf_sdio_chip_verifynvram_17776 brcmf_sdio_chip_verifynvram 4 17776 NULL
++hash_ipport6_expire_17784 hash_ipport6_expire 3 17784 NULL
++perf_clock_17787 perf_clock 0 17787 NULL
++ubifs_leb_change_17789 ubifs_leb_change 4 17789 NULL
++_snd_pcm_lib_alloc_vmalloc_buffer_17820 _snd_pcm_lib_alloc_vmalloc_buffer 2 17820 NULL
++gnet_stats_copy_app_17821 gnet_stats_copy_app 3 17821 NULL
++cipso_v4_gentag_rbm_17836 cipso_v4_gentag_rbm 0 17836 NULL
++count_leafs_17842 count_leafs 0 17842 NULL
++sisusb_send_bulk_msg_17864 sisusb_send_bulk_msg 3 17864 NULL
++alloc_sja1000dev_17868 alloc_sja1000dev 1 17868 NULL
++ray_cs_essid_proc_write_17875 ray_cs_essid_proc_write 3 17875 NULL
++orinoco_set_key_17878 orinoco_set_key 5-7 17878 NULL
++init_per_cpu_17880 init_per_cpu 1 17880 NULL
++ieee80211_if_fmt_dot11MeshMaxPeerLinks_17883 ieee80211_if_fmt_dot11MeshMaxPeerLinks 3 17883 NULL
++compat_sys_pwritev_17886 compat_sys_pwritev 3 17886 NULL
++ieee80211_if_fmt_dot11MeshHWMPRootMode_17890 ieee80211_if_fmt_dot11MeshHWMPRootMode 3 17890 NULL
++ocfs2_clusters_to_blocks_17896 ocfs2_clusters_to_blocks 0-2 17896 NULL
++recover_head_17904 recover_head 3 17904 NULL
++dccp_feat_register_sp_17914 dccp_feat_register_sp 5 17914 NULL
++xfs_buf_associate_memory_17915 xfs_buf_associate_memory 3 17915 NULL
++srp_iu_pool_alloc_17920 srp_iu_pool_alloc 2 17920 NULL
++scsi_bufflen_17933 scsi_bufflen 0 17933 NULL
++ufs_free_blocks_17963 ufs_free_blocks 2-3 17963 NULL
++calc_nr_buckets_17976 calc_nr_buckets 0 17976 NULL
++smk_write_cipso_17989 smk_write_cipso 3 17989 NULL
++gnttab_max_grant_frames_17993 gnttab_max_grant_frames 0 17993 NULL
++ext4_num_overhead_clusters_18001 ext4_num_overhead_clusters 2 18001 NULL
++pvr2_v4l2_read_18006 pvr2_v4l2_read 3 18006 NULL
++alloc_rx_desc_ring_18016 alloc_rx_desc_ring 2 18016 NULL
++fill_read_18019 fill_read 0 18019 NULL
++o2hb_highest_node_18034 o2hb_highest_node 2 18034 NULL
++cryptd_alloc_instance_18048 cryptd_alloc_instance 2-3 18048 NULL
++find_next_inuse_18051 find_next_inuse 2-3 18051 NULL
++ddebug_proc_write_18055 ddebug_proc_write 3 18055 NULL
++lua_sysfs_read_18062 lua_sysfs_read 6 18062 NULL
++hex_byte_pack_18064 hex_byte_pack 0 18064 NULL
++packet_came_18072 packet_came 3 18072 NULL
++kvm_read_guest_page_18074 kvm_read_guest_page 5-2 18074 NULL
++SYSC_pselect6_18076 SYSC_pselect6 1 18076 NULL
++get_vm_area_18080 get_vm_area 1 18080 NULL
++SYSC_semtimedop_18091 SYSC_semtimedop 3 18091 NULL
++mpi_alloc_18094 mpi_alloc 1 18094 NULL
++dfs_file_read_18116 dfs_file_read 3 18116 NULL
++svc_getnl_18120 svc_getnl 0 18120 NULL
++paging32_gpte_to_gfn_lvl_18131 paging32_gpte_to_gfn_lvl 0-1-2 18131 NULL
++vmw_surface_dma_size_18132 vmw_surface_dma_size 0 18132 NULL
++selinux_inode_setsecurity_18148 selinux_inode_setsecurity 4 18148 NULL
++_has_tag_18169 _has_tag 2 18169 NULL
++pccard_store_cis_18176 pccard_store_cis 6 18176 NULL
++orinoco_add_extscan_result_18207 orinoco_add_extscan_result 3 18207 NULL
++gsm_control_message_18209 gsm_control_message 4 18209 NULL
++do_ipv6_setsockopt_18215 do_ipv6_setsockopt 5 18215 NULL
++gnttab_alloc_grant_references_18240 gnttab_alloc_grant_references 1 18240 NULL
++alloc_trace_uprobe_18247 alloc_trace_uprobe 3 18247 NULL
++snd_ctl_ioctl_compat_18250 snd_ctl_ioctl_compat 3 18250 NULL
++qdisc_class_hash_alloc_18262 qdisc_class_hash_alloc 1 18262 NULL
++gfs2_alloc_sort_buffer_18275 gfs2_alloc_sort_buffer 1 18275 NULL
++alloc_ring_18278 alloc_ring 2-4 18278 NULL
++find_dirty_idx_leb_18280 find_dirty_idx_leb 0 18280 NULL
++nouveau_subdev_create__18281 nouveau_subdev_create_ 7 18281 NULL nohasharray
++bio_phys_segments_18281 bio_phys_segments 0 18281 &nouveau_subdev_create__18281
++ext4_readpages_18283 ext4_readpages 4 18283 NULL
++mmc_send_bus_test_18285 mmc_send_bus_test 4 18285 NULL
++um_idi_write_18293 um_idi_write 3 18293 NULL
++nouveau_disp_create__18305 nouveau_disp_create_ 4-7 18305 NULL
++ip6ip6_err_18308 ip6ip6_err 5 18308 NULL
++vga_r_18310 vga_r 0 18310 NULL
++ecryptfs_send_message_18322 ecryptfs_send_message 2 18322 NULL
++bio_integrity_advance_18324 bio_integrity_advance 2 18324 NULL
++pwr_power_save_off_read_18355 pwr_power_save_off_read 3 18355 NULL
++xlbd_reserve_minors_18365 xlbd_reserve_minors 1-2 18365 NULL
++SyS_process_vm_readv_18366 SyS_process_vm_readv 3-5 18366 NULL
++ep_io_18367 ep_io 0 18367 NULL
++qib_user_sdma_num_pages_18371 qib_user_sdma_num_pages 0 18371 NULL
++ci_role_write_18388 ci_role_write 3 18388 NULL
++__video_register_device_18399 __video_register_device 3 18399 NULL
++hash_ip4_expire_18402 hash_ip4_expire 3 18402 NULL nohasharray
++adis16136_show_serial_18402 adis16136_show_serial 3 18402 &hash_ip4_expire_18402
++crystalhd_user_data_18407 crystalhd_user_data 3 18407 NULL
++usbnet_write_cmd_nopm_18426 usbnet_write_cmd_nopm 7 18426 NULL
++batadv_orig_node_add_if_18433 batadv_orig_node_add_if 2 18433 NULL nohasharray
++iscsi_create_flashnode_sess_18433 iscsi_create_flashnode_sess 4 18433 &batadv_orig_node_add_if_18433
++snd_hda_get_connections_18437 snd_hda_get_connections 0 18437 NULL
++fuse_perform_write_18457 fuse_perform_write 4 18457 NULL
++regset_tls_set_18459 regset_tls_set 4 18459 NULL
++dma_alloc_from_contiguous_18466 dma_alloc_from_contiguous 3-2 18466 NULL
++pci_vpd_lrdt_size_18479 pci_vpd_lrdt_size 0 18479 NULL
++udpv6_setsockopt_18487 udpv6_setsockopt 5 18487 NULL
++snd_gus_dram_poke_18525 snd_gus_dram_poke 4 18525 NULL
++nouveau_fifo_channel_create__18530 nouveau_fifo_channel_create_ 5-6-9 18530 NULL
++seq_copy_in_user_18543 seq_copy_in_user 3 18543 NULL
++acpi_register_gsi_ioapic_18550 acpi_register_gsi_ioapic 2 18550 NULL
++sas_change_queue_depth_18555 sas_change_queue_depth 2 18555 NULL
++smk_write_rules_list_18565 smk_write_rules_list 3 18565 NULL
++debug_output_18575 debug_output 3 18575 NULL
++check_lpt_type_18577 check_lpt_type 0 18577 NULL
++__netdev_alloc_skb_18595 __netdev_alloc_skb 2 18595 NULL
++filemap_fdatawait_range_18600 filemap_fdatawait_range 0 18600 NULL nohasharray
++slabinfo_write_18600 slabinfo_write 3 18600 &filemap_fdatawait_range_18600
++iowarrior_write_18604 iowarrior_write 3 18604 NULL
++batadv_arp_get_type_18609 batadv_arp_get_type 3 18609 NULL
++from_buffer_18625 from_buffer 3 18625 NULL
++snd_pcm_oss_write3_18657 snd_pcm_oss_write3 0-3 18657 NULL
++ieee80211_if_fmt_rssi_threshold_18664 ieee80211_if_fmt_rssi_threshold 3 18664 NULL
++unmap_page_18665 unmap_page 2-3 18665 NULL
++xfs_iext_insert_18667 xfs_iext_insert 3 18667 NULL
++replay_log_leb_18704 replay_log_leb 3 18704 NULL
++unlocked_compat_ipmi_ioctl_18708 unlocked_compat_ipmi_ioctl 3 18708 NULL nohasharray
++iwl_dbgfs_rx_handlers_read_18708 iwl_dbgfs_rx_handlers_read 3 18708 &unlocked_compat_ipmi_ioctl_18708
++ceph_alloc_page_vector_18710 ceph_alloc_page_vector 1 18710 NULL
++ocfs2_trim_extent_18711 ocfs2_trim_extent 4-3 18711 NULL
++compat_SyS_writev_18712 compat_SyS_writev 3 18712 NULL
++blk_rq_bytes_18715 blk_rq_bytes 0 18715 NULL
++snd_als4k_gcr_read_addr_18741 snd_als4k_gcr_read_addr 0 18741 NULL
++o2hb_debug_create_18744 o2hb_debug_create 4 18744 NULL
++__erst_read_to_erange_from_nvram_18748 __erst_read_to_erange_from_nvram 0 18748 NULL
++wep_packets_read_18751 wep_packets_read 3 18751 NULL
++md_compat_ioctl_18764 md_compat_ioctl 4 18764 NULL
++read_file_dump_nfcal_18766 read_file_dump_nfcal 3 18766 NULL
++ffs_epfile_read_18775 ffs_epfile_read 3 18775 NULL
++SyS_lsetxattr_18776 SyS_lsetxattr 4 18776 NULL
++alloc_fcdev_18780 alloc_fcdev 1 18780 NULL
++fat_compat_dir_ioctl_18800 fat_compat_dir_ioctl 3 18800 NULL
++ieee80211_auth_challenge_18810 ieee80211_auth_challenge 3 18810 NULL
++madvise_hwpoison_18812 madvise_hwpoison 2 18812 NULL
++setup_ioapic_irq_18813 setup_ioapic_irq 1 18813 NULL
++sys_modify_ldt_18824 sys_modify_ldt 3 18824 NULL
++mtf_test_write_18844 mtf_test_write 3 18844 NULL
++drm_ht_create_18853 drm_ht_create 2 18853 NULL
++sctp_setsockopt_events_18862 sctp_setsockopt_events 3 18862 NULL
++ieee80211_if_read_element_ttl_18869 ieee80211_if_read_element_ttl 3 18869 NULL
++xlog_find_verify_log_record_18870 xlog_find_verify_log_record 2 18870 NULL
++___alloc_bootmem_node_18882 ___alloc_bootmem_node 2-3 18882 NULL
++width_to_agaw_18883 width_to_agaw 0-1 18883 NULL
++ceph_setxattr_18913 ceph_setxattr 4 18913 NULL
++mangle_packet_18920 mangle_packet 7-9 18920 NULL
++snapshot_write_next_18937 snapshot_write_next 0 18937 NULL
++regcache_sync_block_18963 regcache_sync_block 3-4 18963 NULL
++__nla_reserve_18974 __nla_reserve 3 18974 NULL
++gfn_to_pfn_atomic_18981 gfn_to_pfn_atomic 2 18981 NULL
++find_dirtiest_idx_leb_19001 find_dirtiest_idx_leb 0 19001 NULL
++layout_in_gaps_19006 layout_in_gaps 2 19006 NULL
++huge_page_size_19008 huge_page_size 0 19008 NULL
++usbdev_compat_ioctl_19026 usbdev_compat_ioctl 3 19026 NULL
++prepare_highmem_image_19028 prepare_highmem_image 0 19028 NULL
++revalidate_19043 revalidate 2 19043 NULL
++drm_fb_helper_init_19044 drm_fb_helper_init 3-4 19044 NULL
++create_gpadl_header_19064 create_gpadl_header 2 19064 NULL
++ieee80211_key_alloc_19065 ieee80211_key_alloc 3 19065 NULL
++msix_map_region_19072 msix_map_region 2 19072 NULL
++ceph_create_snap_context_19082 ceph_create_snap_context 1 19082 NULL
++sys_process_vm_readv_19090 sys_process_vm_readv 3-5 19090 NULL nohasharray
++brcmf_usbdev_qinit_19090 brcmf_usbdev_qinit 2 19090 &sys_process_vm_readv_19090
++sta_last_seq_ctrl_read_19106 sta_last_seq_ctrl_read 3 19106 NULL
++cifs_readv_from_socket_19109 cifs_readv_from_socket 3 19109 NULL
++skb_gro_offset_19123 skb_gro_offset 0 19123 NULL
++ext4_inode_table_19125 ext4_inode_table 0 19125 NULL
++snd_als4k_iobase_readl_19136 snd_als4k_iobase_readl 0 19136 NULL
++alloc_irdadev_19140 alloc_irdadev 1 19140 NULL
++sleep_auth_read_19159 sleep_auth_read 3 19159 NULL
++smk_write_access2_19170 smk_write_access2 3 19170 NULL
++iwl_dbgfs_reply_tx_error_read_19205 iwl_dbgfs_reply_tx_error_read 3 19205 NULL
++vmw_unlocked_ioctl_19212 vmw_unlocked_ioctl 2 19212 NULL
++__copy_to_user_inatomic_19214 __copy_to_user_inatomic 3 19214 NULL
++dev_counters_read_19216 dev_counters_read 3 19216 NULL
++wbcir_tx_19219 wbcir_tx 3 19219 NULL
++gsi_to_irq_19220 gsi_to_irq 0-1 19220 NULL
++snd_mask_max_19224 snd_mask_max 0 19224 NULL
++bio_alloc_mddev_19238 bio_alloc_mddev 2 19238 NULL
++sys_fcntl_19267 sys_fcntl 3 19267 NULL
++il_dbgfs_rxon_filter_flags_read_19281 il_dbgfs_rxon_filter_flags_read 3 19281 NULL
++io_mapping_map_wc_19284 io_mapping_map_wc 2 19284 NULL
++qc_capture_19298 qc_capture 3 19298 NULL
++ocfs2_prepare_inode_for_refcount_19303 ocfs2_prepare_inode_for_refcount 3-4 19303 NULL
++event_tx_stuck_read_19305 event_tx_stuck_read 3 19305 NULL
++gfn_to_gpa_19320 gfn_to_gpa 0-1 19320 NULL
++debug_read_19322 debug_read 3 19322 NULL
++cfg80211_inform_bss_19332 cfg80211_inform_bss 8 19332 NULL
++closure_sub_19359 closure_sub 2 19359 NULL
++read_zero_19366 read_zero 3 19366 NULL
++interpret_user_input_19393 interpret_user_input 2 19393 NULL
++sync_fill_pt_info_19397 sync_fill_pt_info 0 19397 NULL
++get_n_events_by_type_19401 get_n_events_by_type 0 19401 NULL
++dvbdmx_write_19423 dvbdmx_write 3 19423 NULL
++__phys_addr_19434 __phys_addr 0 19434 NULL
++SyS_sched_getaffinity_19444 SyS_sched_getaffinity 2 19444 NULL
++xfrm_alg_auth_len_19454 xfrm_alg_auth_len 0 19454 NULL
++hpet_compat_ioctl_19455 hpet_compat_ioctl 3 19455 NULL
++gnet_stats_copy_19458 gnet_stats_copy 4 19458 NULL
++sky2_read16_19475 sky2_read16 0 19475 NULL
++efivar_create_sysfs_entry_19485 efivar_create_sysfs_entry 2 19485 NULL
++ext4_add_new_descs_19509 ext4_add_new_descs 3 19509 NULL
++skb_realloc_headroom_19516 skb_realloc_headroom 2 19516 NULL
++dev_alloc_skb_19517 dev_alloc_skb 1 19517 NULL
++nfc_llcp_build_tlv_19536 nfc_llcp_build_tlv 3 19536 NULL
++gfn_to_index_19558 gfn_to_index 0-1-3-2 19558 NULL
++ocfs2_control_message_19564 ocfs2_control_message 3 19564 NULL
++ieee80211_if_read_tkip_mic_test_19565 ieee80211_if_read_tkip_mic_test 3 19565 NULL nohasharray
++wlcore_hw_get_rx_packet_len_19565 wlcore_hw_get_rx_packet_len 0 19565 &ieee80211_if_read_tkip_mic_test_19565
++nfsd_read_19568 nfsd_read 5 19568 NULL
++cgroup_read_s64_19570 cgroup_read_s64 5 19570 NULL
++bm_status_read_19583 bm_status_read 3 19583 NULL
++batadv_tt_update_orig_19586 batadv_tt_update_orig 4 19586 NULL
++load_xattr_datum_19594 load_xattr_datum 0 19594 NULL
++__mei_cl_recv_19636 __mei_cl_recv 3 19636 NULL
++usbvision_rvmalloc_19655 usbvision_rvmalloc 1 19655 NULL
++LoadBitmap_19658 LoadBitmap 2 19658 NULL
++usbnet_write_cmd_19679 usbnet_write_cmd 7 19679 NULL
++bio_detain_19690 bio_detain 2 19690 NULL
++mem_cgroup_swappiness_19718 mem_cgroup_swappiness 0 19718 NULL
++read_reg_19723 read_reg 0 19723 NULL
++wm8350_block_write_19727 wm8350_block_write 3-2 19727 NULL
++memcpy_toiovecend_19736 memcpy_toiovecend 4-3 19736 NULL
++snd_es1968_get_dma_ptr_19747 snd_es1968_get_dma_ptr 0 19747 NULL
++p9_client_read_19750 p9_client_read 5 19750 NULL
++pnpbios_proc_write_19758 pnpbios_proc_write 3 19758 NULL
++ocfs2_readpages_19759 ocfs2_readpages 4 19759 NULL
++jffs2_acl_from_medium_19762 jffs2_acl_from_medium 2 19762 NULL
++__set_print_fmt_19776 __set_print_fmt 0 19776 NULL
++saa7146_vmalloc_build_pgtable_19780 saa7146_vmalloc_build_pgtable 2 19780 NULL
++irda_setsockopt_19824 irda_setsockopt 5 19824 NULL
++pcpu_next_unpop_19831 pcpu_next_unpop 4 19831 NULL
++vfs_getxattr_19832 vfs_getxattr 0 19832 NULL
++security_context_to_sid_19839 security_context_to_sid 2 19839 NULL
++crypt_alloc_buffer_19846 crypt_alloc_buffer 2 19846 NULL
++cfg80211_mlme_register_mgmt_19852 cfg80211_mlme_register_mgmt 5 19852 NULL
++__nla_put_19857 __nla_put 3 19857 NULL
++ip6gre_err_19869 ip6gre_err 5 19869 NULL
++mrp_request_join_19882 mrp_request_join 4 19882 NULL
++aes_decrypt_interrupt_read_19910 aes_decrypt_interrupt_read 3 19910 NULL
++ps_upsd_max_apturn_read_19918 ps_upsd_max_apturn_read 3 19918 NULL
++cgroup_task_count_19930 cgroup_task_count 0 19930 NULL
++iwl_dbgfs_rx_queue_read_19943 iwl_dbgfs_rx_queue_read 3 19943 NULL
++attach_hdlc_protocol_19986 attach_hdlc_protocol 3 19986 NULL
++diva_um_idi_read_20003 diva_um_idi_read 0 20003 NULL
++SYSC_fgetxattr_20027 SYSC_fgetxattr 4 20027 NULL
++split_scan_timeout_read_20029 split_scan_timeout_read 3 20029 NULL
++alloc_ieee80211_20063 alloc_ieee80211 1 20063 NULL
++btrfs_pin_extent_for_log_replay_20069 btrfs_pin_extent_for_log_replay 2 20069 NULL
++rawv6_sendmsg_20080 rawv6_sendmsg 4 20080 NULL
++fuse_conn_limit_read_20084 fuse_conn_limit_read 3 20084 NULL
++team_options_register_20091 team_options_register 3 20091 NULL
++qla2x00_adjust_sdev_qdepth_up_20097 qla2x00_adjust_sdev_qdepth_up 2 20097 NULL
++hptiop_adjust_disk_queue_depth_20122 hptiop_adjust_disk_queue_depth 2 20122 NULL
++tomoyo_commit_ok_20167 tomoyo_commit_ok 2 20167 NULL
++read_flush_pipefs_20171 read_flush_pipefs 3 20171 NULL
++wep_addr_key_count_read_20174 wep_addr_key_count_read 3 20174 NULL
++create_trace_probe_20175 create_trace_probe 1 20175 NULL
++crystalhd_map_dio_20181 crystalhd_map_dio 3 20181 NULL
++udf_bitmap_new_block_20214 udf_bitmap_new_block 4 20214 NULL
++pvr2_ctrl_value_to_sym_20229 pvr2_ctrl_value_to_sym 5 20229 NULL
++rose_sendmsg_20249 rose_sendmsg 4 20249 NULL
++tm6000_i2c_send_regs_20250 tm6000_i2c_send_regs 5 20250 NULL
++pcpu_alloc_20255 pcpu_alloc 1-2 20255 NULL
++_rtl92s_get_h2c_cmdlen_20312 _rtl92s_get_h2c_cmdlen 0 20312 NULL
++tx_tx_burst_programmed_read_20320 tx_tx_burst_programmed_read 3 20320 NULL
++snd_cs4281_BA1_read_20323 snd_cs4281_BA1_read 5 20323 NULL
++gfs2_glock_nq_m_20347 gfs2_glock_nq_m 1 20347 NULL
++handle_arr_calc_size_20355 handle_arr_calc_size 0-1 20355 NULL
++qla82xx_pci_mem_read_direct_20368 qla82xx_pci_mem_read_direct 2 20368 NULL
++smk_set_cipso_20379 smk_set_cipso 3 20379 NULL
++u64_to_uptr_20384 u64_to_uptr 1 20384 NULL
++snd_nm256_readl_20394 snd_nm256_readl 0 20394 NULL
++__kfifo_from_user_20399 __kfifo_from_user 3 20399 NULL
++xen_create_contiguous_region_20457 xen_create_contiguous_region 1 20457 NULL
++nfs3_setxattr_20458 nfs3_setxattr 4 20458 NULL
++dec_zcache_pers_zpages_20465 dec_zcache_pers_zpages 1 20465 NULL
++compat_ipv6_setsockopt_20468 compat_ipv6_setsockopt 5 20468 NULL
++read_buf_20469 read_buf 2 20469 NULL
++btrfs_get_32_20476 btrfs_get_32 0 20476 NULL
++fast_user_write_20494 fast_user_write 5 20494 NULL
++ocfs2_db_frozen_trigger_20503 ocfs2_db_frozen_trigger 4 20503 NULL nohasharray
++hidraw_report_event_20503 hidraw_report_event 3 20503 &ocfs2_db_frozen_trigger_20503
++pcpu_alloc_area_20511 pcpu_alloc_area 0-3 20511 NULL
++pcpu_depopulate_chunk_20517 pcpu_depopulate_chunk 2-3 20517 NULL
++xfs_iext_realloc_direct_20521 xfs_iext_realloc_direct 2 20521 NULL
++drbd_bm_resize_20522 drbd_bm_resize 2 20522 NULL
++amd_create_gatt_pages_20537 amd_create_gatt_pages 1 20537 NULL
++scsi_report_opcode_20551 scsi_report_opcode 3 20551 NULL
++venus_create_20555 venus_create 4 20555 NULL
++btrfs_super_log_root_20565 btrfs_super_log_root 0 20565 NULL
++crypto_ahash_reqsize_20569 crypto_ahash_reqsize 0 20569 NULL
++i915_max_freq_read_20581 i915_max_freq_read 3 20581 NULL
++batadv_tt_append_diff_20588 batadv_tt_append_diff 4 20588 NULL
++sync_timeline_create_20601 sync_timeline_create 2 20601 NULL
++lirc_write_20604 lirc_write 3 20604 NULL
++qib_qsfp_write_20614 qib_qsfp_write 0-4-2 20614 NULL
++snd_pcm_oss_prepare_20641 snd_pcm_oss_prepare 0 20641 NULL
++kfifo_copy_to_user_20646 kfifo_copy_to_user 3 20646 NULL
++cpulist_scnprintf_20648 cpulist_scnprintf 0-2 20648 NULL
++oz_add_farewell_20652 oz_add_farewell 5 20652 NULL
++oz_cdev_read_20659 oz_cdev_read 3 20659 NULL
++snd_hdsp_playback_copy_20676 snd_hdsp_playback_copy 5 20676 NULL
++get_user_page_nowait_20682 get_user_page_nowait 3 20682 NULL nohasharray
++dvb_dmxdev_buffer_read_20682 dvb_dmxdev_buffer_read 0-4 20682 &get_user_page_nowait_20682
++cpumask_size_20683 cpumask_size 0 20683 NULL
++btrfs_node_blockptr_20685 btrfs_node_blockptr 0 20685 NULL
++gru_vtop_20689 gru_vtop 2 20689 NULL
++read_file_tgt_int_stats_20697 read_file_tgt_int_stats 3 20697 NULL
++__maestro_read_20700 __maestro_read 0 20700 NULL
++cipso_v4_gentag_rng_20703 cipso_v4_gentag_rng 0 20703 NULL
++pcpu_page_first_chunk_20712 pcpu_page_first_chunk 1 20712 NULL
++ocfs2_read_xattr_bucket_20722 ocfs2_read_xattr_bucket 0 20722 NULL
++security_context_to_sid_force_20724 security_context_to_sid_force 2 20724 NULL
++vring_add_indirect_20737 vring_add_indirect 3-4 20737 NULL
++io_apic_set_pci_routing_20740 io_apic_set_pci_routing 2 20740 NULL
++vol_cdev_direct_write_20751 vol_cdev_direct_write 3 20751 NULL
++ocfs2_align_bytes_to_clusters_20754 ocfs2_align_bytes_to_clusters 2 20754 NULL
++brcmf_p2p_escan_20763 brcmf_p2p_escan 2 20763 NULL
++ubi_io_read_20767 ubi_io_read 0 20767 NULL
++ext4_r_blocks_count_20768 ext4_r_blocks_count 0 20768 NULL
++fb_alloc_cmap_gfp_20792 fb_alloc_cmap_gfp 2 20792 NULL
++iommu_range_alloc_20794 iommu_range_alloc 3 20794 NULL
++iwl_dbgfs_rxon_flags_read_20795 iwl_dbgfs_rxon_flags_read 3 20795 NULL
++sys_sendto_20809 sys_sendto 6 20809 NULL
++cfv_alloc_and_copy_skb_20812 cfv_alloc_and_copy_skb 4 20812 NULL
++strndup_user_20819 strndup_user 2 20819 NULL
++calc_layout_20829 calc_layout 3 20829 NULL
++dtf_read_channel_20831 dtf_read_channel 3 20831 NULL
++wl1271_format_buffer_20834 wl1271_format_buffer 2 20834 NULL
++uvc_alloc_entity_20836 uvc_alloc_entity 3-4 20836 NULL
++snd_pcm_capture_avail_20867 snd_pcm_capture_avail 0 20867 NULL
++ocfs2_bmap_20874 ocfs2_bmap 2 20874 NULL
++skb_tail_pointer_20878 skb_tail_pointer 0 20878 NULL
++sisusb_send_packet_20891 sisusb_send_packet 2 20891 NULL
++key_icverrors_read_20895 key_icverrors_read 3 20895 NULL
++vfio_msi_enable_20906 vfio_msi_enable 2 20906 NULL
++compat_sys_readv_20911 compat_sys_readv 3 20911 NULL
++htable_bits_20933 htable_bits 0 20933 NULL
++altera_set_ir_post_20948 altera_set_ir_post 2 20948 NULL
++rx_rx_phy_hdr_read_20950 rx_rx_phy_hdr_read 3 20950 NULL
++nfs_map_name_to_uid_20962 nfs_map_name_to_uid 3 20962 NULL
++snd_rme9652_playback_copy_20970 snd_rme9652_playback_copy 5 20970 NULL
++brcmf_tx_frame_20978 brcmf_tx_frame 3 20978 NULL
++alg_setsockopt_20985 alg_setsockopt 5 20985 NULL
++ocfs2_free_clusters_21001 ocfs2_free_clusters 4 21001 NULL
++ceph_osdc_new_request_21017 ceph_osdc_new_request 14-4 21017 NULL
++btrfs_inode_ref_name_len_21024 btrfs_inode_ref_name_len 0 21024 NULL
++rx_defrag_tkip_called_read_21031 rx_defrag_tkip_called_read 3 21031 NULL
++lbs_threshold_read_21046 lbs_threshold_read 5 21046 NULL
++proc_fault_inject_write_21058 proc_fault_inject_write 3 21058 NULL
++event_calibration_read_21083 event_calibration_read 3 21083 NULL
++compat_sock_ioctl_trans_21092 compat_sock_ioctl_trans 4 21092 NULL
++multipath_status_21094 multipath_status 5 21094 NULL
++__cfg80211_send_disassoc_21096 __cfg80211_send_disassoc 3 21096 NULL
++ext2_valid_block_bitmap_21101 ext2_valid_block_bitmap 3 21101 NULL
++ath6kl_send_go_probe_resp_21113 ath6kl_send_go_probe_resp 3 21113 NULL
++bitset_size_in_bytes_21124 bitset_size_in_bytes 0-1 21124 NULL
++i2400m_rx_trace_21127 i2400m_rx_trace 3 21127 NULL
++tps6586x_irq_init_21144 tps6586x_irq_init 3 21144 NULL
++ocfs2_block_check_validate_21149 ocfs2_block_check_validate 2 21149 NULL
++alloc_pg_vec_21159 alloc_pg_vec 3 21159 NULL
++cx18_v4l2_read_21196 cx18_v4l2_read 3 21196 NULL
++ipc_rcu_alloc_21208 ipc_rcu_alloc 1 21208 NULL
++scsi_execute_req_flags_21215 scsi_execute_req_flags 5 21215 NULL
++_ocfs2_free_clusters_21220 _ocfs2_free_clusters 4 21220 NULL
++get_numpages_21227 get_numpages 0-1-2 21227 NULL
++SyS_mlock_21238 SyS_mlock 1 21238 NULL
++input_ff_create_21240 input_ff_create 2 21240 NULL
++cfg80211_notify_new_peer_candidate_21242 cfg80211_notify_new_peer_candidate 4 21242 NULL
++ocfs2_blocks_for_bytes_21268 ocfs2_blocks_for_bytes 0-2 21268 NULL
++ip_vs_icmp_xmit_21269 ip_vs_icmp_xmit 4 21269 NULL
++make_alloc_exact_21279 make_alloc_exact 1-3 21279 NULL
++vmw_gmr2_bind_21305 vmw_gmr2_bind 3 21305 NULL
++do_msg_fill_21307 do_msg_fill 3 21307 NULL
++add_res_range_21310 add_res_range 4 21310 NULL
++get_zeroed_page_21322 get_zeroed_page 0 21322 NULL
++ftrace_profile_read_21327 ftrace_profile_read 3 21327 NULL
++gfs2_ea_get_copy_21353 gfs2_ea_get_copy 0 21353 NULL
++max77693_irq_domain_map_21357 max77693_irq_domain_map 2 21357 NULL
++alloc_orinocodev_21371 alloc_orinocodev 1 21371 NULL
++SYSC_rt_sigpending_21379 SYSC_rt_sigpending 2 21379 NULL
++video_ioctl2_21380 video_ioctl2 2 21380 NULL
++diva_get_driver_dbg_mask_21399 diva_get_driver_dbg_mask 0 21399 NULL
++snd_m3_inw_21406 snd_m3_inw 0 21406 NULL
++snapshot_read_next_21426 snapshot_read_next 0 21426 NULL
++tcp_bound_to_half_wnd_21429 tcp_bound_to_half_wnd 0-2 21429 NULL
++tracing_saved_cmdlines_read_21434 tracing_saved_cmdlines_read 3 21434 NULL
++aggr_size_tx_agg_vs_rate_read_21438 aggr_size_tx_agg_vs_rate_read 3 21438 NULL
++__ertm_hdr_size_21450 __ertm_hdr_size 0 21450 NULL
++concat_writev_21451 concat_writev 3 21451 NULL
++mei_nfc_send_21477 mei_nfc_send 3 21477 NULL
++read_file_xmit_21487 read_file_xmit 3 21487 NULL
++mmc_alloc_sg_21504 mmc_alloc_sg 1 21504 NULL
++btrfs_file_aio_write_21520 btrfs_file_aio_write 4 21520 NULL
++il_dbgfs_stations_read_21532 il_dbgfs_stations_read 3 21532 NULL
++cipso_v4_map_cat_enum_hton_21540 cipso_v4_map_cat_enum_hton 0 21540 NULL
++rxrpc_send_data_21553 rxrpc_send_data 5 21553 NULL
++rx_rx_beacon_early_term_read_21559 rx_rx_beacon_early_term_read 3 21559 NULL
++xfs_buf_read_uncached_21585 xfs_buf_read_uncached 3 21585 NULL
++ocfs2_acl_from_xattr_21604 ocfs2_acl_from_xattr 2 21604 NULL
++compat_SyS_pwritev64_21606 compat_SyS_pwritev64 3 21606 NULL
++__jfs_getxattr_21631 __jfs_getxattr 0 21631 NULL
++validate_nnode_21638 validate_nnode 0 21638 NULL
++__irq_alloc_descs_21639 __irq_alloc_descs 2-1-3 21639 NULL
++carl9170_rx_copy_data_21656 carl9170_rx_copy_data 2 21656 NULL
++atalk_sendmsg_21677 atalk_sendmsg 4 21677 NULL
++ocfs2_xattr_get_nolock_21678 ocfs2_xattr_get_nolock 0 21678 NULL
++regmap_register_patch_21681 regmap_register_patch 3 21681 NULL
++rtllib_alloc_txb_21687 rtllib_alloc_txb 1-2 21687 NULL
++evdev_ioctl_handler_21705 evdev_ioctl_handler 2 21705 NULL
++reiserfs_allocate_list_bitmaps_21732 reiserfs_allocate_list_bitmaps 3 21732 NULL
++vm_brk_21739 vm_brk 1 21739 NULL
++__nf_nat_mangle_tcp_packet_21744 __nf_nat_mangle_tcp_packet 8-6 21744 NULL
++mthca_alloc_init_21754 mthca_alloc_init 2 21754 NULL
++gen_pool_add_21776 gen_pool_add 3 21776 NULL
++xfs_da_grow_inode_int_21785 xfs_da_grow_inode_int 3 21785 NULL
++__ioremap_caller_21800 __ioremap_caller 1-2 21800 NULL
++min_odd_21802 min_odd 0 21802 NULL
++dvb_generic_ioctl_21810 dvb_generic_ioctl 2 21810 NULL
++wm8994_request_irq_21822 wm8994_request_irq 2 21822 NULL
++oom_adj_read_21847 oom_adj_read 3 21847 NULL
++acpi_tb_check_xsdt_21862 acpi_tb_check_xsdt 1 21862 NULL
++lpfc_idiag_extacc_avail_get_21865 lpfc_idiag_extacc_avail_get 0-3 21865 NULL
++brcms_debugfs_hardware_read_21867 brcms_debugfs_hardware_read 3 21867 NULL
++sisusbcon_bmove_21873 sisusbcon_bmove 6-5-7 21873 NULL nohasharray
++tcp_cookie_size_check_21873 tcp_cookie_size_check 0-1 21873 &sisusbcon_bmove_21873
++xen_swiotlb_map_page_21886 xen_swiotlb_map_page 3 21886 NULL
++__alloc_reserved_percpu_21895 __alloc_reserved_percpu 1-2 21895 NULL
++rio_destid_first_21900 rio_destid_first 0 21900 NULL
++dbAllocCtl_21911 dbAllocCtl 0 21911 NULL
++qsfp_1_read_21915 qsfp_1_read 3 21915 NULL
++security_mmap_addr_21970 security_mmap_addr 0 21970 NULL
++alloc_ldt_21972 alloc_ldt 2 21972 NULL
++SYSC_prctl_21980 SYSC_prctl 4 21980 NULL
++rxpipe_descr_host_int_trig_rx_data_read_22001 rxpipe_descr_host_int_trig_rx_data_read 3 22001 NULL nohasharray
++compat_rw_copy_check_uvector_22001 compat_rw_copy_check_uvector 0-3 22001 &rxpipe_descr_host_int_trig_rx_data_read_22001
++regcache_sync_block_raw_flush_22021 regcache_sync_block_raw_flush 3-4 22021 NULL
++btrfs_get_16_22023 btrfs_get_16 0 22023 NULL
++_sp2d_min_pg_22032 _sp2d_min_pg 0 22032 NULL
++zd_usb_read_fw_22049 zd_usb_read_fw 4 22049 NULL
++ieee80211_if_fmt_dropped_frames_ttl_22054 ieee80211_if_fmt_dropped_frames_ttl 3 22054 NULL
++btrfs_reloc_clone_csums_22077 btrfs_reloc_clone_csums 2 22077 NULL
++write_opcode_22082 write_opcode 2 22082 NULL
++mem_rw_22085 mem_rw 3 22085 NULL
++is_swbp_at_addr_22089 is_swbp_at_addr 2 22089 NULL
++lowpan_fragment_xmit_22095 lowpan_fragment_xmit 3-4 22095 NULL
++sched_clock_cpu_22098 sched_clock_cpu 0 22098 NULL
++qlcnic_sriov_pf_enable_22103 qlcnic_sriov_pf_enable 2 22103 NULL
++sys_remap_file_pages_22124 sys_remap_file_pages 1 22124 NULL
++__bitmap_size_22138 __bitmap_size 0 22138 NULL
++compat_insn_22142 compat_insn 2 22142 NULL
++do_tcp_sendpages_22155 do_tcp_sendpages 4 22155 NULL
++__kfifo_alloc_22173 __kfifo_alloc 3 22173 NULL
++fls_22210 fls 0 22210 NULL
++mem_write_22232 mem_write 3 22232 NULL
++p9_virtio_zc_request_22240 p9_virtio_zc_request 6-5 22240 NULL
++atomic64_xchg_22246 atomic64_xchg 0 22246 NULL
++compat_process_vm_rw_22254 compat_process_vm_rw 3-5 22254 NULL
++__btrfs_direct_write_22273 __btrfs_direct_write 4 22273 NULL
++queue_max_sectors_22280 queue_max_sectors 0 22280 NULL
++pci_vpd_srdt_size_22300 pci_vpd_srdt_size 0 22300 NULL nohasharray
++__tun_chr_ioctl_22300 __tun_chr_ioctl 4 22300 &pci_vpd_srdt_size_22300
++extend_brk_22301 extend_brk 0 22301 NULL
++mesh_table_alloc_22305 mesh_table_alloc 1 22305 NULL
++C_SYSC_msgrcv_22320 C_SYSC_msgrcv 2-3 22320 NULL
++get_segment_base_22324 get_segment_base 0 22324 NULL
++radix_tree_find_next_bit_22334 radix_tree_find_next_bit 2-3 22334 NULL
++atomic_read_22342 atomic_read 0 22342 NULL
++mlx4_db_alloc_22358 mlx4_db_alloc 3 22358 NULL
++irq_reserve_irq_22360 irq_reserve_irq 1 22360 NULL nohasharray
++memcg_size_22360 memcg_size 0 22360 &irq_reserve_irq_22360
++snd_pcm_alsa_frames_22363 snd_pcm_alsa_frames 2 22363 NULL
++tps6586x_gpio_to_irq_22365 tps6586x_gpio_to_irq 2 22365 NULL
++evdev_ioctl_22371 evdev_ioctl 2 22371 NULL
++alloc_large_system_hash_22391 alloc_large_system_hash 2 22391 NULL
++btmrvl_psmode_read_22395 btmrvl_psmode_read 3 22395 NULL
++crash_shrink_memory_22401 crash_shrink_memory 1 22401 NULL
++zoran_write_22404 zoran_write 3 22404 NULL
++queue_reply_22416 queue_reply 3 22416 NULL
++__set_enter_print_fmt_22431 __set_enter_print_fmt 0 22431 NULL
++queue_max_segments_22441 queue_max_segments 0 22441 NULL
++handle_received_packet_22457 handle_received_packet 3 22457 NULL
++mem_cgroup_read_22461 mem_cgroup_read 5 22461 NULL
++batadv_check_unicast_packet_22468 batadv_check_unicast_packet 3 22468 NULL
++dtf_write_device_22471 dtf_write_device 3 22471 NULL
++cache_write_procfs_22491 cache_write_procfs 3 22491 NULL
++mp_find_ioapic_pin_22499 mp_find_ioapic_pin 0-2 22499 NULL
++mutex_lock_interruptible_22505 mutex_lock_interruptible 0 22505 NULL
++ip4_addr_string_22511 ip4_addr_string 0 22511 NULL
++swiotlb_tbl_unmap_single_22522 swiotlb_tbl_unmap_single 2 22522 NULL nohasharray
++usb_dump_config_descriptor_22522 usb_dump_config_descriptor 0 22522 &swiotlb_tbl_unmap_single_22522
++pskb_may_pull_22546 pskb_may_pull 2 22546 NULL
++ocfs2_read_extent_block_22550 ocfs2_read_extent_block 0 22550 NULL
++atomic_long_read_unchecked_22551 atomic_long_read_unchecked 0 22551 NULL
++agp_alloc_page_array_22554 agp_alloc_page_array 1 22554 NULL
++dbFindCtl_22587 dbFindCtl 0 22587 NULL
++snapshot_read_22601 snapshot_read 3 22601 NULL
++remove_breakpoint_22628 remove_breakpoint 3 22628 NULL
++sctp_setsockopt_connectx_old_22631 sctp_setsockopt_connectx_old 3 22631 NULL
++ide_core_cp_entry_22636 ide_core_cp_entry 3 22636 NULL
++wl1271_rx_filter_get_fields_size_22638 wl1271_rx_filter_get_fields_size 0 22638 NULL
++pwr_wake_on_timer_exp_read_22640 pwr_wake_on_timer_exp_read 3 22640 NULL
++iwl_dbgfs_calib_disabled_read_22649 iwl_dbgfs_calib_disabled_read 3 22649 NULL
++compat_SyS_msgrcv_22661 compat_SyS_msgrcv 2-3 22661 NULL
++ubifs_leb_write_22679 ubifs_leb_write 4-5 22679 NULL
++qlcnic_83xx_sysfs_flash_write_handler_22680 qlcnic_83xx_sysfs_flash_write_handler 6 22680 NULL
++ocfs2_get_block_22687 ocfs2_get_block 2 22687 NULL
++compat_fd_ioctl_22694 compat_fd_ioctl 4 22694 NULL
++map_22700 map 2 22700 NULL
++alloc_libipw_22708 alloc_libipw 1 22708 NULL
++brcmf_sdbrcm_read_control_22721 brcmf_sdbrcm_read_control 3 22721 NULL
++cx18_copy_buf_to_user_22735 cx18_copy_buf_to_user 4 22735 NULL
++ceph_decode_32_22738 ceph_decode_32 0 22738 NULL nohasharray
++__mei_cl_send_22738 __mei_cl_send 3 22738 &ceph_decode_32_22738
++iio_debugfs_write_reg_22742 iio_debugfs_write_reg 3 22742 NULL
++qlcnic_sriov_init_22762 qlcnic_sriov_init 2 22762 NULL
++print_frame_22769 print_frame 0 22769 NULL
++ftrace_arch_read_dyn_info_22773 ftrace_arch_read_dyn_info 0 22773 NULL
++compat_blkdev_ioctl_22841 compat_blkdev_ioctl 3 22841 NULL
++clone_bio_integrity_22842 clone_bio_integrity 4 22842 NULL
++read_file_rcstat_22854 read_file_rcstat 3 22854 NULL
++do_atm_iobuf_22857 do_atm_iobuf 3 22857 NULL
++create_attr_set_22861 create_attr_set 1 22861 NULL
++vmw_execbuf_process_22885 vmw_execbuf_process 5 22885 NULL
++usblp_new_writeurb_22894 usblp_new_writeurb 2 22894 NULL
++mdc800_device_read_22896 mdc800_device_read 3 22896 NULL
++policy_emit_config_values_22900 policy_emit_config_values 3 22900 NULL
++pcpu_mem_zalloc_22948 pcpu_mem_zalloc 1 22948 NULL
++alloc_sglist_22960 alloc_sglist 1-2-3 22960 NULL
++caif_seqpkt_sendmsg_22961 caif_seqpkt_sendmsg 4 22961 NULL
++vme_get_size_22964 vme_get_size 0 22964 NULL
++tx_frag_key_not_found_read_22971 tx_frag_key_not_found_read 3 22971 NULL
++page_table_range_init_count_22977 page_table_range_init_count 0 22977 NULL
++usb_get_langid_22983 usb_get_langid 0 22983 NULL
++set_msr_hyperv_22985 set_msr_hyperv 3 22985 NULL
++remote_settings_file_write_22987 remote_settings_file_write 3 22987 NULL
++brcmf_sdio_chip_exit_download_23001 brcmf_sdio_chip_exit_download 4 23001 NULL
++viafb_dvp0_proc_write_23023 viafb_dvp0_proc_write 3 23023 NULL
++cifs_local_to_utf16_bytes_23025 cifs_local_to_utf16_bytes 0 23025 NULL
++st_status_23032 st_status 5 23032 NULL
++nv50_disp_chan_create__23056 nv50_disp_chan_create_ 5 23056 NULL
++reiserfs_add_entry_23062 reiserfs_add_entry 4 23062 NULL
++mei_cl_send_23068 mei_cl_send 3 23068 NULL
++kvm_mmu_gva_to_gpa_write_23075 kvm_mmu_gva_to_gpa_write 0 23075 NULL
++vm_map_ram_23078 vm_map_ram 2 23078 NULL nohasharray
++raw_sendmsg_23078 raw_sendmsg 4 23078 &vm_map_ram_23078
++get_user_hdr_len_23079 get_user_hdr_len 0 23079 NULL
++qla4_82xx_pci_mem_read_2M_23081 qla4_82xx_pci_mem_read_2M 2 23081 NULL
++isr_tx_procs_read_23084 isr_tx_procs_read 3 23084 NULL
++lnw_gpio_irq_map_23087 lnw_gpio_irq_map 2 23087 NULL
++rt2x00debug_write_eeprom_23091 rt2x00debug_write_eeprom 3 23091 NULL
++fls_long_23096 fls_long 0 23096 NULL
++ntfs_ucstonls_23097 ntfs_ucstonls 3-5 23097 NULL
++pipe_iov_copy_from_user_23102 pipe_iov_copy_from_user 3 23102 NULL
++mwl8k_cmd_set_beacon_23110 mwl8k_cmd_set_beacon 4 23110 NULL
++nl80211_send_rx_auth_23111 nl80211_send_rx_auth 4 23111 NULL
++__clear_user_23118 __clear_user 0 23118 NULL
++dm_write_async_23120 dm_write_async 3 23120 NULL
++drm_mode_create_tv_properties_23122 drm_mode_create_tv_properties 2 23122 NULL
++ca91cx42_master_set_23146 ca91cx42_master_set 4 23146 NULL
++read_file_ani_23161 read_file_ani 3 23161 NULL
++ioremap_23172 ioremap 1-2 23172 NULL
++tg_get_cfs_quota_23176 tg_get_cfs_quota 0 23176 NULL
++usblp_write_23178 usblp_write 3 23178 NULL
++msnd_fifo_alloc_23179 msnd_fifo_alloc 2 23179 NULL
++gss_pipe_downcall_23182 gss_pipe_downcall 3 23182 NULL
++ieee80211_get_mesh_hdrlen_23183 ieee80211_get_mesh_hdrlen 0 23183 NULL
++fix_unclean_leb_23188 fix_unclean_leb 3 23188 NULL
++mpi_alloc_limb_space_23190 mpi_alloc_limb_space 1 23190 NULL
++convert_ip_to_linear_23198 convert_ip_to_linear 0 23198 NULL
++pm80x_free_irq_23210 pm80x_free_irq 2 23210 NULL nohasharray
++compat_rawv6_ioctl_23210 compat_rawv6_ioctl 3 23210 &pm80x_free_irq_23210
++tty_buffer_request_room_23228 tty_buffer_request_room 2 23228 NULL
++xlog_get_bp_23229 xlog_get_bp 2 23229 NULL
++rxrpc_client_sendmsg_23236 rxrpc_client_sendmsg 5 23236 NULL
++__gfn_to_rmap_23240 __gfn_to_rmap 1-2 23240 NULL
++uwb_dev_addr_print_23282 uwb_dev_addr_print 2 23282 NULL
++ipv6_skip_exthdr_23283 ipv6_skip_exthdr 0-2 23283 NULL
++doc_probe_23285 doc_probe 1 23285 NULL
++diva_get_trace_filter_23286 diva_get_trace_filter 0 23286 NULL
++perf_mmap_free_page_23302 perf_mmap_free_page 1 23302 NULL
++i2cdev_write_23310 i2cdev_write 3 23310 NULL
++mc13xxx_get_num_regulators_dt_23344 mc13xxx_get_num_regulators_dt 0 23344 NULL
++page_readlink_23346 page_readlink 3 23346 NULL
++get_dst_timing_23358 get_dst_timing 0 23358 NULL
++fd_setup_write_same_buf_23369 fd_setup_write_same_buf 3 23369 NULL
++iscsi_change_queue_depth_23416 iscsi_change_queue_depth 2 23416 NULL
++vga_mm_r_23419 vga_mm_r 0 23419 NULL
++vzalloc_node_23424 vzalloc_node 1 23424 NULL
++ocfs2_zero_tail_23447 ocfs2_zero_tail 3 23447 NULL
++hidraw_send_report_23449 hidraw_send_report 3 23449 NULL
++linear_conf_23485 linear_conf 2 23485 NULL nohasharray
++divasa_remap_pci_bar_23485 divasa_remap_pci_bar 3-4 23485 &linear_conf_23485
++event_filter_read_23494 event_filter_read 3 23494 NULL
++__gfn_to_hva_many_23508 __gfn_to_hva_many 0-2 23508 NULL
++ima_show_measurements_count_23536 ima_show_measurements_count 3 23536 NULL
++xen_allocate_irq_gsi_23546 xen_allocate_irq_gsi 1-0 23546 NULL
++tcp_current_mss_23552 tcp_current_mss 0 23552 NULL
++dbg_leb_change_23555 dbg_leb_change 4 23555 NULL
++venus_symlink_23570 venus_symlink 4-6 23570 NULL
++iwl_dbgfs_interrupt_read_23574 iwl_dbgfs_interrupt_read 3 23574 NULL
++snd_interval_min_23590 snd_interval_min 0 23590 NULL
++do_mmap_pgoff_23600 do_mmap_pgoff 0 23600 NULL
++_alloc_cdb_cont_23609 _alloc_cdb_cont 2 23609 NULL
++islpci_mgt_transaction_23610 islpci_mgt_transaction 5 23610 NULL
++__i2400mu_send_barker_23652 __i2400mu_send_barker 3 23652 NULL
++ext3_compat_ioctl_23659 ext3_compat_ioctl 3 23659 NULL
++sInW_23663 sInW 0 23663 NULL
++SyS_connect_23669 SyS_connect 3 23669 NULL
++proc_ioctl_compat_23682 proc_ioctl_compat 2 23682 NULL
++nftl_partscan_23688 nftl_partscan 0 23688 NULL
++cx18_read_23699 cx18_read 3 23699 NULL
++isku_sysfs_write_control_23718 isku_sysfs_write_control 6 23718 NULL
++mp_config_acpi_gsi_23728 mp_config_acpi_gsi 2 23728 NULL
++pack_sg_list_p_23739 pack_sg_list_p 0-2 23739 NULL
++rx_rx_dropped_frame_read_23748 rx_rx_dropped_frame_read 3 23748 NULL
++__kfifo_max_r_23768 __kfifo_max_r 0-2-1 23768 NULL
++__build_packet_message_23778 __build_packet_message 10-4 23778 NULL
++security_inode_getxattr_23781 security_inode_getxattr 0 23781 NULL
++diva_alloc_dma_map_23798 diva_alloc_dma_map 2 23798 NULL
++rx_path_reset_read_23801 rx_path_reset_read 3 23801 NULL
++__earlyonly_bootmem_alloc_23824 __earlyonly_bootmem_alloc 2-3 23824 NULL
++ceph_copy_page_vector_to_user_23829 ceph_copy_page_vector_to_user 3-4 23829 NULL
++tfrc_binsearch_23833 tfrc_binsearch 0 23833 NULL
++xfs_dir2_leaf_getdents_23841 xfs_dir2_leaf_getdents 3 23841 NULL
++pgdat_end_pfn_23842 pgdat_end_pfn 0 23842 NULL
++iwl_dbgfs_nvm_read_23845 iwl_dbgfs_nvm_read 3 23845 NULL
++p54_init_common_23850 p54_init_common 1 23850 NULL
++gart_alloc_coherent_23852 gart_alloc_coherent 2 23852 NULL
++bin_to_hex_dup_23853 bin_to_hex_dup 2 23853 NULL
++ocfs2_xattr_get_clusters_23857 ocfs2_xattr_get_clusters 0 23857 NULL
++ieee80211_if_read_dot11MeshMaxPeerLinks_23878 ieee80211_if_read_dot11MeshMaxPeerLinks 3 23878 NULL
++nes_alloc_resource_23891 nes_alloc_resource 3 23891 NULL
++tipc_snprintf_23893 tipc_snprintf 2 23893 NULL
++add_new_gdb_meta_bg_23911 add_new_gdb_meta_bg 3 23911 NULL nohasharray
++ieee80211_if_read_hw_queues_23911 ieee80211_if_read_hw_queues 3 23911 &add_new_gdb_meta_bg_23911
++f2fs_getxattr_23917 f2fs_getxattr 0 23917 NULL
++ipath_reg_phys_mr_23918 ipath_reg_phys_mr 3 23918 NULL nohasharray
++mpihelp_mul_karatsuba_case_23918 mpihelp_mul_karatsuba_case 5-3 23918 &ipath_reg_phys_mr_23918
++kvm_read_guest_23928 kvm_read_guest 4-2 23928 NULL
++__alloc_skb_23940 __alloc_skb 1 23940 NULL
++uvc_endpoint_max_bpi_23944 uvc_endpoint_max_bpi 0 23944 NULL
++cifs_setxattr_23957 cifs_setxattr 4 23957 NULL
++zd_usb_iowrite16v_async_23984 zd_usb_iowrite16v_async 3 23984 NULL
++cxgb_alloc_mem_24007 cxgb_alloc_mem 1 24007 NULL
++dgrp_send_24028 dgrp_send 0-2 24028 NULL
++ocfs2_mark_extent_refcounted_24035 ocfs2_mark_extent_refcounted 6 24035 NULL
++adis16400_show_serial_number_24037 adis16400_show_serial_number 3 24037 NULL
++afs_cell_alloc_24052 afs_cell_alloc 2 24052 NULL
++brcmf_sdio_ramrw_24074 brcmf_sdio_ramrw 5 24074 NULL
++blkcipher_copy_iv_24075 blkcipher_copy_iv 3 24075 NULL
++vb2_fop_read_24080 vb2_fop_read 3 24080 NULL
++pipeline_post_proc_swi_read_24108 pipeline_post_proc_swi_read 3 24108 NULL
++request_key_auth_read_24109 request_key_auth_read 3 24109 NULL
++mpu401_read_24126 mpu401_read 3 24126 NULL
++irnet_ctrl_write_24139 irnet_ctrl_write 3 24139 NULL
++trim_bitmaps_24158 trim_bitmaps 3 24158 NULL
++set_discard_24162 set_discard 2 24162 NULL
++adu_read_24177 adu_read 3 24177 NULL
++safe_prepare_write_buffer_24187 safe_prepare_write_buffer 3 24187 NULL
++ieee80211_if_read_dot11MeshHWMPpreqMinInterval_24208 ieee80211_if_read_dot11MeshHWMPpreqMinInterval 3 24208 NULL
++efx_vf_size_24213 efx_vf_size 0 24213 NULL
++tcpprobe_sprint_24222 tcpprobe_sprint 0-2 24222 NULL
++pcpu_embed_first_chunk_24224 pcpu_embed_first_chunk 1-2-3 24224 NULL nohasharray
++mei_amthif_read_24224 mei_amthif_read 4 24224 &pcpu_embed_first_chunk_24224
++pci_num_vf_24235 pci_num_vf 0 24235 NULL
++sel_read_bool_24236 sel_read_bool 3 24236 NULL
++dm_cache_save_hint_24257 dm_cache_save_hint 2 24257 NULL
++em28xx_alloc_urbs_24260 em28xx_alloc_urbs 4-6 24260 NULL
++thin_status_24278 thin_status 5 24278 NULL
++compat_sys_preadv64_24283 compat_sys_preadv64 3 24283 NULL
++msg_size_24288 msg_size 0 24288 NULL
++ext2_free_blocks_24292 ext2_free_blocks 2-3 24292 NULL
++map_page_24298 map_page 3-4 24298 NULL
++btmrvl_pscmd_read_24308 btmrvl_pscmd_read 3 24308 NULL
++reserve_metadata_bytes_24313 reserve_metadata_bytes 3 24313 NULL
++ath6kl_add_bss_if_needed_24317 ath6kl_add_bss_if_needed 6 24317 NULL
++ocfs2_direct_IO_get_blocks_24333 ocfs2_direct_IO_get_blocks 2 24333 NULL
++si476x_radio_read_acf_blob_24336 si476x_radio_read_acf_blob 3 24336 NULL
++C_SYSC_pwritev_24345 C_SYSC_pwritev 3 24345 NULL
++kzalloc_node_24352 kzalloc_node 1 24352 NULL
++qla2x00_handle_queue_full_24365 qla2x00_handle_queue_full 2 24365 NULL
++cfi_read_pri_24366 cfi_read_pri 3 24366 NULL
++btrfs_item_size_nr_24367 btrfs_item_size_nr 0 24367 NULL
++igetword_24373 igetword 0 24373 NULL
++pvr2_v4l2_ioctl_24398 pvr2_v4l2_ioctl 2 24398 NULL nohasharray
++getxattr_24398 getxattr 4 24398 &pvr2_v4l2_ioctl_24398
++blk_update_bidi_request_24415 blk_update_bidi_request 3-4 24415 NULL
++b43_debugfs_read_24425 b43_debugfs_read 3 24425 NULL
++iwl_nvm_read_section_24438 iwl_nvm_read_section 0 24438 NULL
++ixgbe_alloc_q_vector_24439 ixgbe_alloc_q_vector 4-6 24439 NULL
++smk_user_access_24440 smk_user_access 3 24440 NULL
++page_address_24444 page_address 0 24444 NULL
++evdev_do_ioctl_24459 evdev_do_ioctl 2 24459 NULL
++ocfs2_write_cluster_by_desc_24466 ocfs2_write_cluster_by_desc 5-6 24466 NULL
++read_file_spec_scan_ctl_24491 read_file_spec_scan_ctl 3 24491 NULL
++pd_video_read_24510 pd_video_read 3 24510 NULL
++request_key_with_auxdata_24515 request_key_with_auxdata 4 24515 NULL
++xfs_buf_get_map_24522 xfs_buf_get_map 3 24522 NULL
++named_prepare_buf_24532 named_prepare_buf 2 24532 NULL
++do_mpage_readpage_24536 do_mpage_readpage 3 24536 NULL
++write_cache_pages_24562 write_cache_pages 0 24562 NULL
++tsi148_alloc_resource_24563 tsi148_alloc_resource 2 24563 NULL
++udf_compute_nr_groups_24594 udf_compute_nr_groups 0 24594 NULL
++count_preds_24600 count_preds 0 24600 NULL
++sensor_hub_get_physical_device_count_24605 sensor_hub_get_physical_device_count 0 24605 NULL
++kvm_pv_enable_async_pf_24637 kvm_pv_enable_async_pf 2 24637 NULL
++context_alloc_24645 context_alloc 3 24645 NULL
++blk_rq_err_bytes_24650 blk_rq_err_bytes 0 24650 NULL
++unifi_net_data_malloc_24716 unifi_net_data_malloc 3 24716 NULL
++read_fs_24717 read_fs 0 24717 NULL
++simple_attr_read_24738 simple_attr_read 3 24738 NULL
++qla2x00_change_queue_depth_24742 qla2x00_change_queue_depth 2 24742 NULL
++ath_rxbuf_alloc_24745 ath_rxbuf_alloc 2 24745 NULL
++get_dma_residue_24749 get_dma_residue 0 24749 NULL
++kgdb_hex2mem_24755 kgdb_hex2mem 3 24755 NULL
++nfsd4_sanitize_slot_size_24756 nfsd4_sanitize_slot_size 0-1 24756 NULL
++i915_cache_sharing_read_24775 i915_cache_sharing_read 3 24775 NULL
++ocfs2_read_blocks_24777 ocfs2_read_blocks 0 24777 NULL
++skb_make_writable_24783 skb_make_writable 2 24783 NULL
++datablob_hmac_verify_24786 datablob_hmac_verify 4 24786 NULL
++cache_read_24790 cache_read 3 24790 NULL
++unpack_str_24798 unpack_str 0 24798 NULL
++__next_cpu_nr_24805 __next_cpu_nr 1 24805 NULL
++comedi_buf_alloc_24822 comedi_buf_alloc 3 24822 NULL
++snd_als4k_gcr_read_24840 snd_als4k_gcr_read 0 24840 NULL
++snd_pcm_lib_buffer_bytes_24865 snd_pcm_lib_buffer_bytes 0 24865 NULL
++pnp_alloc_24869 pnp_alloc 1 24869 NULL nohasharray
++l2cap_create_basic_pdu_24869 l2cap_create_basic_pdu 3 24869 &pnp_alloc_24869
++setup_buffering_24872 setup_buffering 3 24872 NULL
++bnx2fc_cmd_mgr_alloc_24873 bnx2fc_cmd_mgr_alloc 3-2 24873 NULL
++queues_read_24877 queues_read 3 24877 NULL nohasharray
++symbol_string_24877 symbol_string 0 24877 &queues_read_24877
++codec_list_read_file_24910 codec_list_read_file 3 24910 NULL
++v4l2_ctrl_new_24927 v4l2_ctrl_new 7 24927 NULL
++next_token_24929 next_token 0 24929 NULL
++uf_create_device_nodes_24948 uf_create_device_nodes 2 24948 NULL
++ocfs2_fiemap_24949 ocfs2_fiemap 3-4 24949 NULL
++packet_sendmsg_24954 packet_sendmsg 4 24954 NULL
++sys_rt_sigpending_24961 sys_rt_sigpending 2 24961 NULL
++ensure_wear_leveling_24971 ensure_wear_leveling 0 24971 NULL
++twl_i2c_write_u8_24976 twl_i2c_write_u8 3 24976 NULL
++nf_nat_sdp_port_24977 nf_nat_sdp_port 7 24977 NULL
++llc_ui_sendmsg_24987 llc_ui_sendmsg 4 24987 NULL
++key_conf_hw_key_idx_read_25003 key_conf_hw_key_idx_read 3 25003 NULL
++il_dbgfs_channels_read_25005 il_dbgfs_channels_read 3 25005 NULL
++ni_660x_num_counters_25031 ni_660x_num_counters 0 25031 NULL
++nfs_dns_resolve_name_25036 nfs_dns_resolve_name 3 25036 NULL
++gs_buf_alloc_25067 gs_buf_alloc 2 25067 NULL
++SYSC_listxattr_25072 SYSC_listxattr 3 25072 NULL
++ceph_osdc_writepages_25085 ceph_osdc_writepages 5 25085 NULL
++snd_rawmidi_kernel_write_25106 snd_rawmidi_kernel_write 3 25106 NULL
++sys_fgetxattr_25166 sys_fgetxattr 4 25166 NULL
++ipath_init_qp_table_25167 ipath_init_qp_table 2 25167 NULL
++sctp_getsockopt_local_addrs_25178 sctp_getsockopt_local_addrs 2 25178 NULL
++ks8851_rdreg32_25187 ks8851_rdreg32 0 25187 NULL
++ocfs2_block_check_compute_25223 ocfs2_block_check_compute 2 25223 NULL
++free_memcg_kmem_pages_25228 free_memcg_kmem_pages 1 25228 NULL
++dtf_write_string_25232 dtf_write_string 5 25232 NULL
++mon_stat_read_25238 mon_stat_read 3 25238 NULL
++tcf_csum_ipv6_udp_25241 tcf_csum_ipv6_udp 4 25241 NULL
++nilfs_palloc_find_available_slot_25245 nilfs_palloc_find_available_slot 3-5 25245 NULL
++stripe_status_25259 stripe_status 5 25259 NULL
++snd_pcm_start_25273 snd_pcm_start 0 25273 NULL
++crypto_alloc_instance2_25277 crypto_alloc_instance2 3 25277 NULL
++vfs_writev_25278 vfs_writev 3 25278 NULL
++l2tp_session_create_25286 l2tp_session_create 1 25286 NULL
++snd_seq_ioctl_compat_25307 snd_seq_ioctl_compat 3 25307 NULL
++help_25316 help 5 25316 NULL nohasharray
++ath9k_debugfs_read_buf_25316 ath9k_debugfs_read_buf 3 25316 &help_25316
++rng_buffer_size_25348 rng_buffer_size 0 25348 NULL
++SYSC_kexec_load_25361 SYSC_kexec_load 2 25361 NULL
++rio_destid_next_25368 rio_destid_next 2 25368 NULL nohasharray
++unix_mkname_25368 unix_mkname 0-2 25368 &rio_destid_next_25368
++sel_read_mls_25369 sel_read_mls 3 25369 NULL
++tc3589x_gpio_to_irq_25371 tc3589x_gpio_to_irq 2 25371 NULL
++ebt_buf_add_pad_25413 ebt_buf_add_pad 0 25413 NULL
++dai_list_read_file_25421 dai_list_read_file 3 25421 NULL
++ath6kl_wmi_beginscan_cmd_25462 ath6kl_wmi_beginscan_cmd 8 25462 NULL
++generic_file_buffered_write_25464 generic_file_buffered_write 4 25464 NULL
++crypto_hash_digestsize_25469 crypto_hash_digestsize 0 25469 NULL
++ocfs2_hamming_encode_25501 ocfs2_hamming_encode 3 25501 NULL
++ivtv_buf_copy_from_user_25502 ivtv_buf_copy_from_user 4 25502 NULL
++snd_pcm_plugin_build_25505 snd_pcm_plugin_build 5 25505 NULL
++sb_permission_25523 sb_permission 0 25523 NULL
++ext3_get_inode_loc_25542 ext3_get_inode_loc 0 25542 NULL
++ieee80211_if_read_path_refresh_time_25545 ieee80211_if_read_path_refresh_time 3 25545 NULL
++wimax_addr_scnprint_25548 wimax_addr_scnprint 2 25548 NULL
++ht_print_chan_25556 ht_print_chan 0 25556 NULL
++skb_tailroom_25567 skb_tailroom 0 25567 NULL
++find_extend_vma_25597 find_extend_vma 2 25597 NULL
++__devres_alloc_25598 __devres_alloc 2 25598 NULL
++copy_user_generic_25611 copy_user_generic 0 25611 NULL
++proc_coredump_filter_write_25625 proc_coredump_filter_write 3 25625 NULL
++__get_user_pages_25628 __get_user_pages 0-3-4 25628 NULL nohasharray
++befs_utf2nls_25628 befs_utf2nls 3 25628 &__get_user_pages_25628
++__direct_map_25647 __direct_map 5-6 25647 NULL
++ext2_try_to_allocate_25667 ext2_try_to_allocate 4-2 25667 NULL
++aircable_prepare_write_buffer_25669 aircable_prepare_write_buffer 3 25669 NULL
++sta_inactive_ms_read_25690 sta_inactive_ms_read 3 25690 NULL
++ebitmap_start_positive_25703 ebitmap_start_positive 0 25703 NULL
++rx_filter_mc_filter_read_25712 rx_filter_mc_filter_read 3 25712 NULL
++ibmasm_new_command_25714 ibmasm_new_command 2 25714 NULL
++sel_write_context_25726 sel_write_context 3 25726 NULL nohasharray
++__alloc_bootmem_low_node_25726 __alloc_bootmem_low_node 2-3 25726 &sel_write_context_25726
++mcs_unwrap_fir_25733 mcs_unwrap_fir 3 25733 NULL
++ext2_find_near_25734 ext2_find_near 0 25734 NULL
++__set_clear_dirty_25744 __set_clear_dirty 2 25744 NULL
++cxgbi_device_portmap_create_25747 cxgbi_device_portmap_create 3 25747 NULL
++dtf_write_channel_25748 dtf_write_channel 3 25748 NULL
++event_rx_pool_read_25792 event_rx_pool_read 3 25792 NULL
++sg_read_25799 sg_read 3 25799 NULL
++system_enable_read_25815 system_enable_read 3 25815 NULL
++realloc_buffer_25816 realloc_buffer 2 25816 NULL
++mthca_map_user_db_25823 mthca_map_user_db 5 25823 NULL
++pwr_missing_bcns_read_25824 pwr_missing_bcns_read 3 25824 NULL
++parport_read_25855 parport_read 0 25855 NULL
++xfs_dir2_sf_hdr_size_25858 xfs_dir2_sf_hdr_size 0 25858 NULL
++uf_ap_process_data_pdu_25860 uf_ap_process_data_pdu 7 25860 NULL
++key_attr_size_25865 key_attr_size 0 25865 NULL
++ath6kl_regread_read_25884 ath6kl_regread_read 3 25884 NULL
++run_delalloc_nocow_25896 run_delalloc_nocow 3 25896 NULL
++sisusbcon_scroll_area_25899 sisusbcon_scroll_area 4-3 25899 NULL
++lpfc_change_queue_depth_25905 lpfc_change_queue_depth 2 25905 NULL
++nvme_trans_mode_page_create_25908 nvme_trans_mode_page_create 7 25908 NULL
++do_jffs2_setxattr_25910 do_jffs2_setxattr 5 25910 NULL
++rcname_read_25919 rcname_read 3 25919 NULL
++snd_es1938_capture_copy_25930 snd_es1938_capture_copy 5 25930 NULL
++key_flags_read_25931 key_flags_read 3 25931 NULL
++copy_play_buf_25932 copy_play_buf 3 25932 NULL
++flush_25957 flush 2 25957 NULL
++video_register_device_25971 video_register_device 3 25971 NULL
++udp_setsockopt_25985 udp_setsockopt 5 25985 NULL
++ebt_compat_entry_padsize_26001 ebt_compat_entry_padsize 0 26001 NULL
++lpfc_sli_probe_sriov_nr_virtfn_26004 lpfc_sli_probe_sriov_nr_virtfn 2 26004 NULL
++irq_create_strict_mappings_26025 irq_create_strict_mappings 2-4 26025 NULL
++xfs_xattr_acl_set_26028 xfs_xattr_acl_set 4 26028 NULL
++skb_mac_header_26034 skb_mac_header 0 26034 NULL
++mptscsih_change_queue_depth_26036 mptscsih_change_queue_depth 2 26036 NULL
++selinux_inode_post_setxattr_26037 selinux_inode_post_setxattr 4 26037 NULL
++tun_do_read_26047 tun_do_read 5 26047 NULL
++__alloc_memory_core_early_26053 __alloc_memory_core_early 2-3 26053 NULL
++keyctl_update_key_26061 keyctl_update_key 3 26061 NULL
++rx_rx_wa_density_dropped_frame_read_26095 rx_rx_wa_density_dropped_frame_read 3 26095 NULL
++skb_cow_26138 skb_cow 2 26138 NULL
++usb_dump_device_strings_26146 usb_dump_device_strings 0 26146 NULL
++copy_oldmem_page_26164 copy_oldmem_page 3-1 26164 NULL
++gfs2_xattr_acl_get_26166 gfs2_xattr_acl_get 0 26166 NULL nohasharray
++ath6kl_roam_table_read_26166 ath6kl_roam_table_read 3 26166 &gfs2_xattr_acl_get_26166
++perf_adjust_period_26168 perf_adjust_period 2-3 26168 NULL
++mid_get_vbt_data_r1_26170 mid_get_vbt_data_r1 2 26170 NULL
++disk_devt_26180 disk_devt 0 26180 NULL
++get_registers_26187 get_registers 3 26187 NULL
++cgroup_setxattr_26188 cgroup_setxattr 4 26188 NULL
++ieee80211_if_fmt_dot11MeshTTL_26198 ieee80211_if_fmt_dot11MeshTTL 3 26198 NULL
++xfs_idata_realloc_26199 xfs_idata_realloc 2 26199 NULL
++mce_write_26201 mce_write 3 26201 NULL
++_scsih_change_queue_depth_26230 _scsih_change_queue_depth 2 26230 NULL
++bio_split_26235 bio_split 2 26235 NULL
++crypto_ctxsize_26278 crypto_ctxsize 0 26278 NULL
++wacom_set_device_mode_26280 wacom_set_device_mode 3 26280 NULL
++ext2_find_goal_26306 ext2_find_goal 0 26306 NULL
++snd_pcm_plug_client_channels_buf_26309 snd_pcm_plug_client_channels_buf 0-3 26309 NULL nohasharray
++pax_get_random_long_26309 pax_get_random_long 0 26309 &snd_pcm_plug_client_channels_buf_26309
++pwr_wake_on_host_read_26321 pwr_wake_on_host_read 3 26321 NULL
++efx_rx_mk_skb_26342 efx_rx_mk_skb 5 26342 NULL
++ocfs2_duplicate_clusters_by_page_26357 ocfs2_duplicate_clusters_by_page 5 26357 NULL
++cifs_readdata_alloc_26360 cifs_readdata_alloc 1 26360 NULL
++dup_to_netobj_26363 dup_to_netobj 3 26363 NULL
++invalidate_inode_pages2_range_26403 invalidate_inode_pages2_range 0 26403 NULL
++dma_declare_contiguous_26455 dma_declare_contiguous 2 26455 NULL
++ib_alloc_device_26483 ib_alloc_device 1 26483 NULL
++ulong_write_file_26485 ulong_write_file 3 26485 NULL
++dvb_ca_en50221_io_ioctl_26490 dvb_ca_en50221_io_ioctl 2 26490 NULL
++read_vmcore_26501 read_vmcore 3 26501 NULL
++vfio_pci_set_msi_trigger_26507 vfio_pci_set_msi_trigger 3-4 26507 NULL
++iwl_dbgfs_rf_reset_read_26512 iwl_dbgfs_rf_reset_read 3 26512 NULL
++__vhost_add_used_n_26554 __vhost_add_used_n 3 26554 NULL
++ip6_addr_string_26568 ip6_addr_string 0 26568 NULL
++kvm_iommu_put_pages_26571 kvm_iommu_put_pages 2 26571 NULL
++rts51x_read_mem_26577 rts51x_read_mem 4 26577 NULL nohasharray
++batadv_receive_server_sync_packet_26577 batadv_receive_server_sync_packet 3 26577 &rts51x_read_mem_26577
++cirrusfb_get_memsize_26597 cirrusfb_get_memsize 0 26597 NULL
++regcache_set_reg_present_26598 regcache_set_reg_present 2 26598 NULL
++__unmap_single_26604 __unmap_single 2-3 26604 NULL
++iommu_alloc_26621 iommu_alloc 4 26621 NULL
++pack_value_26625 pack_value 1 26625 NULL
++pwr_fix_tsf_ps_read_26627 pwr_fix_tsf_ps_read 3 26627 NULL
++irq_alloc_generic_chip_26650 irq_alloc_generic_chip 2 26650 NULL nohasharray
++inb_p_26650 inb_p 0 26650 &irq_alloc_generic_chip_26650
++cipso_v4_map_cat_rbm_hton_26680 cipso_v4_map_cat_rbm_hton 0 26680 NULL
++__alloc_pred_stack_26687 __alloc_pred_stack 2 26687 NULL
++rtllib_authentication_req_26713 rtllib_authentication_req 3 26713 NULL
++aty_ld_le32_26720 aty_ld_le32 0 26720 NULL
++nouveau_namedb_create__26732 nouveau_namedb_create_ 7 26732 NULL
++SyS_fcntl_26737 SyS_fcntl 3 26737 NULL
++pipeline_tcp_rx_stat_fifo_int_read_26745 pipeline_tcp_rx_stat_fifo_int_read 3 26745 NULL
++srp_ring_alloc_26760 srp_ring_alloc 2 26760 NULL
++snd_hda_get_raw_connections_26762 snd_hda_get_raw_connections 0 26762 NULL
++qlcnic_alloc_sds_rings_26795 qlcnic_alloc_sds_rings 2 26795 NULL
++cipso_v4_genopt_26812 cipso_v4_genopt 0 26812 NULL
++iwl_trans_read_mem32_26825 iwl_trans_read_mem32 0 26825 NULL
++smk_write_load_26829 smk_write_load 3 26829 NULL
++sizeof_pwm_leds_priv_26830 sizeof_pwm_leds_priv 0-1 26830 NULL
++slgt_compat_ioctl_26834 slgt_compat_ioctl 3 26834 NULL
++__nodes_onto_26838 __nodes_onto 4 26838 NULL
++scnprint_id_26842 scnprint_id 3 26842 NULL
++ecryptfs_miscdev_write_26847 ecryptfs_miscdev_write 3 26847 NULL
++netxen_nic_hw_read_wx_128M_26858 netxen_nic_hw_read_wx_128M 2 26858 NULL
++svc_print_xprts_26881 svc_print_xprts 0 26881 NULL
++ext2_compat_ioctl_26883 ext2_compat_ioctl 3 26883 NULL
++slhc_uncompress_26905 slhc_uncompress 0-3 26905 NULL
++x25_asy_change_mtu_26928 x25_asy_change_mtu 2 26928 NULL
++compat_mtw_from_user_26932 compat_mtw_from_user 0 26932 NULL
++scsi_tgt_copy_sense_26933 scsi_tgt_copy_sense 3 26933 NULL
++pwr_ps_enter_read_26935 pwr_ps_enter_read 3 26935 NULL nohasharray
++sctp_setsockopt_adaptation_layer_26935 sctp_setsockopt_adaptation_layer 3 26935 &pwr_ps_enter_read_26935
++carl9170_handle_mpdu_26940 carl9170_handle_mpdu 3 26940 NULL nohasharray
++create_bm_block_list_26940 create_bm_block_list 0 26940 &carl9170_handle_mpdu_26940
++hecubafb_write_26942 hecubafb_write 3 26942 NULL
++extract_entropy_user_26952 extract_entropy_user 3 26952 NULL nohasharray
++do_trimming_26952 do_trimming 3 26952 &extract_entropy_user_26952
++pcf857x_irq_domain_map_26998 pcf857x_irq_domain_map 2 26998 NULL
++swiotlb_bounce_27046 swiotlb_bounce 2-1 27046 NULL
++ufs_alloc_fragments_27059 ufs_alloc_fragments 3-0-2 27059 NULL
++__videobuf_alloc_vb_27062 __videobuf_alloc_vb 1 27062 NULL
++snd_pcm_lib_period_bytes_27071 snd_pcm_lib_period_bytes 0 27071 NULL
++paravirt_read_msr_27077 paravirt_read_msr 0 27077 NULL
++alloc_fdmem_27083 alloc_fdmem 1 27083 NULL
++compat_SyS_rt_sigpending_27084 compat_SyS_rt_sigpending 2 27084 NULL
++find_first_bit_27088 find_first_bit 0-2 27088 NULL
++btmrvl_hscmd_write_27089 btmrvl_hscmd_write 3 27089 NULL
++nes_reg_user_mr_27106 nes_reg_user_mr 2-3 27106 NULL
++__devcgroup_inode_permission_27108 __devcgroup_inode_permission 0 27108 NULL
++SYSC_ipc_27123 SYSC_ipc 3 27123 NULL
++get_kernel_page_27133 get_kernel_page 0 27133 NULL
++drbd_get_capacity_27141 drbd_get_capacity 0 27141 NULL
++pms_capture_27142 pms_capture 4 27142 NULL
++btmrvl_hscfgcmd_write_27143 btmrvl_hscfgcmd_write 3 27143 NULL
++snd_compr_calc_avail_27165 snd_compr_calc_avail 0 27165 NULL
++i2400m_net_rx_27170 i2400m_net_rx 5 27170 NULL
++ieee80211_if_read_rc_rateidx_mask_5ghz_27183 ieee80211_if_read_rc_rateidx_mask_5ghz 3 27183 NULL
++mmc_blk_compat_ioctl_27194 mmc_blk_compat_ioctl 4 27194 NULL
++dbAllocAG_27228 dbAllocAG 0 27228 NULL
++rxrpc_request_key_27235 rxrpc_request_key 3 27235 NULL
++cfpkt_add_trail_27260 cfpkt_add_trail 3 27260 NULL
++__dma_map_cont_27289 __dma_map_cont 5 27289 NULL
++hpi_read_reg_27302 hpi_read_reg 0 27302 NULL
++copy_from_buf_27308 copy_from_buf 4-2 27308 NULL
++virtqueue_add_inbuf_27312 virtqueue_add_inbuf 3 27312 NULL nohasharray
++ath6kl_wmi_test_cmd_27312 ath6kl_wmi_test_cmd 3 27312 &virtqueue_add_inbuf_27312
++ocfs2_blocks_to_clusters_27327 ocfs2_blocks_to_clusters 0-2 27327 NULL
++snd_pcm_oss_write2_27332 snd_pcm_oss_write2 3-0 27332 NULL
++afs_cell_create_27346 afs_cell_create 2 27346 NULL
++compat_SyS_semctl_27349 compat_SyS_semctl 4 27349 NULL
++pcbit_stat_27364 pcbit_stat 2 27364 NULL
++init_memory_mapping_27395 init_memory_mapping 0 27395 NULL
++phys_pte_init_27411 phys_pte_init 0-3-2 27411 NULL
++ib_dma_map_sg_27413 ib_dma_map_sg 0 27413 NULL
++acpi_os_get_root_pointer_27416 acpi_os_get_root_pointer 0 27416 NULL nohasharray
++ieee80211_if_read_smps_27416 ieee80211_if_read_smps 3 27416 &acpi_os_get_root_pointer_27416
++pack_sg_list_27425 pack_sg_list 0-2 27425 NULL
++ktime_to_us_27455 ktime_to_us 0 27455 NULL
++v4l2_ctrl_new_std_menu_items_27487 v4l2_ctrl_new_std_menu_items 4 27487 NULL
++set_tpl_pfs_27490 set_tpl_pfs 3 27490 NULL
++hcd_buffer_alloc_27495 hcd_buffer_alloc 2 27495 NULL
++qib_create_cq_27497 qib_create_cq 2 27497 NULL
++ip_set_get_h32_27498 ip_set_get_h32 0 27498 NULL
++btrfs_get_64_27499 btrfs_get_64 0 27499 NULL
++__usbnet_write_cmd_27500 __usbnet_write_cmd 7 27500 NULL
++garmin_read_process_27509 garmin_read_process 3 27509 NULL
++ib_copy_to_udata_27525 ib_copy_to_udata 3 27525 NULL
++snd_sonicvibes_getdmaa_27552 snd_sonicvibes_getdmaa 0 27552 NULL
++SyS_fgetxattr_27571 SyS_fgetxattr 4 27571 NULL
++libipw_alloc_txb_27579 libipw_alloc_txb 1-2-3 27579 NULL
++read_flush_procfs_27642 read_flush_procfs 3 27642 NULL nohasharray
++nl80211_send_connect_result_27642 nl80211_send_connect_result 5-7 27642 &read_flush_procfs_27642 nohasharray
++ocfs2_xattr_ibody_get_27642 ocfs2_xattr_ibody_get 0 27642 &nl80211_send_connect_result_27642
++add_new_gdb_27643 add_new_gdb 3 27643 NULL
++qnx6_readpages_27657 qnx6_readpages 4 27657 NULL
++cdrom_read_cdda_old_27664 cdrom_read_cdda_old 4 27664 NULL
++set_bypass_pwoff_pfs_27669 set_bypass_pwoff_pfs 3 27669 NULL
++qword_get_27670 qword_get 0 27670 NULL
++ocfs2_extend_dir_27695 ocfs2_extend_dir 4 27695 NULL
++fs_path_add_from_extent_buffer_27702 fs_path_add_from_extent_buffer 4 27702 NULL
++inc_zcache_eph_zbytes_27704 inc_zcache_eph_zbytes 1 27704 NULL
++evm_write_key_27715 evm_write_key 3 27715 NULL
++ieee80211_if_fmt_dot11MeshGateAnnouncementProtocol_27722 ieee80211_if_fmt_dot11MeshGateAnnouncementProtocol 3 27722 NULL
++reg_w_buf_27724 reg_w_buf 3 27724 NULL
++xfs_dir2_block_sfsize_27727 xfs_dir2_block_sfsize 0 27727 NULL
++a4t_cs_init_27734 a4t_cs_init 3 27734 NULL
++SyS_setsockopt_27759 SyS_setsockopt 5 27759 NULL
++kcalloc_27770 kcalloc 1-2 27770 NULL
++twl4030_set_gpio_dataout_27792 twl4030_set_gpio_dataout 1 27792 NULL
++DivaSTraceGetMemotyRequirement_27797 DivaSTraceGetMemotyRequirement 0-1 27797 NULL
++ttm_object_file_init_27804 ttm_object_file_init 2 27804 NULL
++mpihelp_mul_27805 mpihelp_mul 5-3 27805 NULL
++fwtty_buffer_rx_27821 fwtty_buffer_rx 3 27821 NULL
++init_header_complete_27833 init_header_complete 0 27833 NULL nohasharray
++sys_listxattr_27833 sys_listxattr 3 27833 &init_header_complete_27833
++read_profile_27859 read_profile 3 27859 NULL
++sky2_pci_read16_27863 sky2_pci_read16 0 27863 NULL
++ieee80211_if_read_dot11MeshHWMProotInterval_27873 ieee80211_if_read_dot11MeshHWMProotInterval 3 27873 NULL
++unix_seqpacket_sendmsg_27893 unix_seqpacket_sendmsg 4 27893 NULL
++gluebi_write_27905 gluebi_write 3 27905 NULL
++SyS_ptrace_27924 SyS_ptrace 3-4 27924 NULL
++bm_find_next_27929 bm_find_next 2 27929 NULL
++tracing_clock_write_27961 tracing_clock_write 3 27961 NULL
++tipc_media_addr_printf_27971 tipc_media_addr_printf 2 27971 NULL
++mic_rx_pkts_read_27972 mic_rx_pkts_read 3 27972 NULL
++f2fs_bio_alloc_27983 f2fs_bio_alloc 2 27983 NULL
++edt_ft5x06_debugfs_raw_data_read_28002 edt_ft5x06_debugfs_raw_data_read 3 28002 NULL
++snd_rawmidi_write_28008 snd_rawmidi_write 3 28008 NULL
++serial8250_port_size_28019 serial8250_port_size 0 28019 NULL
++alloc_one_pg_vec_page_28031 alloc_one_pg_vec_page 1 28031 NULL
++sctp_setsockopt_maxburst_28041 sctp_setsockopt_maxburst 3 28041 NULL
++rts51x_xd_rw_28046 rts51x_xd_rw 3-4 28046 NULL
++cx231xx_init_vbi_isoc_28053 cx231xx_init_vbi_isoc 3-2 28053 NULL
++pool_status_28055 pool_status 5 28055 NULL
++lpfc_idiag_mbxacc_read_28061 lpfc_idiag_mbxacc_read 3 28061 NULL
++tx_frag_bad_mblk_num_read_28064 tx_frag_bad_mblk_num_read 3 28064 NULL
++ext4_read_block_bitmap_nowait_28078 ext4_read_block_bitmap_nowait 2 28078 NULL
++GetRecvByte_28082 GetRecvByte 0 28082 NULL
++platform_get_irq_28088 platform_get_irq 0 28088 NULL
++gdth_init_isa_28091 gdth_init_isa 1 28091 NULL
++mmc_test_alloc_mem_28102 mmc_test_alloc_mem 3-2 28102 NULL
++rx_defrag_need_defrag_read_28117 rx_defrag_need_defrag_read 3 28117 NULL
++vgacon_adjust_height_28124 vgacon_adjust_height 2 28124 NULL
++video_read_28148 video_read 3 28148 NULL
++snd_midi_channel_alloc_set_28153 snd_midi_channel_alloc_set 1 28153 NULL
++stats_dot11FCSErrorCount_read_28154 stats_dot11FCSErrorCount_read 3 28154 NULL
++vread_28173 vread 0 28173 NULL
++macvtap_get_user_28185 macvtap_get_user 4 28185 NULL
++d_path_28198 d_path 0 28198 NULL
++nouveau_mxm_create__28200 nouveau_mxm_create_ 4 28200 NULL
++__qp_memcpy_from_queue_28220 __qp_memcpy_from_queue 3-4 28220 NULL
++line6_alloc_sysex_buffer_28225 line6_alloc_sysex_buffer 4 28225 NULL nohasharray
++set_dis_disc_pfs_28225 set_dis_disc_pfs 3 28225 &line6_alloc_sysex_buffer_28225
++amd_nb_num_28228 amd_nb_num 0 28228 NULL
++ext4_validate_block_bitmap_28243 ext4_validate_block_bitmap 3 28243 NULL
++usemap_size_28281 usemap_size 0 28281 NULL
++dma_map_sg_attrs_28289 dma_map_sg_attrs 0 28289 NULL
++acpi_register_gsi_xen_28305 acpi_register_gsi_xen 2 28305 NULL nohasharray
++nouveau_compat_ioctl_28305 nouveau_compat_ioctl 2 28305 &acpi_register_gsi_xen_28305
++__mlock_vma_pages_range_28315 __mlock_vma_pages_range 2-3 28315 NULL
++snd_pcm_oss_read_28317 snd_pcm_oss_read 3 28317 NULL
++bm_entry_write_28338 bm_entry_write 3 28338 NULL
++snapshot_write_28351 snapshot_write 3 28351 NULL
++sys_writev_28384 sys_writev 3 28384 NULL
++dlmfs_file_read_28385 dlmfs_file_read 3 28385 NULL
++tx_frag_cache_miss_read_28394 tx_frag_cache_miss_read 3 28394 NULL
++set_bypass_pfs_28395 set_bypass_pfs 3 28395 NULL
++bypass_pwup_write_28416 bypass_pwup_write 3 28416 NULL
++subdev_ioctl_28417 subdev_ioctl 2 28417 NULL
++__split_large_page_28429 __split_large_page 2 28429 NULL
++mpage_readpages_28436 mpage_readpages 3 28436 NULL
++set_memory_uc_28439 set_memory_uc 1 28439 NULL
++snd_emu10k1_efx_read_28452 snd_emu10k1_efx_read 2 28452 NULL
++key_mic_failures_read_28457 key_mic_failures_read 3 28457 NULL
++alloc_irq_cpu_rmap_28459 alloc_irq_cpu_rmap 1 28459 NULL
++vmw_du_crtc_cursor_set_28479 vmw_du_crtc_cursor_set 4-5 28479 NULL
++ocfs2_backup_super_blkno_28484 ocfs2_backup_super_blkno 0-2 28484 NULL
++max_response_pages_28492 max_response_pages 0 28492 NULL
++clear_discard_28494 clear_discard 2 28494 NULL
++ps_poll_upsd_utilization_read_28519 ps_poll_upsd_utilization_read 3 28519 NULL
++__next_node_28521 __next_node 1 28521 NULL
++i2400m_tx_stats_read_28527 i2400m_tx_stats_read 3 28527 NULL
++sel_read_policycap_28544 sel_read_policycap 3 28544 NULL
++run_delalloc_range_28545 run_delalloc_range 3 28545 NULL nohasharray
++mptctl_getiocinfo_28545 mptctl_getiocinfo 2 28545 &run_delalloc_range_28545
++b43legacy_debugfs_write_28556 b43legacy_debugfs_write 3 28556 NULL
++asymmetric_verify_28567 asymmetric_verify 3 28567 NULL
++phys_pud_init_28574 phys_pud_init 0-3-2 28574 NULL
++cfg80211_send_rx_auth_28580 cfg80211_send_rx_auth 3 28580 NULL
++oxygen_read32_28582 oxygen_read32 0 28582 NULL
++ocfs2_read_dir_block_28587 ocfs2_read_dir_block 2 28587 NULL
++extract_entropy_28604 extract_entropy 3-5 28604 NULL
++kfifo_unused_28612 kfifo_unused 0 28612 NULL
++mp_override_legacy_irq_28618 mp_override_legacy_irq 4 28618 NULL
++snd_nm256_capture_copy_28622 snd_nm256_capture_copy 5-3 28622 NULL
++_set_range_28627 _set_range 3 28627 NULL
++v4l2_compat_ioctl32_28630 v4l2_compat_ioctl32 3 28630 NULL
++setup_usemap_28636 setup_usemap 3-4 28636 NULL
++blk_queue_resize_tags_28670 blk_queue_resize_tags 2 28670 NULL
++__dev_alloc_skb_28681 __dev_alloc_skb 1 28681 NULL
++nl80211_send_new_peer_candidate_28692 nl80211_send_new_peer_candidate 5 28692 NULL nohasharray
++kvm_mmu_get_page_28692 kvm_mmu_get_page 2 28692 &nl80211_send_new_peer_candidate_28692
++drm_plane_init_28731 drm_plane_init 6 28731 NULL
++spi_execute_28736 spi_execute 5 28736 NULL
++snd_pcm_aio_write_28738 snd_pcm_aio_write 3 28738 NULL nohasharray
++phantom_compat_ioctl_28738 phantom_compat_ioctl 3 28738 &snd_pcm_aio_write_28738
++read_file_btcoex_28743 read_file_btcoex 3 28743 NULL
++max_hw_blocks_28748 max_hw_blocks 0 28748 NULL
++ath6kl_get_num_reg_28780 ath6kl_get_num_reg 0 28780 NULL
++dvb_net_sec_callback_28786 dvb_net_sec_callback 2 28786 NULL
++btrfs_block_rsv_refill_28800 btrfs_block_rsv_refill 3 28800 NULL nohasharray
++sel_write_member_28800 sel_write_member 3 28800 &btrfs_block_rsv_refill_28800
++cgroup_file_read_28804 cgroup_file_read 3 28804 NULL
++btrfs_ref_to_path_28809 btrfs_ref_to_path 0 28809 NULL
++memory_bm_create_28814 memory_bm_create 0 28814 NULL
++iwl_dbgfs_rxon_filter_flags_read_28832 iwl_dbgfs_rxon_filter_flags_read 3 28832 NULL
++C_SYSC_shmat_28843 C_SYSC_shmat 2 28843 NULL
++vp_request_msix_vectors_28849 vp_request_msix_vectors 2 28849 NULL
++ipv6_renew_options_28867 ipv6_renew_options 5 28867 NULL
++packet_sendmsg_spkt_28885 packet_sendmsg_spkt 4 28885 NULL
++to_cblock_28899 to_cblock 0-1 28899 NULL
++da9055_group_write_28904 da9055_group_write 2-3 28904 NULL
++ps_upsd_timeouts_read_28924 ps_upsd_timeouts_read 3 28924 NULL
++ocfs2_frozen_trigger_28929 ocfs2_frozen_trigger 4 28929 NULL
++push_rx_28939 push_rx 3 28939 NULL
++btrfs_trim_block_group_28963 btrfs_trim_block_group 3 28963 NULL
++alloc_sched_domains_28972 alloc_sched_domains 1 28972 NULL
++ext4_mb_add_groupinfo_28988 ext4_mb_add_groupinfo 2 28988 NULL
++bin_uuid_28999 bin_uuid 3 28999 NULL
++offset_to_bitmap_29004 offset_to_bitmap 2 29004 NULL
++xz_dec_init_29029 xz_dec_init 2 29029 NULL
++sys_fcntl64_29031 sys_fcntl64 3 29031 NULL
++ieee80211_if_read_ht_opmode_29044 ieee80211_if_read_ht_opmode 3 29044 NULL
++rxrpc_sendmsg_29049 rxrpc_sendmsg 4 29049 NULL
++iso_packets_buffer_init_29061 iso_packets_buffer_init 3-4 29061 NULL
++lpfc_idiag_extacc_drivr_get_29067 lpfc_idiag_extacc_drivr_get 0-3 29067 NULL
++memblock_alloc_base_nid_29072 memblock_alloc_base_nid 1-2 29072 NULL
++sctp_getsockopt_assoc_stats_29074 sctp_getsockopt_assoc_stats 2 29074 NULL
++mark_extents_written_29082 mark_extents_written 2 29082 NULL
++i915_error_object_create_sized_29091 i915_error_object_create_sized 3 29091 NULL
++isdn_ppp_write_29109 isdn_ppp_write 4 29109 NULL
++snprintf_29125 snprintf 0 29125 NULL
++iov_shorten_29130 iov_shorten 0 29130 NULL
++proc_scsi_write_29142 proc_scsi_write 3 29142 NULL
++reshape_ring_29147 reshape_ring 2 29147 NULL
++alloc_irqs_from_29152 alloc_irqs_from 1-2 29152 NULL
++drm_property_create_enum_29201 drm_property_create_enum 5 29201 NULL
++wusb_prf_256_29203 wusb_prf_256 7 29203 NULL nohasharray
++alloc_group_attrs_29203 alloc_group_attrs 3 29203 &wusb_prf_256_29203
++__mm_populate_29204 __mm_populate 1 29204 NULL
++comedi_alloc_subdevices_29207 comedi_alloc_subdevices 2 29207 NULL
++do_shrinker_shrink_29208 do_shrinker_shrink 0 29208 NULL
++iwl_dbgfs_temperature_read_29224 iwl_dbgfs_temperature_read 3 29224 NULL
++nvme_trans_copy_from_user_29227 nvme_trans_copy_from_user 3 29227 NULL
++devm_ioremap_29235 devm_ioremap 2-3 29235 NULL
++irq_domain_add_linear_29236 irq_domain_add_linear 2 29236 NULL
++recover_peb_29238 recover_peb 6-7 29238 NULL
++security_context_to_sid_core_29248 security_context_to_sid_core 2 29248 NULL
++block_div_29268 block_div 0-1-2 29268 NULL
++prism2_set_genericelement_29277 prism2_set_genericelement 3 29277 NULL
++bitmap_ord_to_pos_29279 bitmap_ord_to_pos 3 29279 NULL
++sn9c102_read_29305 sn9c102_read 3 29305 NULL
++__fuse_get_req_29315 __fuse_get_req 2 29315 NULL
++lo_compat_ioctl_29336 lo_compat_ioctl 4 29336 NULL
++tun_put_user_29337 tun_put_user 5 29337 NULL
++__alloc_ei_netdev_29338 __alloc_ei_netdev 1 29338 NULL
++alloc_and_copy_ftrace_hash_29368 alloc_and_copy_ftrace_hash 1 29368 NULL
++ktime_us_delta_29375 ktime_us_delta 0 29375 NULL
++mwifiex_cfg80211_mgmt_tx_29387 mwifiex_cfg80211_mgmt_tx 7 29387 NULL
++pca953x_irq_setup_29407 pca953x_irq_setup 3 29407 NULL
++mempool_create_29437 mempool_create 1 29437 NULL
++crypto_ahash_alignmask_29445 crypto_ahash_alignmask 0 29445 NULL
++apei_exec_ctx_get_output_29457 apei_exec_ctx_get_output 0 29457 NULL
++validate_scan_freqs_29462 validate_scan_freqs 0 29462 NULL
++SyS_flistxattr_29474 SyS_flistxattr 3 29474 NULL
++do_register_entry_29478 do_register_entry 4 29478 NULL
++simple_strtoul_29480 simple_strtoul 0 29480 NULL
++sched_clock_local_29498 sched_clock_local 0 29498 NULL
++btmrvl_pscmd_write_29504 btmrvl_pscmd_write 3 29504 NULL
++btrfs_file_extent_disk_bytenr_29505 btrfs_file_extent_disk_bytenr 0 29505 NULL
++atk_debugfs_ggrp_read_29522 atk_debugfs_ggrp_read 3 29522 NULL
++_regmap_raw_write_29541 _regmap_raw_write 4-2 29541 NULL
++set_brk_29551 set_brk 1 29551 NULL nohasharray
++ftrace_write_29551 ftrace_write 3 29551 &set_brk_29551
++idetape_queue_rw_tail_29562 idetape_queue_rw_tail 3 29562 NULL
++leaf_dealloc_29566 leaf_dealloc 3 29566 NULL nohasharray
++alloc_empty_pages_29566 alloc_empty_pages 2 29566 &leaf_dealloc_29566
++lbs_lowsnr_read_29571 lbs_lowsnr_read 3 29571 NULL
++pvr2_hdw_report_unlocked_29589 pvr2_hdw_report_unlocked 4 29589 NULL
++slots_per_page_29601 slots_per_page 0 29601 NULL
++qla4_82xx_pci_set_window_29605 qla4_82xx_pci_set_window 0-2 29605 NULL
++alloc_low_pages_29623 alloc_low_pages 1 29623 NULL
++nla_get_u16_29624 nla_get_u16 0 29624 NULL
++tx_frag_cache_hit_read_29639 tx_frag_cache_hit_read 3 29639 NULL
++lowmem_page_address_29649 lowmem_page_address 0 29649 NULL
++sctp_make_abort_user_29654 sctp_make_abort_user 3 29654 NULL
++br_send_bpdu_29669 br_send_bpdu 3 29669 NULL
++sisusb_write_mem_bulk_29678 sisusb_write_mem_bulk 4 29678 NULL
++sd_alloc_ctl_entry_29708 sd_alloc_ctl_entry 1 29708 NULL nohasharray
++posix_acl_from_xattr_29708 posix_acl_from_xattr 3 29708 &sd_alloc_ctl_entry_29708
++probes_write_29711 probes_write 3 29711 NULL
++emi62_writememory_29731 emi62_writememory 4 29731 NULL
++read_cis_cache_29735 read_cis_cache 4 29735 NULL
++std_nic_write_29752 std_nic_write 3 29752 NULL
++ip_vs_conn_fill_param_sync_29771 ip_vs_conn_fill_param_sync 6 29771 NULL
++tcf_csum_ipv6_icmp_29777 tcf_csum_ipv6_icmp 3 29777 NULL
++dbAlloc_29794 dbAlloc 0 29794 NULL
++ext4_trim_all_free_29806 ext4_trim_all_free 4-3-2 29806 NULL
++tcp_sendpage_29829 tcp_sendpage 4 29829 NULL
++scan_bitmap_block_29840 scan_bitmap_block 4 29840 NULL
++__probe_kernel_write_29842 __probe_kernel_write 3 29842 NULL
++kvm_read_hva_atomic_29848 kvm_read_hva_atomic 3 29848 NULL
++solo_enc_alloc_29860 solo_enc_alloc 3 29860 NULL
++ipv6_setsockopt_29871 ipv6_setsockopt 5 29871 NULL
++scsi_end_request_29876 scsi_end_request 3 29876 NULL
++crypto_aead_alignmask_29885 crypto_aead_alignmask 0 29885 NULL
++nfc_targets_found_29886 nfc_targets_found 3 29886 NULL
++ext4_xattr_set_acl_29930 ext4_xattr_set_acl 4 29930 NULL
++__btrfs_getxattr_29947 __btrfs_getxattr 0 29947 NULL
++irias_add_octseq_attrib_29983 irias_add_octseq_attrib 4 29983 NULL nohasharray
++diva_os_get_context_size_29983 diva_os_get_context_size 0 29983 &irias_add_octseq_attrib_29983
++arch_setup_dmar_msi_29992 arch_setup_dmar_msi 1 29992 NULL
++vmci_host_setup_notify_30002 vmci_host_setup_notify 2 30002 NULL
++utf32_to_utf8_30028 utf32_to_utf8 0 30028 NULL
++alloc_netdev_mqs_30030 alloc_netdev_mqs 1 30030 NULL
++scsi_vpd_inquiry_30040 scsi_vpd_inquiry 4 30040 NULL
++drp_wmove_30043 drp_wmove 4 30043 NULL
++cxgbi_ddp_reserve_30091 cxgbi_ddp_reserve 4 30091 NULL
++snd_midi_channel_init_set_30092 snd_midi_channel_init_set 1 30092 NULL
++tg3_run_loopback_30093 tg3_run_loopback 2 30093 NULL
++rx_filter_data_filter_read_30098 rx_filter_data_filter_read 3 30098 NULL nohasharray
++dma_to_phys_30098 dma_to_phys 0-2 30098 &rx_filter_data_filter_read_30098
++skb_pagelen_30113 skb_pagelen 0 30113 NULL
++spi_async_locked_30117 spi_async_locked 0 30117 NULL
++calgary_unmap_page_30130 calgary_unmap_page 2-3 30130 NULL
++_osd_req_sizeof_alist_header_30134 _osd_req_sizeof_alist_header 0 30134 NULL
++u_memcpya_30139 u_memcpya 2-3 30139 NULL
++btrfs_start_transaction_lflush_30178 btrfs_start_transaction_lflush 2 30178 NULL
++cx25821_video_ioctl_30188 cx25821_video_ioctl 2 30188 NULL
++mempool_create_page_pool_30189 mempool_create_page_pool 1 30189 NULL
++drm_property_create_bitmask_30195 drm_property_create_bitmask 5 30195 NULL
++usblp_ioctl_30203 usblp_ioctl 2 30203 NULL
++nfs_idmap_request_key_30208 nfs_idmap_request_key 3 30208 NULL
++read_4k_modal_eeprom_30212 read_4k_modal_eeprom 3 30212 NULL
++snd_ac97_pcm_assign_30218 snd_ac97_pcm_assign 2 30218 NULL
++f2fs_compat_ioctl_30261 f2fs_compat_ioctl 3 30261 NULL
++isr_pci_pm_read_30271 isr_pci_pm_read 3 30271 NULL
++compat_readv_30273 compat_readv 3 30273 NULL
++lapic_register_intr_30279 lapic_register_intr 1 30279 NULL
++skcipher_sendmsg_30290 skcipher_sendmsg 4 30290 NULL
++pipeline_sec_frag_swi_read_30294 pipeline_sec_frag_swi_read 3 30294 NULL
++tcp_sendmsg_30296 tcp_sendmsg 4 30296 NULL
++ext4_acl_from_disk_30320 ext4_acl_from_disk 2 30320 NULL
++generic_ptrace_pokedata_30338 generic_ptrace_pokedata 2 30338 NULL
++resource_from_user_30341 resource_from_user 3 30341 NULL
++__vmalloc_node_flags_30352 __vmalloc_node_flags 1 30352 NULL
++C_SYSC_readv_30369 C_SYSC_readv 3 30369 NULL
++sys_get_mempolicy_30379 sys_get_mempolicy 3-4 30379 NULL
++mangle_sdp_packet_30381 mangle_sdp_packet 10 30381 NULL
++c4iw_init_resource_30393 c4iw_init_resource 2-3 30393 NULL
++get_kernel_pages_30397 get_kernel_pages 0 30397 NULL
++_drbd_bm_find_next_zero_30415 _drbd_bm_find_next_zero 2 30415 NULL
++vb2_fop_write_30420 vb2_fop_write 3 30420 NULL
++tx_tx_template_prepared_read_30424 tx_tx_template_prepared_read 3 30424 NULL
++mq_create_30425 mq_create 1 30425 NULL
++enable_write_30456 enable_write 3 30456 NULL
++tx_tx_template_programmed_read_30461 tx_tx_template_programmed_read 3 30461 NULL
++urandom_read_30462 urandom_read 3 30462 NULL
++zoran_ioctl_30465 zoran_ioctl 2 30465 NULL
++ocrdma_reg_user_mr_30474 ocrdma_reg_user_mr 2-3 30474 NULL
++write_head_30481 write_head 4 30481 NULL
++adu_write_30487 adu_write 3 30487 NULL
++dwc3_testmode_write_30516 dwc3_testmode_write 3 30516 NULL
++debug_debug2_read_30526 debug_debug2_read 3 30526 NULL
++batadv_dat_snoop_incoming_arp_request_30548 batadv_dat_snoop_incoming_arp_request 3 30548 NULL
++disk_expand_part_tbl_30561 disk_expand_part_tbl 2 30561 NULL
++set_le_30581 set_le 4 30581 NULL
++from_cblock_30582 from_cblock 0-1 30582 NULL
++blk_init_tags_30592 blk_init_tags 1 30592 NULL
++i2c_hid_get_report_length_30598 i2c_hid_get_report_length 0 30598 NULL
++sgl_map_user_pages_30610 sgl_map_user_pages 2-3-4 30610 NULL
++SyS_msgrcv_30611 SyS_msgrcv 3 30611 NULL
++macvtap_sendmsg_30629 macvtap_sendmsg 4 30629 NULL
++ieee80211_if_read_dot11MeshAwakeWindowDuration_30631 ieee80211_if_read_dot11MeshAwakeWindowDuration 3 30631 NULL
++compat_raw_setsockopt_30634 compat_raw_setsockopt 5 30634 NULL
++agp_remap_30665 agp_remap 2 30665 NULL
++jffs2_flash_read_30667 jffs2_flash_read 0 30667 NULL
++il_free_pages_30692 il_free_pages 2 30692 NULL
++dccp_setsockopt_ccid_30701 dccp_setsockopt_ccid 4 30701 NULL
++lbs_debugfs_read_30721 lbs_debugfs_read 3 30721 NULL
++snd_nm256_playback_silence_30727 snd_nm256_playback_silence 4-3 30727 NULL
++snapshot_status_30744 snapshot_status 5 30744 NULL
++tcf_csum_ipv4_udp_30777 tcf_csum_ipv4_udp 4 30777 NULL
++smk_read_doi_30813 smk_read_doi 3 30813 NULL
++get_kobj_path_length_30831 get_kobj_path_length 0 30831 NULL
++sctp_setsockopt_auth_chunk_30843 sctp_setsockopt_auth_chunk 3 30843 NULL
++cfg80211_rx_mgmt_30844 cfg80211_rx_mgmt 5 30844 NULL
++hda_hwdep_ioctl_compat_30847 hda_hwdep_ioctl_compat 4 30847 NULL
++trace_probe_nr_files_30882 trace_probe_nr_files 0 30882 NULL
++ieee80211_if_fmt_dropped_frames_no_route_30884 ieee80211_if_fmt_dropped_frames_no_route 3 30884 NULL
++iommu_map_mmio_space_30919 iommu_map_mmio_space 1 30919 NULL
++sctp_setsockopt_rtoinfo_30941 sctp_setsockopt_rtoinfo 3 30941 NULL
++tty_insert_flip_string_flags_30969 tty_insert_flip_string_flags 4 30969 NULL
++huge_page_mask_30981 huge_page_mask 0 30981 NULL
++i2400mu_rx_size_grow_30989 i2400mu_rx_size_grow 0 30989 NULL
++lbs_host_sleep_read_31013 lbs_host_sleep_read 3 31013 NULL
++phys_pmd_init_31024 phys_pmd_init 0-3-2 31024 NULL
++compat_sys_mq_timedsend_31060 compat_sys_mq_timedsend 3 31060 NULL
++lbs_failcount_read_31063 lbs_failcount_read 3 31063 NULL
++find_next_bit_le_31064 find_next_bit_le 0-2-3 31064 NULL
++sys_mincore_31079 sys_mincore 1 31079 NULL
++ttm_bo_ioremap_31082 ttm_bo_ioremap 2-3 31082 NULL
++sctp_setsockopt_context_31091 sctp_setsockopt_context 3 31091 NULL
++compat_sys_get_mempolicy_31109 compat_sys_get_mempolicy 3-4 31109 NULL
++depth_read_31112 depth_read 3 31112 NULL
++ssb_read16_31139 ssb_read16 0 31139 NULL
++kimage_normal_alloc_31140 kimage_normal_alloc 3 31140 NULL
++size_inside_page_31141 size_inside_page 0 31141 NULL
++w9966_v4l_read_31148 w9966_v4l_read 3 31148 NULL
++ch_do_scsi_31171 ch_do_scsi 4 31171 NULL
++acpi_ex_system_memory_space_handler_31192 acpi_ex_system_memory_space_handler 2 31192 NULL
++r592_read_fifo_pio_31198 r592_read_fifo_pio 3 31198 NULL
++mtdchar_readoob_31200 mtdchar_readoob 4 31200 NULL
++__btrfs_free_reserved_extent_31207 __btrfs_free_reserved_extent 2 31207 NULL
++kvm_mmu_page_fault_31213 kvm_mmu_page_fault 2 31213 NULL
++cpumask_weight_31215 cpumask_weight 0 31215 NULL
++__read_reg_31216 __read_reg 0 31216 NULL
++atm_get_addr_31221 atm_get_addr 3 31221 NULL
++cyy_readb_31240 cyy_readb 0 31240 NULL
++_create_sg_bios_31244 _create_sg_bios 4 31244 NULL
++ieee80211_if_read_last_beacon_31257 ieee80211_if_read_last_beacon 3 31257 NULL
++sctp_tsnmap_find_gap_ack_31272 sctp_tsnmap_find_gap_ack 3-2 31272 NULL
++uvc_simplify_fraction_31303 uvc_simplify_fraction 3 31303 NULL
++sisusbcon_scroll_31315 sisusbcon_scroll 5-2-3 31315 NULL
++command_file_write_31318 command_file_write 3 31318 NULL
++em28xx_init_usb_xfer_31337 em28xx_init_usb_xfer 4-6 31337 NULL
++__cpu_to_node_31345 __cpu_to_node 0 31345 NULL
++xprt_rdma_allocate_31372 xprt_rdma_allocate 2 31372 NULL
++vb2_vmalloc_get_userptr_31374 vb2_vmalloc_get_userptr 3-2 31374 NULL
++trace_parser_get_init_31379 trace_parser_get_init 2 31379 NULL
++inb_31388 inb 0 31388 NULL
++key_ifindex_read_31411 key_ifindex_read 3 31411 NULL
++mcs7830_set_reg_31413 mcs7830_set_reg 3 31413 NULL
++TSS_checkhmac1_31429 TSS_checkhmac1 5 31429 NULL
++snd_aw2_saa7146_get_hw_ptr_capture_31431 snd_aw2_saa7146_get_hw_ptr_capture 0 31431 NULL
++acpi_sci_ioapic_setup_31445 acpi_sci_ioapic_setup 4 31445 NULL
++opera1_xilinx_rw_31453 opera1_xilinx_rw 5 31453 NULL
++input_get_new_minor_31464 input_get_new_minor 1 31464 NULL
++do_fcntl_31468 do_fcntl 3 31468 NULL
++xfs_btree_get_numrecs_31477 xfs_btree_get_numrecs 0 31477 NULL
++alg_setkey_31485 alg_setkey 3 31485 NULL
++rds_message_map_pages_31487 rds_message_map_pages 2 31487 NULL
++qsfp_2_read_31491 qsfp_2_read 3 31491 NULL
++__alloc_bootmem_31498 __alloc_bootmem 1-2 31498 NULL
++rmode_tss_base_31510 rmode_tss_base 0 31510 NULL
++hidraw_write_31536 hidraw_write 3 31536 NULL
++mtd_div_by_eb_31543 mtd_div_by_eb 0-1 31543 NULL
++usbvision_read_31555 usbvision_read 3 31555 NULL
++normalize_31566 normalize 0-1-2 31566 NULL
++tx_frag_tkip_called_read_31575 tx_frag_tkip_called_read 3 31575 NULL
++get_max_inline_xattr_value_size_31578 get_max_inline_xattr_value_size 0 31578 NULL
++osst_write_31581 osst_write 3 31581 NULL
++snd_compr_get_avail_31584 snd_compr_get_avail 0 31584 NULL
++iwl_dbgfs_ucode_tx_stats_read_31611 iwl_dbgfs_ucode_tx_stats_read 3 31611 NULL
++mtd_get_user_prot_info_31616 mtd_get_user_prot_info 0 31616 NULL
++arvo_sysfs_read_31617 arvo_sysfs_read 6 31617 NULL
++videobuf_read_one_31637 videobuf_read_one 3 31637 NULL
++pod_alloc_sysex_buffer_31651 pod_alloc_sysex_buffer 3 31651 NULL
++xfer_secondary_pool_31661 xfer_secondary_pool 2 31661 NULL
++emulator_set_cr_31665 emulator_set_cr 3 31665 NULL
++__lgread_31668 __lgread 4 31668 NULL
++symbol_string_31670 symbol_string 0 31670 NULL
++_usb_writeN_sync_31682 _usb_writeN_sync 4 31682 NULL
++forced_ps_read_31685 forced_ps_read 3 31685 NULL
++reiserfs_in_journal_31689 reiserfs_in_journal 3 31689 NULL
++audit_log_n_string_31705 audit_log_n_string 3 31705 NULL
++ath6kl_wmi_send_probe_response_cmd_31728 ath6kl_wmi_send_probe_response_cmd 6 31728 NULL nohasharray
++gfn_to_hva_read_31728 gfn_to_hva_read 2 31728 &ath6kl_wmi_send_probe_response_cmd_31728
++utf16s_to_utf8s_31735 utf16s_to_utf8s 0 31735 NULL
++shmem_pwrite_slow_31741 shmem_pwrite_slow 3 31741 NULL
++NCR_700_change_queue_depth_31742 NCR_700_change_queue_depth 2 31742 NULL nohasharray
++input_abs_get_max_31742 input_abs_get_max 0 31742 &NCR_700_change_queue_depth_31742
++muldiv64_31743 muldiv64 2-3 31743 NULL
++bcm_char_read_31750 bcm_char_read 3 31750 NULL
++snd_seq_device_new_31753 snd_seq_device_new 4 31753 NULL
++set_memory_wb_31761 set_memory_wb 1 31761 NULL
++usblp_cache_device_id_string_31790 usblp_cache_device_id_string 0 31790 NULL
++get_count_order_31800 get_count_order 0 31800 NULL
++ecryptfs_send_message_locked_31801 ecryptfs_send_message_locked 2 31801 NULL
++isr_rx_procs_read_31804 isr_rx_procs_read 3 31804 NULL
++strnlen_user_31815 strnlen_user 0-2 31815 NULL
++sta_last_signal_read_31818 sta_last_signal_read 3 31818 NULL
++drm_mode_crtc_set_gamma_size_31881 drm_mode_crtc_set_gamma_size 2 31881 NULL
++ddb_output_write_31902 ddb_output_write 3 31902 NULL
++xattr_permission_31907 xattr_permission 0 31907 NULL
++new_dir_31919 new_dir 3 31919 NULL
++kmem_alloc_31920 kmem_alloc 1 31920 NULL
++guestwidth_to_adjustwidth_31937 guestwidth_to_adjustwidth 0-1 31937 NULL
++SYSC_sethostname_31940 SYSC_sethostname 2 31940 NULL
++iov_iter_copy_from_user_31942 iov_iter_copy_from_user 4 31942 NULL
++vb2_write_31948 vb2_write 3 31948 NULL
++pvr2_ctrl_get_valname_31951 pvr2_ctrl_get_valname 4 31951 NULL
++regcache_rbtree_sync_31964 regcache_rbtree_sync 2 31964 NULL
++copy_from_user_toio_31966 copy_from_user_toio 3 31966 NULL
++mtd_add_partition_31971 mtd_add_partition 3 31971 NULL
++find_next_zero_bit_31990 find_next_zero_bit 0-2-3 31990 NULL
++default_setup_hpet_msi_31991 default_setup_hpet_msi 1 31991 NULL
++tps6586x_irq_map_32002 tps6586x_irq_map 2 32002 NULL
++calc_hmac_32010 calc_hmac 3 32010 NULL
++vmcs_read64_32012 vmcs_read64 0 32012 NULL
++aead_len_32021 aead_len 0 32021 NULL
++ocfs2_remove_extent_32032 ocfs2_remove_extent 4-3 32032 NULL
++posix_acl_set_32037 posix_acl_set 4 32037 NULL
++stk_read_32038 stk_read 3 32038 NULL
++vmw_cursor_update_dmabuf_32045 vmw_cursor_update_dmabuf 3-4 32045 NULL
++sys_sched_setaffinity_32046 sys_sched_setaffinity 2 32046 NULL
++SYSC_llistxattr_32061 SYSC_llistxattr 3 32061 NULL
++proc_scsi_devinfo_write_32064 proc_scsi_devinfo_write 3 32064 NULL
++cfg80211_send_unprot_deauth_32080 cfg80211_send_unprot_deauth 3 32080 NULL
++bio_alloc_32095 bio_alloc 2 32095 NULL
++alloc_pwms_32100 alloc_pwms 1-2 32100 NULL
++ath6kl_fwlog_read_32101 ath6kl_fwlog_read 3 32101 NULL
++disk_status_32120 disk_status 4 32120 NULL
++venus_link_32165 venus_link 5 32165 NULL
++do_writepages_32173 do_writepages 0 32173 NULL nohasharray
++ntfs_rl_realloc_nofail_32173 ntfs_rl_realloc_nofail 3 32173 &do_writepages_32173
++load_header_32183 load_header 0 32183 NULL
++ubi_wl_scrub_peb_32196 ubi_wl_scrub_peb 0 32196 NULL
++wusb_ccm_mac_32199 wusb_ccm_mac 7 32199 NULL
++riva_get_cmap_len_32218 riva_get_cmap_len 0 32218 NULL
++lbs_lowrssi_read_32242 lbs_lowrssi_read 3 32242 NULL
++ocfs2_xattr_find_entry_32260 ocfs2_xattr_find_entry 0 32260 NULL
++fb_compat_ioctl_32265 fb_compat_ioctl 3 32265 NULL
++vmalloc_user_32308 vmalloc_user 1 32308 NULL
++hex_string_32310 hex_string 0 32310 NULL
++SyS_select_32319 SyS_select 1 32319 NULL
++nouveau_bar_create__32332 nouveau_bar_create_ 4 32332 NULL
++nl80211_send_mlme_event_32337 nl80211_send_mlme_event 4 32337 NULL
++t4_alloc_mem_32342 t4_alloc_mem 1 32342 NULL
++dispatch_ioctl_32357 dispatch_ioctl 2 32357 NULL
++sel_read_initcon_32362 sel_read_initcon 3 32362 NULL
++_drbd_bm_find_next_32372 _drbd_bm_find_next 2 32372 NULL
++usbtmc_read_32377 usbtmc_read 3 32377 NULL
++local_clock_32385 local_clock 0 32385 NULL
++qla4_82xx_pci_mem_write_2M_32398 qla4_82xx_pci_mem_write_2M 2 32398 NULL
++xfs_iext_add_indirect_multi_32400 xfs_iext_add_indirect_multi 3 32400 NULL
++vmci_qp_alloc_32405 vmci_qp_alloc 3-5 32405 NULL
++log_text_32428 log_text 0 32428 NULL
++regmap_irq_map_32429 regmap_irq_map 2 32429 NULL
++hid_input_report_32458 hid_input_report 4 32458 NULL
++cache_status_32462 cache_status 5 32462 NULL
++ieee80211_fill_mesh_addresses_32465 ieee80211_fill_mesh_addresses 0 32465 NULL
++ide_driver_proc_write_32493 ide_driver_proc_write 3 32493 NULL
++bypass_pwoff_write_32499 bypass_pwoff_write 3 32499 NULL
++ctrl_std_val_to_sym_32516 ctrl_std_val_to_sym 5 32516 NULL
++disconnect_32521 disconnect 4 32521 NULL
++qsfp_read_32522 qsfp_read 0-4-2 32522 NULL
++ilo_read_32531 ilo_read 3 32531 NULL
++ieee80211_if_read_estab_plinks_32533 ieee80211_if_read_estab_plinks 3 32533 NULL
++gnttab_set_unmap_op_32534 gnttab_set_unmap_op 2 32534 NULL
++ieee80211_send_auth_32543 ieee80211_send_auth 6 32543 NULL
++format_devstat_counter_32550 format_devstat_counter 3 32550 NULL
++__first_node_32558 __first_node 0 32558 NULL
++aes_encrypt_fail_read_32562 aes_encrypt_fail_read 3 32562 NULL
++pnp_mem_len_32584 pnp_mem_len 0 32584 NULL
++mem_swapout_entry_32586 mem_swapout_entry 3 32586 NULL
++pipeline_tcp_tx_stat_fifo_int_read_32589 pipeline_tcp_tx_stat_fifo_int_read 3 32589 NULL
++read_file_beacon_32595 read_file_beacon 3 32595 NULL
++ieee80211_if_read_dropped_frames_congestion_32603 ieee80211_if_read_dropped_frames_congestion 3 32603 NULL
++sys_set_mempolicy_32608 sys_set_mempolicy 3 32608 NULL
++cfg80211_roamed_32632 cfg80211_roamed 5-7 32632 NULL
++ieee80211_hdrlen_32637 ieee80211_hdrlen 0 32637 NULL
++ite_decode_bytes_32642 ite_decode_bytes 3 32642 NULL
++kvmalloc_32646 kvmalloc 1 32646 NULL
++ib_sg_dma_len_32649 ib_sg_dma_len 0 32649 NULL
++generic_readlink_32654 generic_readlink 3 32654 NULL
++move_addr_to_kernel_32673 move_addr_to_kernel 2 32673 NULL
++compat_SyS_pwritev_32680 compat_SyS_pwritev 3 32680 NULL
++jfs_readpages_32702 jfs_readpages 4 32702 NULL
++snd_hwdep_ioctl_compat_32736 snd_hwdep_ioctl_compat 3 32736 NULL
++get_arg_page_32746 get_arg_page 2 32746 NULL
++megasas_change_queue_depth_32747 megasas_change_queue_depth 2 32747 NULL
++stats_read_ul_32751 stats_read_ul 3 32751 NULL
++tty_compat_ioctl_32761 tty_compat_ioctl 3 32761 NULL
++sctp_tsnmap_grow_32784 sctp_tsnmap_grow 2 32784 NULL
++firmwareUpload_32794 firmwareUpload 3 32794 NULL
++rproc_name_read_32805 rproc_name_read 3 32805 NULL
++vga_rseq_32848 vga_rseq 0 32848 NULL
++new_tape_buffer_32866 new_tape_buffer 2 32866 NULL
++io_apic_setup_irq_pin_32868 io_apic_setup_irq_pin 1 32868 NULL
++ath6kl_usb_submit_ctrl_in_32880 ath6kl_usb_submit_ctrl_in 6 32880 NULL nohasharray
++cifs_writedata_alloc_32880 cifs_writedata_alloc 1 32880 &ath6kl_usb_submit_ctrl_in_32880
++ath6kl_usb_post_recv_transfers_32892 ath6kl_usb_post_recv_transfers 2 32892 NULL
++ext4_get_group_number_32899 ext4_get_group_number 0 32899 NULL
++il_dbgfs_tx_stats_read_32913 il_dbgfs_tx_stats_read 3 32913 NULL
++zlib_inflate_workspacesize_32927 zlib_inflate_workspacesize 0 32927 NULL
++rmap_recycle_32938 rmap_recycle 3 32938 NULL
++irq_reserve_irqs_32946 irq_reserve_irqs 1-2 32946 NULL
++ext4_valid_block_bitmap_32958 ext4_valid_block_bitmap 3 32958 NULL
++arch_ptrace_32981 arch_ptrace 3-4 32981 NULL
++compat_filldir_32999 compat_filldir 3 32999 NULL
++ext3_alloc_blocks_33007 ext3_alloc_blocks 3 33007 NULL nohasharray
++SyS_syslog_33007 SyS_syslog 3 33007 &ext3_alloc_blocks_33007
++SYSC_lgetxattr_33049 SYSC_lgetxattr 4 33049 NULL
++pipeline_dec_packet_in_fifo_full_read_33052 pipeline_dec_packet_in_fifo_full_read 3 33052 NULL
++ebt_compat_match_offset_33053 ebt_compat_match_offset 0-2 33053 NULL
++bitmap_resize_33054 bitmap_resize 2 33054 NULL
++stats_dot11RTSSuccessCount_read_33065 stats_dot11RTSSuccessCount_read 3 33065 NULL
++sel_read_checkreqprot_33068 sel_read_checkreqprot 3 33068 NULL
++alloc_tio_33077 alloc_tio 3 33077 NULL
++acl_permission_check_33083 acl_permission_check 0 33083 NULL
++ieee80211_fragment_33112 ieee80211_fragment 4 33112 NULL
++write_node_33121 write_node 4 33121 NULL
++calc_patch_size_33124 calc_patch_size 0 33124 NULL
++fb_sys_write_33130 fb_sys_write 3 33130 NULL
++__len_within_target_33132 __len_within_target 0 33132 NULL
++debug_debug6_read_33168 debug_debug6_read 3 33168 NULL
++dataflash_read_fact_otp_33204 dataflash_read_fact_otp 2-3 33204 NULL
++pp_read_33210 pp_read 3 33210 NULL
++xfs_file_aio_write_33234 xfs_file_aio_write 4 33234 NULL
++build_completion_wait_33242 build_completion_wait 2 33242 NULL
++snd_pcm_plug_client_size_33267 snd_pcm_plug_client_size 0-2 33267 NULL
++sched_find_first_bit_33270 sched_find_first_bit 0 33270 NULL
++cachefiles_cook_key_33274 cachefiles_cook_key 2 33274 NULL
++mei_compat_ioctl_33275 mei_compat_ioctl 3 33275 NULL
++sync_pt_create_33282 sync_pt_create 2 33282 NULL
++mcs7830_get_reg_33308 mcs7830_get_reg 3 33308 NULL
++isku_sysfs_read_keys_easyzone_33318 isku_sysfs_read_keys_easyzone 6 33318 NULL
++ath6kl_usb_ctrl_msg_exchange_33327 ath6kl_usb_ctrl_msg_exchange 4 33327 NULL
++gsm_mux_rx_netchar_33336 gsm_mux_rx_netchar 3 33336 NULL
++joydev_ioctl_33343 joydev_ioctl 2 33343 NULL
++create_xattr_datum_33356 create_xattr_datum 5 33356 NULL nohasharray
++irq_pkt_threshold_read_33356 irq_pkt_threshold_read 3 33356 &create_xattr_datum_33356
++read_file_regidx_33370 read_file_regidx 3 33370 NULL
++ocfs2_quota_read_33382 ocfs2_quota_read 5 33382 NULL
++ieee80211_if_read_dropped_frames_no_route_33383 ieee80211_if_read_dropped_frames_no_route 3 33383 NULL
++scsi_varlen_cdb_length_33385 scsi_varlen_cdb_length 0 33385 NULL
++tg_get_cfs_period_33390 tg_get_cfs_period 0 33390 NULL
++ocfs2_allocate_unwritten_extents_33394 ocfs2_allocate_unwritten_extents 2-3 33394 NULL
++ext4_meta_bg_first_block_no_33408 ext4_meta_bg_first_block_no 2 33408 NULL nohasharray
++snd_pcm_capture_ioctl1_33408 snd_pcm_capture_ioctl1 0 33408 &ext4_meta_bg_first_block_no_33408
++ufs_getfrag_block_33409 ufs_getfrag_block 2 33409 NULL
++dis_tap_write_33426 dis_tap_write 3 33426 NULL
++ubh_scanc_33436 ubh_scanc 0-4-3 33436 NULL
++ovs_vport_alloc_33475 ovs_vport_alloc 1 33475 NULL
++create_entry_33479 create_entry 2 33479 NULL
++ip_setsockopt_33487 ip_setsockopt 5 33487 NULL nohasharray
++elf_map_33487 elf_map 0-2 33487 &ip_setsockopt_33487
++netxen_nic_hw_write_wx_128M_33488 netxen_nic_hw_write_wx_128M 2 33488 NULL
++ol_dqblk_chunk_off_33489 ol_dqblk_chunk_off 2 33489 NULL
++res_counter_read_33499 res_counter_read 4 33499 NULL
++fb_read_33506 fb_read 3 33506 NULL
++musb_test_mode_write_33518 musb_test_mode_write 3 33518 NULL
++ahash_setkey_unaligned_33521 ahash_setkey_unaligned 3 33521 NULL
++nes_alloc_fast_reg_page_list_33523 nes_alloc_fast_reg_page_list 2 33523 NULL
++aggr_size_rx_size_read_33526 aggr_size_rx_size_read 3 33526 NULL
++acpi_gsi_to_irq_33533 acpi_gsi_to_irq 1 33533 NULL
++tomoyo_read_self_33539 tomoyo_read_self 3 33539 NULL
++dup_array_33551 dup_array 3 33551 NULL
++solo_enc_read_33553 solo_enc_read 3 33553 NULL
++count_subheaders_33591 count_subheaders 0 33591 NULL
++scsi_execute_33596 scsi_execute 5 33596 NULL
++comedi_buf_write_n_allocated_33604 comedi_buf_write_n_allocated 0 33604 NULL
++xt_compat_target_offset_33608 xt_compat_target_offset 0 33608 NULL nohasharray
++ip6_find_1stfragopt_33608 ip6_find_1stfragopt 0 33608 &xt_compat_target_offset_33608
++usb_gstrings_attach_33615 usb_gstrings_attach 3 33615 NULL nohasharray
++il_dbgfs_qos_read_33615 il_dbgfs_qos_read 3 33615 &usb_gstrings_attach_33615
++irq_blk_threshold_read_33666 irq_blk_threshold_read 3 33666 NULL
++inw_p_33668 inw_p 0 33668 NULL
++arp_hdr_len_33671 arp_hdr_len 0 33671 NULL
++i2c_hid_alloc_buffers_33673 i2c_hid_alloc_buffers 2 33673 NULL
++ath6kl_wmi_startscan_cmd_33674 ath6kl_wmi_startscan_cmd 8 33674 NULL
++nv50_disp_dmac_create__33696 nv50_disp_dmac_create_ 6 33696 NULL
++compat_insnlist_33706 compat_insnlist 2 33706 NULL
++sys_keyctl_33708 sys_keyctl 4 33708 NULL nohasharray
++netlink_sendmsg_33708 netlink_sendmsg 4 33708 &sys_keyctl_33708
++tipc_link_stats_33716 tipc_link_stats 3 33716 NULL
++pvr2_stream_buffer_count_33719 pvr2_stream_buffer_count 2 33719 NULL
++ocfs2_extent_map_get_blocks_33720 ocfs2_extent_map_get_blocks 2 33720 NULL
++__mutex_lock_interruptible_slowpath_33735 __mutex_lock_interruptible_slowpath 0 33735 NULL
++Read_hfc_33755 Read_hfc 0 33755 NULL
++vifs_state_read_33762 vifs_state_read 3 33762 NULL
++hashtab_create_33769 hashtab_create 3 33769 NULL
++midibuf_message_length_33770 midibuf_message_length 0 33770 NULL
++if_sdio_read_rx_len_33800 if_sdio_read_rx_len 0 33800 NULL
++find_next_offset_33804 find_next_offset 3 33804 NULL
++sky2_rx_pad_33819 sky2_rx_pad 0 33819 NULL
++sep_create_msgarea_context_33829 sep_create_msgarea_context 4 33829 NULL
++scrub_setup_recheck_block_33831 scrub_setup_recheck_block 5-4 33831 NULL
++udplite_manip_pkt_33832 udplite_manip_pkt 4 33832 NULL
++usb_dump_endpoint_descriptor_33849 usb_dump_endpoint_descriptor 0 33849 NULL
++calgary_alloc_coherent_33851 calgary_alloc_coherent 2 33851 NULL
++oz_cdev_write_33852 oz_cdev_write 3 33852 NULL
++cap_mmap_addr_33853 cap_mmap_addr 0 33853 NULL
++get_user_pages_33908 get_user_pages 0-3-4 33908 NULL
++queue_logical_block_size_33918 queue_logical_block_size 0 33918 NULL
++sel_read_avc_cache_threshold_33942 sel_read_avc_cache_threshold 3 33942 NULL
++lpfc_idiag_ctlacc_read_33943 lpfc_idiag_ctlacc_read 3 33943 NULL
++read_file_tgt_rx_stats_33944 read_file_tgt_rx_stats 3 33944 NULL
++vga_switcheroo_debugfs_write_33984 vga_switcheroo_debugfs_write 3 33984 NULL
++__ntfs_malloc_34022 __ntfs_malloc 1 34022 NULL
++ppp_write_34034 ppp_write 3 34034 NULL
++tty_insert_flip_string_34042 tty_insert_flip_string 3 34042 NULL
++__domain_flush_pages_34045 __domain_flush_pages 2-3 34045 NULL
++is_trap_at_addr_34047 is_trap_at_addr 2 34047 NULL
++acpi_dev_get_irqresource_34064 acpi_dev_get_irqresource 2 34064 NULL
++memcg_update_all_caches_34068 memcg_update_all_caches 1 34068 NULL
++read_file_ant_diversity_34071 read_file_ant_diversity 3 34071 NULL
++compat_hdio_ioctl_34088 compat_hdio_ioctl 4 34088 NULL
++pipeline_pipeline_fifo_full_read_34095 pipeline_pipeline_fifo_full_read 3 34095 NULL
++proc_scsi_host_write_34107 proc_scsi_host_write 3 34107 NULL
++is_discarded_oblock_34120 is_discarded_oblock 2 34120 NULL
++islpci_mgt_transmit_34133 islpci_mgt_transmit 5 34133 NULL
++ttm_dma_page_pool_free_34135 ttm_dma_page_pool_free 2 34135 NULL
++cdc_mbim_process_dgram_34136 cdc_mbim_process_dgram 3 34136 NULL
++ixgbe_dbg_netdev_ops_write_34141 ixgbe_dbg_netdev_ops_write 3 34141 NULL
++shmem_pread_fast_34147 shmem_pread_fast 3 34147 NULL
++skb_to_sgvec_34171 skb_to_sgvec 0 34171 NULL
++ext4_da_write_begin_34215 ext4_da_write_begin 3-4 34215 NULL
++bl_pipe_downcall_34264 bl_pipe_downcall 3 34264 NULL
++pcf857x_to_irq_34273 pcf857x_to_irq 2 34273 NULL
++zone_spanned_pages_in_node_34299 zone_spanned_pages_in_node 0 34299 NULL
++iov_iter_single_seg_count_34326 iov_iter_single_seg_count 0 34326 NULL nohasharray
++pcpu_need_to_extend_34326 pcpu_need_to_extend 0 34326 &iov_iter_single_seg_count_34326
++__insert_34349 __insert 2-3 34349 NULL
++crypto_ablkcipher_ivsize_34363 crypto_ablkcipher_ivsize 0 34363 NULL
++rngapi_reset_34366 rngapi_reset 3 34366 NULL nohasharray
++p54_alloc_skb_34366 p54_alloc_skb 3 34366 &rngapi_reset_34366
++i2c_hid_get_raw_report_34376 i2c_hid_get_raw_report 0 34376 NULL
++reiserfs_resize_34377 reiserfs_resize 2 34377 NULL
++ea_read_34378 ea_read 0 34378 NULL
++fuse_send_read_34379 fuse_send_read 4 34379 NULL
++av7110_vbi_write_34384 av7110_vbi_write 3 34384 NULL
++usbvision_v4l2_read_34386 usbvision_v4l2_read 3 34386 NULL
++read_rbu_image_type_34387 read_rbu_image_type 6 34387 NULL
++iwl_calib_set_34400 iwl_calib_set 3 34400 NULL nohasharray
++ivtv_read_pos_34400 ivtv_read_pos 3 34400 &iwl_calib_set_34400
++wd_exp_mode_write_34407 wd_exp_mode_write 3 34407 NULL
++nl80211_send_disassoc_34424 nl80211_send_disassoc 4 34424 NULL
++usbtest_alloc_urb_34446 usbtest_alloc_urb 3-5 34446 NULL
++mwifiex_regrdwr_read_34472 mwifiex_regrdwr_read 3 34472 NULL
++skcipher_sndbuf_34476 skcipher_sndbuf 0 34476 NULL
++i2o_parm_field_get_34477 i2o_parm_field_get 5 34477 NULL
++security_inode_permission_34488 security_inode_permission 0 34488 NULL
++tracing_stats_read_34537 tracing_stats_read 3 34537 NULL
++hugetlbfs_read_actor_34547 hugetlbfs_read_actor 0-2-5-4 34547 NULL
++dbBackSplit_34561 dbBackSplit 0 34561 NULL
++alloc_ieee80211_rsl_34564 alloc_ieee80211_rsl 1 34564 NULL
++velocity_rx_copy_34583 velocity_rx_copy 2 34583 NULL
++init_send_hfcd_34586 init_send_hfcd 1 34586 NULL
++inet6_ifla6_size_34591 inet6_ifla6_size 0 34591 NULL
++ceph_msgpool_init_34599 ceph_msgpool_init 4 34599 NULL
++brcmf_cfg80211_mgmt_tx_34608 brcmf_cfg80211_mgmt_tx 7 34608 NULL
++__jffs2_ref_totlen_34609 __jffs2_ref_totlen 0 34609 NULL
++__cfg80211_disconnected_34622 __cfg80211_disconnected 3 34622 NULL
++cnic_alloc_dma_34641 cnic_alloc_dma 3 34641 NULL
++tomoyo_dump_page_34649 tomoyo_dump_page 2 34649 NULL
++nf_nat_mangle_udp_packet_34661 nf_nat_mangle_udp_packet 8-6 34661 NULL
++isr_fiqs_read_34687 isr_fiqs_read 3 34687 NULL
++port_print_34704 port_print 3 34704 NULL
++alloc_irq_and_cfg_at_34706 alloc_irq_and_cfg_at 1 34706 NULL
++ieee80211_if_read_num_sta_ps_34722 ieee80211_if_read_num_sta_ps 3 34722 NULL
++platform_list_read_file_34734 platform_list_read_file 3 34734 NULL
++reg_w_ixbuf_34736 reg_w_ixbuf 4 34736 NULL
++qib_cdev_init_34778 qib_cdev_init 1 34778 NULL
++__copy_in_user_34790 __copy_in_user 3 34790 NULL
++SYSC_keyctl_34800 SYSC_keyctl 4 34800 NULL
++drbd_get_max_capacity_34804 drbd_get_max_capacity 0 34804 NULL
++b43_debugfs_write_34838 b43_debugfs_write 3 34838 NULL
++nl_portid_hash_zalloc_34843 nl_portid_hash_zalloc 1 34843 NULL
++acpi_system_write_wakeup_device_34853 acpi_system_write_wakeup_device 3 34853 NULL
++usb_serial_generic_prepare_write_buffer_34857 usb_serial_generic_prepare_write_buffer 3 34857 NULL
++ieee80211_if_read_txpower_34871 ieee80211_if_read_txpower 3 34871 NULL
++msg_print_text_34889 msg_print_text 0 34889 NULL
++ieee80211_if_write_34894 ieee80211_if_write 3 34894 NULL
++compat_put_uint_34905 compat_put_uint 1 34905 NULL
++si476x_radio_read_rsq_primary_blob_34916 si476x_radio_read_rsq_primary_blob 3 34916 NULL
++__inode_permission_34925 __inode_permission 0 34925 NULL nohasharray
++btrfs_super_chunk_root_34925 btrfs_super_chunk_root 0 34925 &__inode_permission_34925
++ceph_aio_write_34930 ceph_aio_write 4 34930 NULL
++skb_gro_header_slow_34958 skb_gro_header_slow 2 34958 NULL nohasharray
++i2c_transfer_34958 i2c_transfer 0 34958 &skb_gro_header_slow_34958
++Realloc_34961 Realloc 2 34961 NULL
++mq_lookup_34990 mq_lookup 2 34990 NULL
++rx_rx_hdr_overflow_read_35002 rx_rx_hdr_overflow_read 3 35002 NULL
++l2cap_skbuff_fromiovec_35003 l2cap_skbuff_fromiovec 4-3 35003 NULL
++dm_cache_insert_mapping_35005 dm_cache_insert_mapping 2-3 35005 NULL
++sisusb_copy_memory_35016 sisusb_copy_memory 4 35016 NULL
++alloc_p2m_page_35025 alloc_p2m_page 0 35025 NULL
++coda_psdev_read_35029 coda_psdev_read 3 35029 NULL
++brcmf_sdio_chip_writenvram_35042 brcmf_sdio_chip_writenvram 4 35042 NULL
++btmrvl_gpiogap_write_35053 btmrvl_gpiogap_write 3 35053 NULL
++pwr_connection_out_of_sync_read_35061 pwr_connection_out_of_sync_read 3 35061 NULL
++store_ifalias_35088 store_ifalias 4 35088 NULL
++__kfifo_uint_must_check_helper_35097 __kfifo_uint_must_check_helper 0-1 35097 NULL
++capi_write_35104 capi_write 3 35104 NULL nohasharray
++tx_tx_done_template_read_35104 tx_tx_done_template_read 3 35104 &capi_write_35104
++ide_settings_proc_write_35110 ide_settings_proc_write 3 35110 NULL
++pointer_35138 pointer 0 35138 NULL
++gntdev_alloc_map_35145 gntdev_alloc_map 2 35145 NULL
++iscsi_conn_setup_35159 iscsi_conn_setup 2 35159 NULL
++ieee80211_if_read_bssid_35161 ieee80211_if_read_bssid 3 35161 NULL
++solo_v4l2_init_35179 solo_v4l2_init 2 35179 NULL
++mlx4_ib_get_cq_umem_35184 mlx4_ib_get_cq_umem 5-6 35184 NULL
++iwl_nvm_read_chunk_35198 iwl_nvm_read_chunk 0 35198 NULL
++uprobe_get_swbp_addr_35201 uprobe_get_swbp_addr 0 35201 NULL
++unix_stream_recvmsg_35210 unix_stream_recvmsg 4 35210 NULL
++_osd_req_alist_elem_size_35216 _osd_req_alist_elem_size 0-2 35216 NULL
++striped_read_35218 striped_read 0-2-8-3 35218 NULL nohasharray
++security_key_getsecurity_35218 security_key_getsecurity 0 35218 &striped_read_35218
++rx_rx_cmplt_task_read_35226 rx_rx_cmplt_task_read 3 35226 NULL nohasharray
++video_register_device_no_warn_35226 video_register_device_no_warn 3 35226 &rx_rx_cmplt_task_read_35226
++gfn_to_page_many_atomic_35234 gfn_to_page_many_atomic 2 35234 NULL
++SYSC_madvise_35241 SYSC_madvise 1 35241 NULL
++set_fd_set_35249 set_fd_set 1 35249 NULL
++ioapic_setup_resources_35255 ioapic_setup_resources 1 35255 NULL
++dis_disc_write_35265 dis_disc_write 3 35265 NULL
++dma_show_regs_35266 dma_show_regs 3 35266 NULL
++irda_recvmsg_stream_35280 irda_recvmsg_stream 4 35280 NULL
++i2o_block_end_request_35282 i2o_block_end_request 3 35282 NULL
++isr_rx_rdys_read_35283 isr_rx_rdys_read 3 35283 NULL
++brcmf_sdio_forensic_read_35311 brcmf_sdio_forensic_read 3 35311 NULL nohasharray
++__btrfs_buffered_write_35311 __btrfs_buffered_write 3 35311 &brcmf_sdio_forensic_read_35311
++tracing_read_pipe_35312 tracing_read_pipe 3 35312 NULL
++sys_setsockopt_35320 sys_setsockopt 5 35320 NULL
++irq_domain_disassociate_many_35325 irq_domain_disassociate_many 2-3 35325 NULL
++fallback_on_nodma_alloc_35332 fallback_on_nodma_alloc 2 35332 NULL
++pskb_network_may_pull_35336 pskb_network_may_pull 2 35336 NULL
++ieee80211_if_fmt_ap_power_level_35347 ieee80211_if_fmt_ap_power_level 3 35347 NULL
++nouveau_devinit_create__35348 nouveau_devinit_create_ 4 35348 NULL
++hpi_alloc_control_cache_35351 hpi_alloc_control_cache 1 35351 NULL
++compat_filldir64_35354 compat_filldir64 3 35354 NULL
++SyS_getxattr_35408 SyS_getxattr 4 35408 NULL
++rawv6_send_hdrinc_35425 rawv6_send_hdrinc 3 35425 NULL
++__set_test_and_free_35436 __set_test_and_free 2 35436 NULL
++buffer_to_user_35439 buffer_to_user 3 35439 NULL
++i915_wedged_read_35474 i915_wedged_read 3 35474 NULL
++ecryptfs_get_zeroed_pages_35483 ecryptfs_get_zeroed_pages 0 35483 NULL
++do_atm_ioctl_35519 do_atm_ioctl 3 35519 NULL
++async_setkey_35521 async_setkey 3 35521 NULL
++__filemap_fdatawrite_range_35528 __filemap_fdatawrite_range 0 35528 NULL
++iwl_dbgfs_bt_traffic_read_35534 iwl_dbgfs_bt_traffic_read 3 35534 NULL
++rxpipe_tx_xfr_host_int_trig_rx_data_read_35538 rxpipe_tx_xfr_host_int_trig_rx_data_read 3 35538 NULL
++ibnl_put_attr_35541 ibnl_put_attr 3 35541 NULL
++ieee80211_if_write_smps_35550 ieee80211_if_write_smps 3 35550 NULL
++ext2_acl_from_disk_35580 ext2_acl_from_disk 2 35580 NULL
++spk_msg_set_35586 spk_msg_set 3 35586 NULL
++ReadZReg_35604 ReadZReg 0 35604 NULL
++kernel_readv_35617 kernel_readv 3 35617 NULL
++ixgbe_pci_sriov_configure_35624 ixgbe_pci_sriov_configure 2 35624 NULL
++reiserfs_readpages_35629 reiserfs_readpages 4 35629 NULL
++spi_register_board_info_35651 spi_register_board_info 2 35651 NULL
++store_debug_level_35652 store_debug_level 3 35652 NULL
++rdmaltWithLock_35669 rdmaltWithLock 0 35669 NULL
++compat_sys_kexec_load_35674 compat_sys_kexec_load 2 35674 NULL
++dm_table_create_35687 dm_table_create 3 35687 NULL
++SYSC_pwritev_35690 SYSC_pwritev 3 35690 NULL
++rds_page_copy_user_35691 rds_page_copy_user 4 35691 NULL
++pci_enable_sriov_35745 pci_enable_sriov 2 35745 NULL
++iwl_dbgfs_disable_ht40_read_35761 iwl_dbgfs_disable_ht40_read 3 35761 NULL
++udf_alloc_i_data_35786 udf_alloc_i_data 2 35786 NULL
++pvr2_hdw_cpufw_get_35824 pvr2_hdw_cpufw_get 0-4-2 35824 NULL
++tx_tx_cmplt_read_35854 tx_tx_cmplt_read 3 35854 NULL
++mthca_buf_alloc_35861 mthca_buf_alloc 2 35861 NULL
++fls64_35862 fls64 0 35862 NULL
++kvm_dirty_bitmap_bytes_35886 kvm_dirty_bitmap_bytes 0 35886 NULL
++ieee80211_if_fmt_dot11MeshRetryTimeout_35890 ieee80211_if_fmt_dot11MeshRetryTimeout 3 35890 NULL
++uwb_rc_cmd_done_35892 uwb_rc_cmd_done 4 35892 NULL
++SyS_set_mempolicy_35909 SyS_set_mempolicy 3 35909 NULL
++kernel_setsockopt_35913 kernel_setsockopt 5 35913 NULL
++rbio_nr_pages_35916 rbio_nr_pages 0-1-2 35916 NULL
++vol_cdev_compat_ioctl_35923 vol_cdev_compat_ioctl 3 35923 NULL
++sctp_tsnmap_mark_35929 sctp_tsnmap_mark 2 35929 NULL
++rx_defrag_init_called_read_35935 rx_defrag_init_called_read 3 35935 NULL
++put_cmsg_compat_35937 put_cmsg_compat 4 35937 NULL
++ext_rts51x_sd_execute_write_data_35971 ext_rts51x_sd_execute_write_data 9 35971 NULL
++ceph_buffer_new_35974 ceph_buffer_new 1 35974 NULL
++acl_alloc_35979 acl_alloc 1 35979 NULL
++generic_file_aio_read_35987 generic_file_aio_read 0 35987 NULL
++koneplus_sysfs_write_35993 koneplus_sysfs_write 6 35993 NULL
++il3945_ucode_tx_stats_read_36016 il3945_ucode_tx_stats_read 3 36016 NULL
++ubi_eba_write_leb_36029 ubi_eba_write_leb 5-6 36029 NULL
++__videobuf_alloc_36031 __videobuf_alloc 1 36031 NULL
++account_shadowed_36048 account_shadowed 2 36048 NULL
++gpio_power_read_36059 gpio_power_read 3 36059 NULL
++write_emulate_36065 write_emulate 2-4 36065 NULL
++radeon_vm_num_pdes_36070 radeon_vm_num_pdes 0 36070 NULL
++ieee80211_if_fmt_peer_36071 ieee80211_if_fmt_peer 3 36071 NULL
++ext3_new_blocks_36073 ext3_new_blocks 3 36073 NULL
++ieee80211_if_write_tsf_36077 ieee80211_if_write_tsf 3 36077 NULL
++snd_pcm_plug_read_transfer_36080 snd_pcm_plug_read_transfer 0-3 36080 NULL
++vga_arb_write_36112 vga_arb_write 3 36112 NULL
++simple_xattr_alloc_36118 simple_xattr_alloc 2 36118 NULL
++compat_ptrace_request_36131 compat_ptrace_request 3-4 36131 NULL
++vmalloc_exec_36132 vmalloc_exec 1 36132 NULL
++max8925_irq_domain_map_36133 max8925_irq_domain_map 2 36133 NULL
++ext3_readpages_36144 ext3_readpages 4 36144 NULL
++alloc_vm_area_36149 alloc_vm_area 1 36149 NULL
++twl_set_36154 twl_set 2 36154 NULL
++b1_alloc_card_36155 b1_alloc_card 1 36155 NULL
++btrfs_file_extent_inline_len_36158 btrfs_file_extent_inline_len 0 36158 NULL
++snd_korg1212_copy_from_36169 snd_korg1212_copy_from 6 36169 NULL
++SyS_kexec_load_36176 SyS_kexec_load 2 36176 NULL
++SYSC_sched_getaffinity_36208 SYSC_sched_getaffinity 2 36208 NULL
++SYSC_process_vm_readv_36216 SYSC_process_vm_readv 3-5 36216 NULL
++ubifs_read_nnode_36221 ubifs_read_nnode 0 36221 NULL
++is_dirty_36223 is_dirty 2 36223 NULL
++dma_alloc_attrs_36225 dma_alloc_attrs 0 36225 NULL
++nfqnl_mangle_36226 nfqnl_mangle 4-2 36226 NULL
++atomic_stats_read_36228 atomic_stats_read 3 36228 NULL
++viafb_iga1_odev_proc_write_36241 viafb_iga1_odev_proc_write 3 36241 NULL
++SYSC_getxattr_36242 SYSC_getxattr 4 36242 NULL
++rproc_recovery_read_36245 rproc_recovery_read 3 36245 NULL
++scrub_stripe_36248 scrub_stripe 5-4 36248 NULL
++compat_sys_mbind_36256 compat_sys_mbind 5 36256 NULL
++usb_buffer_alloc_36276 usb_buffer_alloc 2 36276 NULL
++codec_reg_read_file_36280 codec_reg_read_file 3 36280 NULL
++crypto_shash_digestsize_36284 crypto_shash_digestsize 0 36284 NULL
++nouveau_cli_create_36293 nouveau_cli_create 3 36293 NULL
++lpfc_debugfs_dif_err_read_36303 lpfc_debugfs_dif_err_read 3 36303 NULL
++ad7879_spi_xfer_36311 ad7879_spi_xfer 3 36311 NULL
++fuse_get_user_addr_36312 fuse_get_user_addr 0 36312 NULL
++fat_compat_ioctl_filldir_36328 fat_compat_ioctl_filldir 3 36328 NULL
++lc_create_36332 lc_create 4 36332 NULL
++jbd2_journal_init_revoke_table_36336 jbd2_journal_init_revoke_table 1 36336 NULL
++isku_sysfs_read_key_mask_36343 isku_sysfs_read_key_mask 6 36343 NULL
++v9fs_file_readn_36353 v9fs_file_readn 4 36353 NULL nohasharray
++xz_dec_lzma2_create_36353 xz_dec_lzma2_create 2 36353 &v9fs_file_readn_36353
++to_sector_36361 to_sector 0-1 36361 NULL
++tunables_read_36385 tunables_read 3 36385 NULL
++afs_alloc_flat_call_36399 afs_alloc_flat_call 2-3 36399 NULL
++SyS_sethostname_36417 SyS_sethostname 2 36417 NULL
++sctp_tsnmap_init_36446 sctp_tsnmap_init 2 36446 NULL
++alloc_etherdev_mqs_36450 alloc_etherdev_mqs 1 36450 NULL
++tcf_csum_ipv6_udp_36457 tcf_csum_ipv6_udp 3 36457 NULL
++SyS_process_vm_writev_36476 SyS_process_vm_writev 3-5 36476 NULL
++b43_nphy_load_samples_36481 b43_nphy_load_samples 3 36481 NULL
++tx_tx_checksum_result_read_36490 tx_tx_checksum_result_read 3 36490 NULL
++__hwahc_op_set_ptk_36510 __hwahc_op_set_ptk 5 36510 NULL
++mcam_v4l_read_36513 mcam_v4l_read 3 36513 NULL
++get_param_l_36518 get_param_l 0 36518 NULL
++ieee80211_if_read_fwded_frames_36520 ieee80211_if_read_fwded_frames 3 36520 NULL
++lguest_setup_irq_36531 lguest_setup_irq 1 36531 NULL
++crypto_aead_authsize_36537 crypto_aead_authsize 0 36537 NULL
++cpu_type_read_36540 cpu_type_read 3 36540 NULL
++get_entry_len_36549 get_entry_len 0 36549 NULL
++__kfifo_to_user_36555 __kfifo_to_user 3 36555 NULL nohasharray
++macvtap_do_read_36555 macvtap_do_read 4 36555 &__kfifo_to_user_36555
++btrfs_get_token_64_36572 btrfs_get_token_64 0 36572 NULL
++ssb_bus_scan_36578 ssb_bus_scan 2 36578 NULL
++__erst_read_36579 __erst_read 0 36579 NULL
++put_cmsg_36589 put_cmsg 4 36589 NULL
++pcnet32_realloc_rx_ring_36598 pcnet32_realloc_rx_ring 3 36598 NULL
++fat_ioctl_filldir_36621 fat_ioctl_filldir 3 36621 NULL
++vxge_config_vpaths_36636 vxge_config_vpaths 0 36636 NULL
++format_decode_36638 format_decode 0 36638 NULL
++ced_ioctl_36647 ced_ioctl 2 36647 NULL
++lpfc_idiag_extacc_alloc_get_36648 lpfc_idiag_extacc_alloc_get 0-3 36648 NULL
++perf_calculate_period_36662 perf_calculate_period 3-2 36662 NULL
++osd_req_list_collection_objects_36664 osd_req_list_collection_objects 5 36664 NULL
++iscsi_host_alloc_36671 iscsi_host_alloc 2 36671 NULL
++ptr_to_compat_36680 ptr_to_compat 0 36680 NULL
++ext4_mb_discard_group_preallocations_36685 ext4_mb_discard_group_preallocations 2 36685 NULL
++sched_clock_36717 sched_clock 0 36717 NULL
++extract_icmp6_fields_36732 extract_icmp6_fields 2 36732 NULL
++snd_rawmidi_kernel_read1_36740 snd_rawmidi_kernel_read1 4 36740 NULL
++cxgbi_device_register_36746 cxgbi_device_register 1-2 36746 NULL
++ps_poll_upsd_timeouts_read_36755 ps_poll_upsd_timeouts_read 3 36755 NULL
++ip4ip6_err_36772 ip4ip6_err 5 36772 NULL
++ptp_filter_init_36780 ptp_filter_init 2 36780 NULL
++proc_fault_inject_read_36802 proc_fault_inject_read 3 36802 NULL
++hiddev_ioctl_36816 hiddev_ioctl 2 36816 NULL
++tcf_csum_ipv6_tcp_36822 tcf_csum_ipv6_tcp 3 36822 NULL
++int_hardware_entry_36833 int_hardware_entry 3 36833 NULL
++fc_change_queue_depth_36841 fc_change_queue_depth 2 36841 NULL
++keyctl_describe_key_36853 keyctl_describe_key 3 36853 NULL
++cm_write_36858 cm_write 3 36858 NULL
++tx_tx_data_programmed_read_36871 tx_tx_data_programmed_read 3 36871 NULL
++svc_setsockopt_36876 svc_setsockopt 5 36876 NULL
++raid56_parity_write_36877 raid56_parity_write 5 36877 NULL
++__btrfs_map_block_36883 __btrfs_map_block 3 36883 NULL
++ib_ucm_alloc_data_36885 ib_ucm_alloc_data 3 36885 NULL
++selinux_inode_notifysecctx_36896 selinux_inode_notifysecctx 3 36896 NULL
++OS_kmalloc_36909 OS_kmalloc 1 36909 NULL
++build_key_36931 build_key 1 36931 NULL
++crypto_blkcipher_ivsize_36944 crypto_blkcipher_ivsize 0 36944 NULL
++write_leb_36957 write_leb 5 36957 NULL
++ntfs_external_attr_find_36963 ntfs_external_attr_find 0 36963 NULL
++sparse_early_mem_maps_alloc_node_36971 sparse_early_mem_maps_alloc_node 4 36971 NULL
++drbd_new_dev_size_36998 drbd_new_dev_size 0-3 36998 NULL
++auok190xfb_write_37001 auok190xfb_write 3 37001 NULL
++setxattr_37006 setxattr 4 37006 NULL
++qp_broker_create_37053 qp_broker_create 6-5 37053 NULL nohasharray
++ieee80211_if_read_drop_unencrypted_37053 ieee80211_if_read_drop_unencrypted 3 37053 &qp_broker_create_37053
++SYSC_setxattr_37078 SYSC_setxattr 4 37078 NULL
++parse_command_37079 parse_command 2 37079 NULL
++pipeline_cs_rx_packet_in_read_37089 pipeline_cs_rx_packet_in_read 3 37089 NULL
++tun_get_user_37094 tun_get_user 5 37094 NULL
++has_wrprotected_page_37123 has_wrprotected_page 2-3 37123 NULL
++snd_hda_get_conn_list_37132 snd_hda_get_conn_list 0 37132 NULL
++msg_word_37164 msg_word 0 37164 NULL
++can_set_xattr_37182 can_set_xattr 4 37182 NULL
++crypto_shash_descsize_37212 crypto_shash_descsize 0 37212 NULL
++regmap_access_read_file_37223 regmap_access_read_file 3 37223 NULL
++__do_replace_37227 __do_replace 5 37227 NULL
++rx_filter_dup_filter_read_37238 rx_filter_dup_filter_read 3 37238 NULL
++prot_queue_del_37258 prot_queue_del 0 37258 NULL
++ath6kl_wmi_set_ie_cmd_37260 ath6kl_wmi_set_ie_cmd 6 37260 NULL
++exofs_max_io_pages_37263 exofs_max_io_pages 0-2 37263 NULL
++nested_svm_map_37268 nested_svm_map 2 37268 NULL
++c101_run_37279 c101_run 2 37279 NULL
++srp_target_alloc_37288 srp_target_alloc 3 37288 NULL
++isku_sysfs_write_talkfx_37298 isku_sysfs_write_talkfx 6 37298 NULL
++ieee80211_if_read_power_mode_37305 ieee80211_if_read_power_mode 3 37305 NULL
++jffs2_write_dirent_37311 jffs2_write_dirent 5 37311 NULL
++send_msg_37323 send_msg 4 37323 NULL
++brcmf_sdbrcm_membytes_37324 brcmf_sdbrcm_membytes 3-5 37324 NULL
++l2cap_create_connless_pdu_37327 l2cap_create_connless_pdu 3 37327 NULL
++scsi_mode_select_37330 scsi_mode_select 6 37330 NULL
++rxrpc_server_sendmsg_37331 rxrpc_server_sendmsg 4 37331 NULL
++security_inode_getsecurity_37354 security_inode_getsecurity 0 37354 NULL
++iommu_num_pages_37391 iommu_num_pages 0-1-3-2 37391 NULL
++sys_getxattr_37418 sys_getxattr 4 37418 NULL
++hci_sock_sendmsg_37420 hci_sock_sendmsg 4 37420 NULL
++acpi_os_allocate_zeroed_37422 acpi_os_allocate_zeroed 1 37422 NULL nohasharray
++find_next_bit_37422 find_next_bit 0-2-3 37422 &acpi_os_allocate_zeroed_37422
++tty_insert_flip_string_fixed_flag_37428 tty_insert_flip_string_fixed_flag 4 37428 NULL
++iwl_print_last_event_logs_37433 iwl_print_last_event_logs 0-7-9 37433 NULL
++tty_audit_log_37440 tty_audit_log 5 37440 NULL
++tcp_established_options_37450 tcp_established_options 0 37450 NULL
++brcmf_sdio_dump_console_37455 brcmf_sdio_dump_console 4 37455 NULL
++__remove_37457 __remove 2 37457 NULL
++ufs_data_ptr_to_cpu_37475 ufs_data_ptr_to_cpu 0 37475 NULL
++get_est_timing_37484 get_est_timing 0 37484 NULL
++kmem_realloc_37489 kmem_realloc 2 37489 NULL
++kvm_vcpu_compat_ioctl_37500 kvm_vcpu_compat_ioctl 3 37500 NULL
++vmalloc_32_user_37519 vmalloc_32_user 1 37519 NULL
++fault_inject_read_37534 fault_inject_read 3 37534 NULL
++hdr_size_37536 hdr_size 0 37536 NULL
++a2p_37544 a2p 0-1 37544 NULL
++sep_create_dcb_dmatables_context_37551 sep_create_dcb_dmatables_context 6 37551 NULL nohasharray
++nf_nat_mangle_tcp_packet_37551 nf_nat_mangle_tcp_packet 6-8 37551 &sep_create_dcb_dmatables_context_37551
++xhci_alloc_streams_37586 xhci_alloc_streams 5 37586 NULL
++mlx4_get_mgm_entry_size_37607 mlx4_get_mgm_entry_size 0 37607 NULL
++kvm_read_guest_page_mmu_37611 kvm_read_guest_page_mmu 6-3 37611 NULL
++SYSC_mbind_37622 SYSC_mbind 5 37622 NULL
++btrfs_calc_trans_metadata_size_37629 btrfs_calc_trans_metadata_size 0-2 37629 NULL nohasharray
++policy_residency_37629 policy_residency 0 37629 &btrfs_calc_trans_metadata_size_37629
++check_pt_base_37635 check_pt_base 3 37635 NULL
++alloc_fd_37637 alloc_fd 1 37637 NULL
++bio_copy_user_iov_37660 bio_copy_user_iov 4 37660 NULL
++rfcomm_sock_sendmsg_37661 rfcomm_sock_sendmsg 4 37661 NULL nohasharray
++vmw_framebuffer_dmabuf_dirty_37661 vmw_framebuffer_dmabuf_dirty 6 37661 &rfcomm_sock_sendmsg_37661
++SYSC_get_mempolicy_37664 SYSC_get_mempolicy 4-3 37664 NULL
++lnw_gpio_to_irq_37665 lnw_gpio_to_irq 2 37665 NULL
++ieee80211_if_read_rc_rateidx_mcs_mask_2ghz_37675 ieee80211_if_read_rc_rateidx_mcs_mask_2ghz 3 37675 NULL
++regmap_map_read_file_37685 regmap_map_read_file 3 37685 NULL
++nametbl_header_37698 nametbl_header 2 37698 NULL
++__le32_to_cpup_37702 __le32_to_cpup 0 37702 NULL
++dm_thin_remove_block_37724 dm_thin_remove_block 2 37724 NULL
++find_active_uprobe_37733 find_active_uprobe 1 37733 NULL
++read_enabled_file_bool_37744 read_enabled_file_bool 3 37744 NULL
++ocfs2_duplicate_clusters_by_jbd_37749 ocfs2_duplicate_clusters_by_jbd 6-4-5 37749 NULL
++ocfs2_control_cfu_37750 ocfs2_control_cfu 2 37750 NULL
++ipath_cdev_init_37752 ipath_cdev_init 1 37752 NULL
++dccp_setsockopt_cscov_37766 dccp_setsockopt_cscov 2 37766 NULL
++dma_pte_addr_37784 dma_pte_addr 0 37784 NULL
++il4965_rs_sta_dbgfs_rate_scale_data_read_37792 il4965_rs_sta_dbgfs_rate_scale_data_read 3 37792 NULL
++smk_read_logging_37804 smk_read_logging 3 37804 NULL
++deny_write_access_37813 deny_write_access 0 37813 NULL
++rx_decrypt_key_not_found_read_37820 rx_decrypt_key_not_found_read 3 37820 NULL
++bitmap_find_next_zero_area_37827 bitmap_find_next_zero_area 2-3-5-4 37827 NULL
++o2hb_debug_read_37851 o2hb_debug_read 3 37851 NULL
++isku_sysfs_write_last_set_37868 isku_sysfs_write_last_set 6 37868 NULL nohasharray
++xfs_dir2_block_to_sf_37868 xfs_dir2_block_to_sf 3 37868 &isku_sysfs_write_last_set_37868
++sys_setxattr_37880 sys_setxattr 4 37880 NULL
++dvb_net_sec_37884 dvb_net_sec 3 37884 NULL
++max77686_irq_domain_map_37897 max77686_irq_domain_map 2 37897 NULL
++compat_sys_rt_sigpending_37899 compat_sys_rt_sigpending 2 37899 NULL
++tipc_link_send_sections_fast_37920 tipc_link_send_sections_fast 4 37920 NULL
++pkt_alloc_packet_data_37928 pkt_alloc_packet_data 1 37928 NULL
++read_rbu_packet_size_37939 read_rbu_packet_size 6 37939 NULL
++fifo_alloc_37961 fifo_alloc 1 37961 NULL
++ext3_free_blocks_sb_37967 ext3_free_blocks_sb 3-4 37967 NULL
++rds_rdma_extra_size_37990 rds_rdma_extra_size 0 37990 NULL
++persistent_ram_old_size_37997 persistent_ram_old_size 0 37997 NULL
++vfs_readv_38011 vfs_readv 3 38011 NULL
++aggr_recv_addba_req_evt_38037 aggr_recv_addba_req_evt 4 38037 NULL
++klsi_105_prepare_write_buffer_38044 klsi_105_prepare_write_buffer 3 38044 NULL nohasharray
++il_dbgfs_chain_noise_read_38044 il_dbgfs_chain_noise_read 3 38044 &klsi_105_prepare_write_buffer_38044
++SyS_llistxattr_38048 SyS_llistxattr 3 38048 NULL
++_xfs_buf_alloc_38058 _xfs_buf_alloc 3 38058 NULL nohasharray
++is_discarded_38058 is_discarded 2 38058 &_xfs_buf_alloc_38058
++nsm_create_handle_38060 nsm_create_handle 4 38060 NULL
++alloc_ltalkdev_38071 alloc_ltalkdev 1 38071 NULL
++xfs_buf_readahead_map_38081 xfs_buf_readahead_map 3 38081 NULL
++uwb_mac_addr_print_38085 uwb_mac_addr_print 2 38085 NULL
++tcf_csum_ipv4_udp_38089 tcf_csum_ipv4_udp 3 38089 NULL
++request_key_auth_new_38092 request_key_auth_new 3 38092 NULL
++proc_self_readlink_38094 proc_self_readlink 3 38094 NULL
++ep0_read_38095 ep0_read 3 38095 NULL
++sk_wmem_schedule_38096 sk_wmem_schedule 2 38096 NULL
++rbd_obj_read_sync_38098 rbd_obj_read_sync 3-4 38098 NULL
++snd_pcm_oss_write_38108 snd_pcm_oss_write 3 38108 NULL
++vmw_kms_present_38130 vmw_kms_present 9 38130 NULL
++__ntfs_copy_from_user_iovec_inatomic_38153 __ntfs_copy_from_user_iovec_inatomic 3-4 38153 NULL
++kvm_clear_guest_38164 kvm_clear_guest 3-2 38164 NULL
++cdev_add_38176 cdev_add 2-3 38176 NULL
++brcmf_sdcard_recv_buf_38179 brcmf_sdcard_recv_buf 6 38179 NULL
++rt2x00debug_write_rf_38195 rt2x00debug_write_rf 3 38195 NULL
++get_ucode_user_38202 get_ucode_user 3 38202 NULL
++ext3_new_block_38208 ext3_new_block 3 38208 NULL
++stmpe_gpio_irq_map_38222 stmpe_gpio_irq_map 3 38222 NULL
++osd_req_list_partition_collections_38223 osd_req_list_partition_collections 5 38223 NULL
++palmas_gpio_to_irq_38235 palmas_gpio_to_irq 2 38235 NULL
++vhost_net_compat_ioctl_38237 vhost_net_compat_ioctl 3 38237 NULL
++_ipw_read_reg32_38245 _ipw_read_reg32 0 38245 NULL
++from_dblock_38256 from_dblock 0-1 38256 NULL
++vmci_qp_broker_set_page_store_38260 vmci_qp_broker_set_page_store 2-3 38260 NULL
++ieee80211_if_read_auto_open_plinks_38268 ieee80211_if_read_auto_open_plinks 3 38268 NULL nohasharray
++SYSC_msgrcv_38268 SYSC_msgrcv 3 38268 &ieee80211_if_read_auto_open_plinks_38268 nohasharray
++mthca_alloc_icm_table_38268 mthca_alloc_icm_table 4-3 38268 &SYSC_msgrcv_38268
++xfs_bmbt_to_bmdr_38275 xfs_bmbt_to_bmdr 3 38275 NULL nohasharray
++xfs_bmdr_to_bmbt_38275 xfs_bmdr_to_bmbt 5 38275 &xfs_bmbt_to_bmdr_38275
++ftdi_process_packet_38281 ftdi_process_packet 4 38281 NULL nohasharray
++swiotlb_sync_single_for_cpu_38281 swiotlb_sync_single_for_cpu 2 38281 &ftdi_process_packet_38281
++gpa_to_gfn_38291 gpa_to_gfn 0-1 38291 NULL
++zd_mac_rx_38296 zd_mac_rx 3 38296 NULL
++isr_rx_headers_read_38325 isr_rx_headers_read 3 38325 NULL
++ida_simple_get_38326 ida_simple_get 2 38326 NULL
++__snd_gf1_look8_38333 __snd_gf1_look8 0 38333 NULL
++pyra_sysfs_write_38370 pyra_sysfs_write 6 38370 NULL
++dn_sendmsg_38390 dn_sendmsg 4 38390 NULL
++get_valid_node_allowed_38412 get_valid_node_allowed 1-0 38412 NULL
++ocfs2_which_cluster_group_38413 ocfs2_which_cluster_group 2 38413 NULL
++ht_destroy_irq_38418 ht_destroy_irq 1 38418 NULL
++ieee80211_if_read_dtim_count_38419 ieee80211_if_read_dtim_count 3 38419 NULL
++asix_write_cmd_async_38420 asix_write_cmd_async 5 38420 NULL
++pcnet32_realloc_tx_ring_38428 pcnet32_realloc_tx_ring 3 38428 NULL
++pmcraid_copy_sglist_38431 pmcraid_copy_sglist 3 38431 NULL
++kvm_write_guest_38454 kvm_write_guest 4-2 38454 NULL
++i915_min_freq_read_38470 i915_min_freq_read 3 38470 NULL
++kvm_arch_setup_async_pf_38481 kvm_arch_setup_async_pf 3 38481 NULL
++blk_end_bidi_request_38482 blk_end_bidi_request 3-4 38482 NULL
++cpu_to_mem_38501 cpu_to_mem 0 38501 NULL
++dev_names_read_38509 dev_names_read 3 38509 NULL
++iscsi_create_iface_38510 iscsi_create_iface 5 38510 NULL
++event_rx_mismatch_read_38518 event_rx_mismatch_read 3 38518 NULL
++set_queue_count_38519 set_queue_count 0 38519 NULL
++mlx4_ib_db_map_user_38529 mlx4_ib_db_map_user 2 38529 NULL
++ubifs_idx_node_sz_38546 ubifs_idx_node_sz 0-2 38546 NULL
++btrfs_discard_extent_38547 btrfs_discard_extent 2 38547 NULL
++cpu_to_node_38561 cpu_to_node 0 38561 NULL
++irda_sendmsg_dgram_38563 irda_sendmsg_dgram 4 38563 NULL
++il4965_rs_sta_dbgfs_scale_table_read_38564 il4965_rs_sta_dbgfs_scale_table_read 3 38564 NULL
++_ipw_read32_38565 _ipw_read32 0 38565 NULL
++snd_nm256_playback_copy_38567 snd_nm256_playback_copy 5-3 38567 NULL
++copy_ctl_value_to_user_38587 copy_ctl_value_to_user 4 38587 NULL
++cosa_net_setup_rx_38594 cosa_net_setup_rx 2 38594 NULL
++compat_sys_ptrace_38595 compat_sys_ptrace 3-4 38595 NULL
++icn_writecmd_38629 icn_writecmd 2 38629 NULL
++ext2_readpages_38640 ext2_readpages 4 38640 NULL
++cma_create_area_38642 cma_create_area 2 38642 NULL
++audit_init_entry_38644 audit_init_entry 1 38644 NULL
++qp_broker_alloc_38646 qp_broker_alloc 5-6 38646 NULL
++mmc_send_cxd_data_38655 mmc_send_cxd_data 5 38655 NULL
++nouveau_instmem_create__38664 nouveau_instmem_create_ 4 38664 NULL
++skb_tnl_header_len_38669 skb_tnl_header_len 0 38669 NULL
++cfg80211_send_disassoc_38678 cfg80211_send_disassoc 3 38678 NULL
++iscsit_dump_data_payload_38683 iscsit_dump_data_payload 2 38683 NULL
++ext4_wait_block_bitmap_38695 ext4_wait_block_bitmap 2 38695 NULL
++rbio_add_io_page_38700 rbio_add_io_page 6 38700 NULL
++find_next_usable_block_38716 find_next_usable_block 1-3 38716 NULL
++alloc_trace_probe_38720 alloc_trace_probe 6 38720 NULL
++phys_to_virt_38757 phys_to_virt 0-1 38757 NULL
++udf_readpages_38761 udf_readpages 4 38761 NULL
++iwl_dbgfs_thermal_throttling_read_38779 iwl_dbgfs_thermal_throttling_read 3 38779 NULL
++snd_gus_dram_write_38784 snd_gus_dram_write 4 38784 NULL
++err_decode_38804 err_decode 2 38804 NULL
++ipv6_renew_option_38813 ipv6_renew_option 3 38813 NULL
++sys_select_38827 sys_select 1 38827 NULL
++b43_txhdr_size_38832 b43_txhdr_size 0 38832 NULL
++direct_entry_38836 direct_entry 3 38836 NULL
++compat_udp_setsockopt_38840 compat_udp_setsockopt 5 38840 NULL
++interfaces_38859 interfaces 2 38859 NULL
++pci_msix_table_size_38867 pci_msix_table_size 0 38867 NULL
++sizeof_gpio_leds_priv_38882 sizeof_gpio_leds_priv 0-1 38882 NULL
++dbgfs_state_38894 dbgfs_state 3 38894 NULL
++f2fs_xattr_set_acl_38895 f2fs_xattr_set_acl 4 38895 NULL
++process_bulk_data_command_38906 process_bulk_data_command 4 38906 NULL
++ext3_trim_all_free_38929 ext3_trim_all_free 3-4-2 38929 NULL
++sbp_count_se_tpg_luns_38943 sbp_count_se_tpg_luns 0 38943 NULL
++__ath6kl_wmi_send_mgmt_cmd_38971 __ath6kl_wmi_send_mgmt_cmd 7 38971 NULL
++C_SYSC_preadv64_38977 C_SYSC_preadv64 3 38977 NULL nohasharray
++usb_maxpacket_38977 usb_maxpacket 0 38977 &C_SYSC_preadv64_38977
++OSDSetBlock_38986 OSDSetBlock 4-2 38986 NULL
++udf_new_block_38999 udf_new_block 4 38999 NULL
++get_nodes_39012 get_nodes 3 39012 NULL
++twl6030_interrupt_unmask_39013 twl6030_interrupt_unmask 2 39013 NULL
++acpi_install_gpe_block_39031 acpi_install_gpe_block 4 39031 NULL
++_zd_iowrite32v_async_locked_39034 _zd_iowrite32v_async_locked 3 39034 NULL
++line6_midibuf_read_39067 line6_midibuf_read 0-3 39067 NULL
++ext4_init_block_bitmap_39071 ext4_init_block_bitmap 3 39071 NULL
++tomoyo_truncate_39105 tomoyo_truncate 0 39105 NULL
++__kfifo_to_user_r_39123 __kfifo_to_user_r 3 39123 NULL
++ea_foreach_39133 ea_foreach 0 39133 NULL
++generic_permission_39150 generic_permission 0 39150 NULL
++alloc_ring_39151 alloc_ring 2-4 39151 NULL
++proc_coredump_filter_read_39153 proc_coredump_filter_read 3 39153 NULL
++create_bounce_buffer_39155 create_bounce_buffer 3 39155 NULL
++ext3_xattr_check_names_39174 ext3_xattr_check_names 0 39174 NULL
++init_list_set_39188 init_list_set 2-3 39188 NULL
++ubi_more_update_data_39189 ubi_more_update_data 4 39189 NULL
++qcam_read_bytes_39205 qcam_read_bytes 0 39205 NULL
++qla4_82xx_pci_mem_read_direct_39208 qla4_82xx_pci_mem_read_direct 2 39208 NULL
++vfio_group_fops_compat_ioctl_39219 vfio_group_fops_compat_ioctl 3 39219 NULL
++ivtv_v4l2_write_39226 ivtv_v4l2_write 3 39226 NULL
++batadv_tt_response_fill_table_39236 batadv_tt_response_fill_table 1 39236 NULL
++posix_acl_to_xattr_39237 posix_acl_to_xattr 0 39237 NULL
++drm_order_39244 drm_order 0 39244 NULL
++r128_compat_ioctl_39250 r128_compat_ioctl 2 39250 NULL nohasharray
++pwr_cont_miss_bcns_spread_read_39250 pwr_cont_miss_bcns_spread_read 3 39250 &r128_compat_ioctl_39250
++__skb_cow_39254 __skb_cow 2 39254 NULL
++ath6kl_wmi_set_appie_cmd_39266 ath6kl_wmi_set_appie_cmd 5 39266 NULL
++rx_filter_protection_filter_read_39282 rx_filter_protection_filter_read 3 39282 NULL
++__vmalloc_node_39308 __vmalloc_node 1 39308 NULL
++__cfg80211_connect_result_39326 __cfg80211_connect_result 4-6 39326 NULL
++wimax_msg_alloc_39343 wimax_msg_alloc 4 39343 NULL
++__cfg80211_send_deauth_39344 __cfg80211_send_deauth 3 39344 NULL
++__copy_from_user_nocache_39351 __copy_from_user_nocache 3 39351 NULL
++ide_complete_rq_39354 ide_complete_rq 3 39354 NULL
++do_write_log_from_user_39362 do_write_log_from_user 3 39362 NULL
++vortex_wtdma_getlinearpos_39371 vortex_wtdma_getlinearpos 0 39371 NULL
++regmap_name_read_file_39379 regmap_name_read_file 3 39379 NULL
++fnic_trace_debugfs_read_39380 fnic_trace_debugfs_read 3 39380 NULL
++ps_poll_ps_poll_utilization_read_39383 ps_poll_ps_poll_utilization_read 3 39383 NULL
++__send_to_port_39386 __send_to_port 3 39386 NULL
++user_power_read_39414 user_power_read 3 39414 NULL
++alloc_agpphysmem_i8xx_39427 alloc_agpphysmem_i8xx 1 39427 NULL
++sys_semop_39457 sys_semop 3 39457 NULL
++ptrace_peek_siginfo_39458 ptrace_peek_siginfo 3 39458 NULL
++setkey_unaligned_39474 setkey_unaligned 3 39474 NULL
++do_get_mempolicy_39485 do_get_mempolicy 3 39485 NULL
++ieee80211_if_fmt_dot11MeshHWMPmaxPREQretries_39499 ieee80211_if_fmt_dot11MeshHWMPmaxPREQretries 3 39499 NULL
++atomic64_read_unchecked_39505 atomic64_read_unchecked 0 39505 NULL
++int_proc_write_39542 int_proc_write 3 39542 NULL
++pp_write_39554 pp_write 3 39554 NULL
++ol_dqblk_block_39558 ol_dqblk_block 0-3-2 39558 NULL
++datablob_format_39571 datablob_format 2 39571 NULL nohasharray
++ieee80211_if_read_fwded_mcast_39571 ieee80211_if_read_fwded_mcast 3 39571 &datablob_format_39571
++handle_response_icmp_39574 handle_response_icmp 7 39574 NULL
++mtdchar_compat_ioctl_39602 mtdchar_compat_ioctl 3 39602 NULL
++n_tty_compat_ioctl_helper_39605 n_tty_compat_ioctl_helper 4 39605 NULL
++ext_depth_39607 ext_depth 0 39607 NULL
++nfs_idmap_get_key_39616 nfs_idmap_get_key 2 39616 NULL
++sdio_readb_39618 sdio_readb 0 39618 NULL
++set_dev_class_39645 set_dev_class 4 39645 NULL nohasharray
++dm_exception_table_init_39645 dm_exception_table_init 2 39645 &set_dev_class_39645
++snd_rme32_capture_copy_39653 snd_rme32_capture_copy 5 39653 NULL
++tcp_try_rmem_schedule_39657 tcp_try_rmem_schedule 3 39657 NULL nohasharray
++prism2_info_hostscanresults_39657 prism2_info_hostscanresults 3 39657 &tcp_try_rmem_schedule_39657
++kvm_read_guest_cached_39666 kvm_read_guest_cached 4 39666 NULL
++v4l_stk_read_39672 v4l_stk_read 3 39672 NULL
++hsc_msg_len_get_39673 hsc_msg_len_get 0 39673 NULL
++do_surface_dirty_sou_39678 do_surface_dirty_sou 7 39678 NULL
++ftrace_pid_write_39710 ftrace_pid_write 3 39710 NULL
++tcf_csum_ipv4_tcp_39713 tcf_csum_ipv4_tcp 4 39713 NULL
++remap_to_origin_clear_discard_39767 remap_to_origin_clear_discard 3 39767 NULL
++ocfs2_pages_per_cluster_39790 ocfs2_pages_per_cluster 0 39790 NULL
++crypto_ablkcipher_blocksize_39811 crypto_ablkcipher_blocksize 0 39811 NULL
++security_inode_listsecurity_39812 security_inode_listsecurity 0 39812 NULL
++snd_pcm_oss_writev3_39818 snd_pcm_oss_writev3 3 39818 NULL
++sys_migrate_pages_39825 sys_migrate_pages 2 39825 NULL
++get_priv_size_39828 get_priv_size 0-1 39828 NULL
++pkt_add_39897 pkt_add 3 39897 NULL
++read_file_modal_eeprom_39909 read_file_modal_eeprom 3 39909 NULL
++gen_pool_add_virt_39913 gen_pool_add_virt 4 39913 NULL
++dw210x_op_rw_39915 dw210x_op_rw 6 39915 NULL
++aes_encrypt_interrupt_read_39919 aes_encrypt_interrupt_read 3 39919 NULL
++exofs_read_kern_39921 exofs_read_kern 6 39921 NULL nohasharray
++oom_score_adj_read_39921 oom_score_adj_read 3 39921 &exofs_read_kern_39921
++__spi_async_39932 __spi_async 0 39932 NULL
++__get_order_39935 __get_order 0 39935 NULL
++error_error_frame_read_39947 error_error_frame_read 3 39947 NULL nohasharray
++fwnet_pd_new_39947 fwnet_pd_new 4 39947 &error_error_frame_read_39947
++tty_prepare_flip_string_39955 tty_prepare_flip_string 3 39955 NULL
++dma_push_rx_39973 dma_push_rx 2 39973 NULL
++vfio_pci_read_39975 vfio_pci_read 3 39975 NULL
++broadsheetfb_write_39976 broadsheetfb_write 3 39976 NULL
++mthca_array_init_39987 mthca_array_init 2 39987 NULL
++xen_hvm_config_40018 xen_hvm_config 2 40018 NULL
++nf_nat_icmpv6_reply_translation_40023 nf_nat_icmpv6_reply_translation 5 40023 NULL nohasharray
++ivtvfb_write_40023 ivtvfb_write 3 40023 &nf_nat_icmpv6_reply_translation_40023
++disc_pwup_write_40027 disc_pwup_write 3 40027 NULL
++ea_foreach_i_40028 ea_foreach_i 0 40028 NULL
++datablob_hmac_append_40038 datablob_hmac_append 3 40038 NULL
++regmap_add_irq_chip_40042 regmap_add_irq_chip 4 40042 NULL
++add_tty_40055 add_tty 1 40055 NULL nohasharray
++l2cap_create_iframe_pdu_40055 l2cap_create_iframe_pdu 3 40055 &add_tty_40055
++atomic_xchg_40070 atomic_xchg 0 40070 NULL
++gen_pool_first_fit_40110 gen_pool_first_fit 2-3-4 40110 NULL
++sctp_setsockopt_delayed_ack_40129 sctp_setsockopt_delayed_ack 3 40129 NULL
++dwc2_max_desc_num_40132 dwc2_max_desc_num 0 40132 NULL
++rx_rx_frame_checksum_read_40140 rx_rx_frame_checksum_read 3 40140 NULL
++iwch_alloc_fastreg_pbl_40153 iwch_alloc_fastreg_pbl 2 40153 NULL
++pt_write_40159 pt_write 3 40159 NULL
++scsi_sg_count_40182 scsi_sg_count 0 40182 NULL
++ipr_alloc_ucode_buffer_40199 ipr_alloc_ucode_buffer 1 40199 NULL nohasharray
++devnode_find_40199 devnode_find 3-2 40199 &ipr_alloc_ucode_buffer_40199
++allocate_probes_40204 allocate_probes 1 40204 NULL
++compat_put_long_40214 compat_put_long 1 40214 NULL
++au0828_v4l2_read_40220 au0828_v4l2_read 3 40220 NULL
++osst_read_40237 osst_read 3 40237 NULL
++lpage_info_slot_40243 lpage_info_slot 1-3 40243 NULL
++ocfs2_zero_extend_get_range_40248 ocfs2_zero_extend_get_range 4 40248 NULL
++of_get_child_count_40254 of_get_child_count 0 40254 NULL
++rs_sta_dbgfs_scale_table_read_40262 rs_sta_dbgfs_scale_table_read 3 40262 NULL
++usbnet_read_cmd_40275 usbnet_read_cmd 7 40275 NULL
++rx_xfr_hint_trig_read_40283 rx_xfr_hint_trig_read 3 40283 NULL
++_calc_trunk_info_40291 _calc_trunk_info 2 40291 NULL
++crash_free_reserved_phys_range_40292 crash_free_reserved_phys_range 1 40292 NULL
++ubi_io_write_data_40305 ubi_io_write_data 4-5 40305 NULL
++batadv_tt_changes_fill_buff_40323 batadv_tt_changes_fill_buff 4 40323 NULL
++ib_get_mad_data_offset_40336 ib_get_mad_data_offset 0 40336 NULL
++mmio_read_40348 mmio_read 4 40348 NULL
++usb_dump_interface_40353 usb_dump_interface 0 40353 NULL
++ocfs2_release_clusters_40355 ocfs2_release_clusters 4 40355 NULL
++event_rx_mem_empty_read_40363 event_rx_mem_empty_read 3 40363 NULL
++ocfs2_check_range_for_refcount_40365 ocfs2_check_range_for_refcount 2-3 40365 NULL
++fwnet_incoming_packet_40380 fwnet_incoming_packet 3 40380 NULL
++brcmf_sdbrcm_get_image_40397 brcmf_sdbrcm_get_image 0-2 40397 NULL
++atmel_rmem16_40450 atmel_rmem16 0 40450 NULL
++tomoyo_update_policy_40458 tomoyo_update_policy 2 40458 NULL
++zd_usb_scnprint_id_40459 zd_usb_scnprint_id 0-3 40459 NULL
++batadv_hash_new_40491 batadv_hash_new 1 40491 NULL
++devcgroup_inode_permission_40492 devcgroup_inode_permission 0 40492 NULL
++tty_write_room_40495 tty_write_room 0 40495 NULL
++persistent_ram_new_40501 persistent_ram_new 1-2 40501 NULL
++sg_phys_40507 sg_phys 0 40507 NULL
++TSS_checkhmac2_40520 TSS_checkhmac2 5-7 40520 NULL
++ixgbe_dbg_reg_ops_read_40540 ixgbe_dbg_reg_ops_read 3 40540 NULL
++ima_write_policy_40548 ima_write_policy 3 40548 NULL
++esp_alloc_tmp_40558 esp_alloc_tmp 3-2 40558 NULL
++ufs_inode_getfrag_40560 ufs_inode_getfrag 2-4 40560 NULL
++bdev_sectors_40564 bdev_sectors 0 40564 NULL
++lba_to_map_index_40580 lba_to_map_index 0-1 40580 NULL
++skge_rx_get_40598 skge_rx_get 3 40598 NULL
++get_priv_descr_and_size_40612 get_priv_descr_and_size 0 40612 NULL
++bl_mark_sectors_init_40613 bl_mark_sectors_init 2-3 40613 NULL
++cpuset_sprintf_cpulist_40627 cpuset_sprintf_cpulist 0 40627 NULL
++twl4030_kpwrite_u8_40665 twl4030_kpwrite_u8 3 40665 NULL
++__cfg80211_roamed_40668 __cfg80211_roamed 4-6 40668 NULL
++pipeline_rx_complete_stat_fifo_int_read_40671 pipeline_rx_complete_stat_fifo_int_read 3 40671 NULL
++fops_read_40672 fops_read 3 40672 NULL
++alloc_rbio_40676 alloc_rbio 4 40676 NULL
++videobuf_dma_init_user_locked_40678 videobuf_dma_init_user_locked 3 40678 NULL
++nfc_hci_set_param_40697 nfc_hci_set_param 5 40697 NULL
++vfio_pci_config_rw_40698 vfio_pci_config_rw 3 40698 NULL
++__seq_open_private_40715 __seq_open_private 3 40715 NULL
++fuse_readpages_40737 fuse_readpages 4 40737 NULL
++xfs_iext_remove_direct_40744 xfs_iext_remove_direct 3 40744 NULL nohasharray
++find_next_zero_bit_le_40744 find_next_zero_bit_le 0-2-3 40744 &xfs_iext_remove_direct_40744
++security_inode_listxattr_40752 security_inode_listxattr 0 40752 NULL
++fat_generic_compat_ioctl_40755 fat_generic_compat_ioctl 3 40755 NULL
++card_send_command_40757 card_send_command 3 40757 NULL
++ad1889_readl_40765 ad1889_readl 0 40765 NULL
++pg_write_40766 pg_write 3 40766 NULL
++show_list_40775 show_list 3 40775 NULL
++calcu_metadata_size_40782 calcu_metadata_size 0 40782 NULL
++kfifo_out_copy_r_40784 kfifo_out_copy_r 0-3 40784 NULL
++bitmap_weight_40791 bitmap_weight 0-2 40791 NULL
++pyra_sysfs_read_40795 pyra_sysfs_read 6 40795 NULL
++netdev_alloc_skb_ip_align_40811 netdev_alloc_skb_ip_align 2 40811 NULL
++nl80211_send_roamed_40825 nl80211_send_roamed 5-7 40825 NULL
++SyS_mbind_40828 SyS_mbind 5 40828 NULL
++__mlx4_qp_reserve_range_40847 __mlx4_qp_reserve_range 2-3 40847 NULL
++isku_sysfs_write_keys_thumbster_40851 isku_sysfs_write_keys_thumbster 6 40851 NULL
++ocfs2_zero_partial_clusters_40856 ocfs2_zero_partial_clusters 2-3 40856 NULL
++v9fs_file_read_40858 v9fs_file_read 3 40858 NULL
++read_file_queue_40895 read_file_queue 3 40895 NULL
++waiters_read_40902 waiters_read 3 40902 NULL
++isdn_add_channels_40905 isdn_add_channels 3 40905 NULL
++gfs2_ea_find_40913 gfs2_ea_find 0 40913 NULL
++vol_cdev_write_40915 vol_cdev_write 3 40915 NULL
++snd_vx_create_40948 snd_vx_create 4 40948 NULL
++skb_end_offset_40949 skb_end_offset 0 40949 NULL
++wm8994_free_irq_40951 wm8994_free_irq 2 40951 NULL
++rds_sendmsg_40976 rds_sendmsg 4 40976 NULL
++il_dbgfs_fh_reg_read_40993 il_dbgfs_fh_reg_read 3 40993 NULL
++mac80211_format_buffer_41010 mac80211_format_buffer 2 41010 NULL
++mtd_block_isbad_41015 mtd_block_isbad 0 41015 NULL
++_req_append_segment_41031 _req_append_segment 2 41031 NULL
++mISDN_sock_sendmsg_41035 mISDN_sock_sendmsg 4 41035 NULL
++ocfs2_xattr_index_block_find_41040 ocfs2_xattr_index_block_find 0 41040 NULL
++vfs_listxattr_41062 vfs_listxattr 0 41062 NULL
++cfg80211_inform_bss_frame_41078 cfg80211_inform_bss_frame 4 41078 NULL
++roccat_read_41093 roccat_read 3 41093 NULL nohasharray
++nvme_map_user_pages_41093 nvme_map_user_pages 3-4 41093 &roccat_read_41093
++dma_attach_41094 dma_attach 5-6 41094 NULL
++provide_user_output_41105 provide_user_output 3 41105 NULL
++f_audio_buffer_alloc_41110 f_audio_buffer_alloc 1 41110 NULL
++v4l2_ctrl_new_int_menu_41151 v4l2_ctrl_new_int_menu 4 41151 NULL
++tx_frag_mpdu_alloc_failed_read_41167 tx_frag_mpdu_alloc_failed_read 3 41167 NULL
++dvb_ca_write_41171 dvb_ca_write 3 41171 NULL
++ol_quota_chunk_block_41177 ol_quota_chunk_block 0-2 41177 NULL
++netif_get_num_default_rss_queues_41187 netif_get_num_default_rss_queues 0 41187 NULL
++compat_sys_process_vm_writev_41194 compat_sys_process_vm_writev 3-5 41194 NULL
++dfs_file_write_41196 dfs_file_write 3 41196 NULL
++xfs_readdir_41200 xfs_readdir 3 41200 NULL
++ocfs2_read_quota_block_41207 ocfs2_read_quota_block 2 41207 NULL
++nfs_page_array_len_41219 nfs_page_array_len 0-2-1 41219 NULL
++hiddev_compat_ioctl_41255 hiddev_compat_ioctl 2-3 41255 NULL
++erst_read_41260 erst_read 0 41260 NULL
++__fprog_create_41263 __fprog_create 2 41263 NULL
++setup_cluster_bitmap_41270 setup_cluster_bitmap 4 41270 NULL
++alloc_context_41283 alloc_context 1 41283 NULL
++arch_gnttab_map_shared_41306 arch_gnttab_map_shared 3 41306 NULL
++objio_alloc_io_state_41316 objio_alloc_io_state 6 41316 NULL
++twl_change_queue_depth_41342 twl_change_queue_depth 2 41342 NULL
++cnic_init_id_tbl_41354 cnic_init_id_tbl 2 41354 NULL
++jbd2_alloc_41359 jbd2_alloc 1 41359 NULL
++kmp_init_41373 kmp_init 2 41373 NULL
++isr_commands_read_41398 isr_commands_read 3 41398 NULL
++is_writethrough_io_41406 is_writethrough_io 3 41406 NULL
++sys_flistxattr_41407 sys_flistxattr 3 41407 NULL
++rx_defrag_decrypt_failed_read_41411 rx_defrag_decrypt_failed_read 3 41411 NULL
++xfs_iext_add_41422 xfs_iext_add 3 41422 NULL
++vsock_dev_compat_ioctl_41427 vsock_dev_compat_ioctl 3 41427 NULL
++isdn_ppp_fill_rq_41428 isdn_ppp_fill_rq 2 41428 NULL
++lbs_rdrf_read_41431 lbs_rdrf_read 3 41431 NULL
++iio_device_alloc_41440 iio_device_alloc 1 41440 NULL
++ntfs_file_buffered_write_41442 ntfs_file_buffered_write 4-6 41442 NULL
++pcpu_build_alloc_info_41443 pcpu_build_alloc_info 1-2-3 41443 NULL
++layout_leb_in_gaps_41470 layout_leb_in_gaps 0 41470 NULL
++rt2x00debug_write_rfcsr_41473 rt2x00debug_write_rfcsr 3 41473 NULL
++wep_interrupt_read_41492 wep_interrupt_read 3 41492 NULL
++SyS_get_mempolicy_41495 SyS_get_mempolicy 3-4 41495 NULL
++hpfs_translate_name_41497 hpfs_translate_name 3 41497 NULL
++xfrm_hash_new_size_41505 xfrm_hash_new_size 0-1 41505 NULL
++ldisc_receive_41516 ldisc_receive 4 41516 NULL
++tx_tx_frame_checksum_read_41553 tx_tx_frame_checksum_read 3 41553 NULL
++ath6kl_endpoint_stats_read_41554 ath6kl_endpoint_stats_read 3 41554 NULL
++nr_status_frames_41559 nr_status_frames 0-1 41559 NULL
++batadv_receive_client_update_packet_41578 batadv_receive_client_update_packet 3 41578 NULL
++rng_dev_read_41581 rng_dev_read 3 41581 NULL
++read_file_rx_chainmask_41605 read_file_rx_chainmask 3 41605 NULL
++vga_io_r_41609 vga_io_r 0 41609 NULL
++tcp_hdrlen_41610 tcp_hdrlen 0 41610 NULL
++usb_endpoint_maxp_41613 usb_endpoint_maxp 0 41613 NULL
++a2mp_send_41615 a2mp_send 4 41615 NULL
++btrfs_calc_trunc_metadata_size_41626 btrfs_calc_trunc_metadata_size 0-2 41626 NULL
++mempool_create_kmalloc_pool_41650 mempool_create_kmalloc_pool 1 41650 NULL
++rx_rx_pre_complt_read_41653 rx_rx_pre_complt_read 3 41653 NULL
++get_std_timing_41654 get_std_timing 0 41654 NULL
++squashfs_cache_init_41656 squashfs_cache_init 2 41656 NULL
++ieee80211_if_fmt_bssid_41677 ieee80211_if_fmt_bssid 3 41677 NULL
++params_period_bytes_41683 params_period_bytes 0 41683 NULL
++aac_src_ioremap_41688 aac_src_ioremap 2 41688 NULL
++bdx_tx_db_init_41719 bdx_tx_db_init 2 41719 NULL
++sys_pwritev_41722 sys_pwritev 3 41722 NULL
++get_bios_ebda_41730 get_bios_ebda 0 41730 NULL
++fillonedir_41746 fillonedir 3 41746 NULL
++ocfs2_dx_dir_rebalance_41793 ocfs2_dx_dir_rebalance 7 41793 NULL
++iwl_dbgfs_bt_notif_read_41794 iwl_dbgfs_bt_notif_read 3 41794 NULL
++hsi_alloc_controller_41802 hsi_alloc_controller 1 41802 NULL
++regcache_sync_block_raw_41803 regcache_sync_block_raw 3-4 41803 NULL
++da9052_enable_irq_41814 da9052_enable_irq 2 41814 NULL
++sco_send_frame_41815 sco_send_frame 3 41815 NULL
++lp_gpio_to_irq_41822 lp_gpio_to_irq 2 41822 NULL
++ixgbe_dbg_netdev_ops_read_41839 ixgbe_dbg_netdev_ops_read 3 41839 NULL
++do_ip_setsockopt_41852 do_ip_setsockopt 5 41852 NULL
++keyctl_instantiate_key_41855 keyctl_instantiate_key 3 41855 NULL
++ieee80211_rx_radiotap_space_41870 ieee80211_rx_radiotap_space 0 41870 NULL
++get_packet_41914 get_packet 3 41914 NULL
++get_fdb_entries_41916 get_fdb_entries 3 41916 NULL
++find_ge_pid_41918 find_ge_pid 1 41918 NULL
++build_inv_iotlb_pages_41922 build_inv_iotlb_pages 4-5 41922 NULL
++nfsd_getxattr_41934 nfsd_getxattr 0 41934 NULL
++ext4_da_write_inline_data_begin_41935 ext4_da_write_inline_data_begin 3-4 41935 NULL
++read_gssp_41947 read_gssp 3 41947 NULL
++ocfs2_xattr_bucket_get_name_value_41949 ocfs2_xattr_bucket_get_name_value 0 41949 NULL
++portnames_read_41958 portnames_read 3 41958 NULL
++ubi_self_check_all_ff_41959 ubi_self_check_all_ff 4 41959 NULL
++dst_mtu_41969 dst_mtu 0 41969 NULL
++cx24116_writeregN_41975 cx24116_writeregN 4 41975 NULL
++ubi_io_is_bad_41983 ubi_io_is_bad 0 41983 NULL
++lguest_map_42008 lguest_map 1-2 42008 NULL
++pool_allocate_42012 pool_allocate 3 42012 NULL
++spidev_sync_read_42014 spidev_sync_read 0 42014 NULL
++acpi_ut_create_buffer_object_42030 acpi_ut_create_buffer_object 1 42030 NULL
++__hwahc_op_set_gtk_42038 __hwahc_op_set_gtk 4 42038 NULL
++irda_sendmsg_ultra_42047 irda_sendmsg_ultra 4 42047 NULL
++dma_generic_alloc_coherent_42048 dma_generic_alloc_coherent 2 42048 NULL nohasharray
++jffs2_do_link_42048 jffs2_do_link 6 42048 &dma_generic_alloc_coherent_42048
++ps_poll_upsd_max_ap_turn_read_42050 ps_poll_upsd_max_ap_turn_read 3 42050 NULL
++InterfaceTransmitPacket_42058 InterfaceTransmitPacket 3 42058 NULL
++alloc_bitset_42085 alloc_bitset 1 42085 NULL
++scsi_execute_req_42088 scsi_execute_req 5 42088 NULL
++sk_chk_filter_42095 sk_chk_filter 2 42095 NULL
++submit_inquiry_42108 submit_inquiry 3 42108 NULL
++sysfs_read_file_42113 sysfs_read_file 3 42113 NULL nohasharray
++dw_dma_cyclic_prep_42113 dw_dma_cyclic_prep 3-4 42113 &sysfs_read_file_42113
++Read_hfc16_stable_42131 Read_hfc16_stable 0 42131 NULL
++mmc_align_data_size_42161 mmc_align_data_size 0-2 42161 NULL
++read_file_base_eeprom_42168 read_file_base_eeprom 3 42168 NULL
++iwl_mvm_send_cmd_42173 iwl_mvm_send_cmd 0 42173 NULL
++oprofilefs_str_to_user_42182 oprofilefs_str_to_user 3 42182 NULL
++get_znodes_to_commit_42201 get_znodes_to_commit 0 42201 NULL
++btmrvl_hsmode_write_42252 btmrvl_hsmode_write 3 42252 NULL
++rx_defrag_need_decrypt_read_42253 rx_defrag_need_decrypt_read 3 42253 NULL
++netxen_nic_map_indirect_address_128M_42257 netxen_nic_map_indirect_address_128M 2 42257 NULL
++savu_sysfs_write_42273 savu_sysfs_write 6 42273 NULL
++snd_pcm_hw_param_value_max_42280 snd_pcm_hw_param_value_max 0 42280 NULL
++sel_read_perm_42302 sel_read_perm 3 42302 NULL
++sctp_setsockopt_del_key_42304 sctp_setsockopt_del_key 3 42304 NULL nohasharray
++ulong_read_file_42304 ulong_read_file 3 42304 &sctp_setsockopt_del_key_42304
++gfn_to_hva_42305 gfn_to_hva 0-2 42305 NULL
++xfs_vm_readpages_42308 xfs_vm_readpages 4 42308 NULL
++free_cblock_42318 free_cblock 2 42318 NULL
++hysdn_conf_read_42324 hysdn_conf_read 3 42324 NULL
++tcp_sync_mss_42330 tcp_sync_mss 0-2 42330 NULL
++snd_pcm_plug_alloc_42339 snd_pcm_plug_alloc 2 42339 NULL
++ide_raw_taskfile_42355 ide_raw_taskfile 4 42355 NULL
++il_dbgfs_disable_ht40_read_42386 il_dbgfs_disable_ht40_read 3 42386 NULL
++hash_ipportnet4_expire_42391 hash_ipportnet4_expire 3 42391 NULL
++msnd_fifo_read_42406 msnd_fifo_read 0-3 42406 NULL
++krng_get_random_42420 krng_get_random 3 42420 NULL
++gsm_data_alloc_42437 gsm_data_alloc 3 42437 NULL
++key_conf_keyidx_read_42443 key_conf_keyidx_read 3 42443 NULL
++snd_pcm_action_group_42452 snd_pcm_action_group 0 42452 NULL
++tcm_loop_change_queue_depth_42454 tcm_loop_change_queue_depth 2 42454 NULL
++tc3589x_gpio_irq_get_virq_42457 tc3589x_gpio_irq_get_virq 2 42457 NULL
++ext3_valid_block_bitmap_42459 ext3_valid_block_bitmap 3 42459 NULL
++__simple_xattr_set_42474 __simple_xattr_set 4 42474 NULL
++follow_hugetlb_page_42486 follow_hugetlb_page 0-7 42486 NULL
++omfs_readpages_42490 omfs_readpages 4 42490 NULL
++brcmf_sdbrcm_bus_txctl_42492 brcmf_sdbrcm_bus_txctl 3 42492 NULL
++bypass_write_42498 bypass_write 3 42498 NULL
++kvm_write_wall_clock_42520 kvm_write_wall_clock 2 42520 NULL
++smk_write_netlbladdr_42525 smk_write_netlbladdr 3 42525 NULL
++snd_emux_create_port_42533 snd_emux_create_port 3 42533 NULL
++dbAllocNear_42546 dbAllocNear 0 42546 NULL
++i915_ring_stop_read_42549 i915_ring_stop_read 3 42549 NULL nohasharray
++ath6kl_wmi_proc_events_vif_42549 ath6kl_wmi_proc_events_vif 5 42549 &i915_ring_stop_read_42549
++iwl_print_event_log_42566 iwl_print_event_log 0-5-7 42566 NULL
++xfrm_new_hash_mask_42579 xfrm_new_hash_mask 0-1 42579 NULL
++oom_score_adj_write_42594 oom_score_adj_write 3 42594 NULL
++map_state_42602 map_state 1 42602 NULL nohasharray
++__pskb_pull_42602 __pskb_pull 2 42602 &map_state_42602
++nd_get_link_42603 nd_get_link 0 42603 NULL
++sys_move_pages_42626 sys_move_pages 2 42626 NULL
++resp_write_42628 resp_write 2 42628 NULL
++ieee80211_if_fmt_dot11MeshHWMPactivePathTimeout_42635 ieee80211_if_fmt_dot11MeshHWMPactivePathTimeout 3 42635 NULL
++scsi_activate_tcq_42640 scsi_activate_tcq 2 42640 NULL
++br_mdb_rehash_42643 br_mdb_rehash 2 42643 NULL
++l2tp_xmit_skb_42672 l2tp_xmit_skb 3 42672 NULL
++request_key_and_link_42693 request_key_and_link 4 42693 NULL
++acpi_dev_get_irqresource_42694 acpi_dev_get_irqresource 2 42694 NULL
++vb2_read_42703 vb2_read 3 42703 NULL
++sierra_net_send_cmd_42708 sierra_net_send_cmd 3 42708 NULL
++__ocfs2_decrease_refcount_42717 __ocfs2_decrease_refcount 4 42717 NULL
++dvb_demux_ioctl_42733 dvb_demux_ioctl 2 42733 NULL
++set_aoe_iflist_42737 set_aoe_iflist 2 42737 NULL
++ax25_setsockopt_42740 ax25_setsockopt 5 42740 NULL
++xen_bind_pirq_gsi_to_irq_42750 xen_bind_pirq_gsi_to_irq 1 42750 NULL
++snd_midi_event_decode_42780 snd_midi_event_decode 0 42780 NULL
++cryptd_hash_setkey_42781 cryptd_hash_setkey 3 42781 NULL nohasharray
++isku_sysfs_read_info_42781 isku_sysfs_read_info 6 42781 &cryptd_hash_setkey_42781
++koneplus_sysfs_read_42792 koneplus_sysfs_read 6 42792 NULL
++ntfs_attr_extend_allocation_42796 ntfs_attr_extend_allocation 0-2 42796 NULL
++fw_device_op_compat_ioctl_42804 fw_device_op_compat_ioctl 2-3 42804 NULL
++drm_ioctl_42813 drm_ioctl 2 42813 NULL
++iwl_dbgfs_ucode_bt_stats_read_42820 iwl_dbgfs_ucode_bt_stats_read 3 42820 NULL
++set_arg_42824 set_arg 3 42824 NULL
++si476x_radio_read_rsq_blob_42827 si476x_radio_read_rsq_blob 3 42827 NULL
++ocfs2_desc_bitmap_to_cluster_off_42831 ocfs2_desc_bitmap_to_cluster_off 2 42831 NULL
++prandom_u32_42853 prandom_u32 0 42853 NULL
++of_property_count_strings_42863 of_property_count_strings 0 42863 NULL
++ocfs2_clusters_for_bytes_42872 ocfs2_clusters_for_bytes 0-2 42872 NULL
++pskb_expand_head_42881 pskb_expand_head 2-3 42881 NULL
++vt_compat_ioctl_42887 vt_compat_ioctl 3 42887 NULL
++tipc_port_recv_sections_42890 tipc_port_recv_sections 4 42890 NULL
++xpc_kmalloc_cacheline_aligned_42895 xpc_kmalloc_cacheline_aligned 1 42895 NULL
++SendTxCommandPacket_42901 SendTxCommandPacket 3 42901 NULL
++hd_end_request_42904 hd_end_request 2 42904 NULL
++sta_last_rx_rate_read_42909 sta_last_rx_rate_read 3 42909 NULL
++sctp_getsockopt_maxburst_42941 sctp_getsockopt_maxburst 2 42941 NULL
++get_unmapped_area_42944 get_unmapped_area 0 42944 NULL
++sys_sethostname_42962 sys_sethostname 2 42962 NULL
++read_file_node_stat_42964 read_file_node_stat 3 42964 NULL
++compat_udpv6_setsockopt_42981 compat_udpv6_setsockopt 5 42981 NULL
++snd_timer_user_ioctl_compat_42985 snd_timer_user_ioctl_compat 3 42985 NULL
++nfs_idmap_get_desc_42990 nfs_idmap_get_desc 4-2 42990 NULL
++mlx4_qp_reserve_range_43000 mlx4_qp_reserve_range 2-3 43000 NULL
++isr_rx_mem_overflow_read_43025 isr_rx_mem_overflow_read 3 43025 NULL
++add_bytes_to_bitmap_43026 add_bytes_to_bitmap 0 43026 NULL
++wep_default_key_count_read_43035 wep_default_key_count_read 3 43035 NULL
++nouveau_gpuobj_create__43072 nouveau_gpuobj_create_ 9 43072 NULL
++nfs_map_group_to_gid_43082 nfs_map_group_to_gid 3 43082 NULL
++cpuset_sprintf_memlist_43088 cpuset_sprintf_memlist 0 43088 NULL
++ieee80211_if_fmt_drop_unencrypted_43107 ieee80211_if_fmt_drop_unencrypted 3 43107 NULL
++read_file_dfs_43145 read_file_dfs 3 43145 NULL nohasharray
++i2c_hid_get_report_43145 i2c_hid_get_report 0 43145 &read_file_dfs_43145
++uuid_string_43154 uuid_string 0 43154 NULL
++usb_string_sub_43164 usb_string_sub 0 43164 NULL
++il_dbgfs_power_save_status_read_43165 il_dbgfs_power_save_status_read 3 43165 NULL
++ath6kl_set_assoc_req_ies_43185 ath6kl_set_assoc_req_ies 3 43185 NULL
++process_measurement_43190 process_measurement 0 43190 NULL
++ext4_xattr_ibody_get_43200 ext4_xattr_ibody_get 0 43200 NULL
++uio_write_43202 uio_write 3 43202 NULL
++iso_callback_43208 iso_callback 3 43208 NULL
++f2fs_acl_from_disk_43210 f2fs_acl_from_disk 2 43210 NULL
++atomic_long_add_return_43217 atomic_long_add_return 1 43217 NULL
++comedi_compat_ioctl_43218 comedi_compat_ioctl 3 43218 NULL
++vmemmap_alloc_block_43245 vmemmap_alloc_block 1 43245 NULL
++fixup_leb_43256 fixup_leb 3 43256 NULL
++ide_end_rq_43269 ide_end_rq 4 43269 NULL
++evtchn_write_43278 evtchn_write 3 43278 NULL
++filemap_write_and_wait_range_43279 filemap_write_and_wait_range 0 43279 NULL
++mpage_alloc_43299 mpage_alloc 3 43299 NULL
++get_nr_irqs_gsi_43315 get_nr_irqs_gsi 0 43315 NULL
++mmu_set_spte_43327 mmu_set_spte 6-7 43327 NULL
++__ext4_get_inode_loc_43332 __ext4_get_inode_loc 0 43332 NULL
++kvm_host_page_size_43348 kvm_host_page_size 2 43348 NULL
++gart_free_coherent_43362 gart_free_coherent 4-2 43362 NULL
++hash_net4_expire_43378 hash_net4_expire 3 43378 NULL
++__alloc_bootmem_low_43423 __alloc_bootmem_low 1-2 43423 NULL nohasharray
++gdm_wimax_netif_rx_43423 gdm_wimax_netif_rx 3 43423 &__alloc_bootmem_low_43423
++isku_sysfs_write_keys_capslock_43432 isku_sysfs_write_keys_capslock 6 43432 NULL
++usb_alloc_urb_43436 usb_alloc_urb 1 43436 NULL
++ucs2_strsize_43438 ucs2_strsize 0 43438 NULL
++ath6kl_wmi_roam_tbl_event_rx_43440 ath6kl_wmi_roam_tbl_event_rx 3 43440 NULL
++usemap_size_43443 usemap_size 0-2-1 43443 NULL nohasharray
++usb_string_43443 usb_string 0 43443 &usemap_size_43443
++alloc_new_reservation_43480 alloc_new_reservation 4 43480 NULL
++tx_tx_data_prepared_read_43497 tx_tx_data_prepared_read 3 43497 NULL
++ieee80211_if_fmt_dot11MeshHWMPnetDiameterTraversalTime_43505 ieee80211_if_fmt_dot11MeshHWMPnetDiameterTraversalTime 3 43505 NULL
++do_readlink_43518 do_readlink 2 43518 NULL
++dvb_ca_en50221_io_write_43533 dvb_ca_en50221_io_write 3 43533 NULL
++cachefiles_daemon_write_43535 cachefiles_daemon_write 3 43535 NULL
++tx_frag_failed_read_43540 tx_frag_failed_read 3 43540 NULL nohasharray
++ufs_alloccg_block_43540 ufs_alloccg_block 3-0 43540 &tx_frag_failed_read_43540
++ath_rx_init_43564 ath_rx_init 2 43564 NULL
++_fc_frame_alloc_43568 _fc_frame_alloc 1 43568 NULL
++rpc_malloc_43573 rpc_malloc 2 43573 NULL
++lpfc_idiag_drbacc_read_reg_43606 lpfc_idiag_drbacc_read_reg 0-3 43606 NULL
++proc_read_43614 proc_read 3 43614 NULL
++bio_integrity_tag_43658 bio_integrity_tag 3 43658 NULL
++ext4_acl_count_43659 ext4_acl_count 0-1 43659 NULL
++dmam_declare_coherent_memory_43679 dmam_declare_coherent_memory 4-2 43679 NULL
++calgary_map_page_43686 calgary_map_page 3-4 43686 NULL
++max77693_bulk_write_43698 max77693_bulk_write 2-3 43698 NULL
++drbd_md_first_sector_43729 drbd_md_first_sector 0 43729 NULL
++snd_rme32_playback_copy_43732 snd_rme32_playback_copy 5 43732 NULL
++ocfs2_replace_clusters_43733 ocfs2_replace_clusters 5 43733 NULL
++osdv1_attr_list_elem_size_43747 osdv1_attr_list_elem_size 0-1 43747 NULL
++__bm_find_next_43748 __bm_find_next 2 43748 NULL
++gigaset_initcs_43753 gigaset_initcs 2 43753 NULL
++sctp_setsockopt_active_key_43755 sctp_setsockopt_active_key 3 43755 NULL
++ocfs2_xattr_get_value_outside_43787 ocfs2_xattr_get_value_outside 0 43787 NULL nohasharray
++byte_pos_43787 byte_pos 0-2 43787 &ocfs2_xattr_get_value_outside_43787
++btrfs_copy_from_user_43806 btrfs_copy_from_user 3-1 43806 NULL
++ext4_read_block_bitmap_43814 ext4_read_block_bitmap 2 43814 NULL
++div64_u64_safe_43815 div64_u64_safe 1-2 43815 NULL
++ieee80211_if_fmt_element_ttl_43825 ieee80211_if_fmt_element_ttl 3 43825 NULL
++ieee80211_alloc_hw_43829 ieee80211_alloc_hw 1 43829 NULL
++p54_download_eeprom_43842 p54_download_eeprom 4 43842 NULL
++read_flush_43851 read_flush 3 43851 NULL
++ocfs2_block_group_find_clear_bits_43874 ocfs2_block_group_find_clear_bits 4 43874 NULL
++pm860x_bulk_write_43875 pm860x_bulk_write 2-3 43875 NULL
++prism2_sta_send_mgmt_43916 prism2_sta_send_mgmt 5 43916 NULL
++SendString_43928 SendString 3 43928 NULL
++xen_register_gsi_43946 xen_register_gsi 1-2 43946 NULL
++stats_dot11RTSFailureCount_read_43948 stats_dot11RTSFailureCount_read 3 43948 NULL
++__get_required_blob_size_43980 __get_required_blob_size 0-2-3 43980 NULL
++nla_reserve_43984 nla_reserve 3 43984 NULL
++__clkdev_alloc_43990 __clkdev_alloc 1 43990 NULL
++scsi_command_size_43992 scsi_command_size 0 43992 NULL nohasharray
++bcm_recvmsg_43992 bcm_recvmsg 4 43992 &scsi_command_size_43992
++emit_flags_44006 emit_flags 4-3 44006 NULL
++write_flush_procfs_44011 write_flush_procfs 3 44011 NULL
++swiotlb_unmap_page_44063 swiotlb_unmap_page 2 44063 NULL
++SYSC_add_key_44079 SYSC_add_key 4 44079 NULL
++load_discard_44083 load_discard 3 44083 NULL
++xlog_recover_add_to_cont_trans_44102 xlog_recover_add_to_cont_trans 4 44102 NULL
++tracing_set_trace_read_44122 tracing_set_trace_read 3 44122 NULL
++vmw_gmr_bind_44130 vmw_gmr_bind 3 44130 NULL
++scsi_get_resid_44147 scsi_get_resid 0 44147 NULL
++ubifs_find_dirty_idx_leb_44169 ubifs_find_dirty_idx_leb 0 44169 NULL
++ocfs2_xattr_bucket_find_44174 ocfs2_xattr_bucket_find 0 44174 NULL
++SYSC_set_mempolicy_44176 SYSC_set_mempolicy 3 44176 NULL
++handle_eviocgbit_44193 handle_eviocgbit 3 44193 NULL
++IO_APIC_get_PCI_irq_vector_44198 IO_APIC_get_PCI_irq_vector 0 44198 NULL
++__set_free_44211 __set_free 2 44211 NULL
++claim_ptd_buffers_44213 claim_ptd_buffers 3 44213 NULL
++srp_alloc_iu_44227 srp_alloc_iu 2 44227 NULL
++ioapic_register_intr_44238 ioapic_register_intr 1 44238 NULL
++scsi_track_queue_full_44239 scsi_track_queue_full 2 44239 NULL
++tc3589x_gpio_irq_map_44245 tc3589x_gpio_irq_map 2 44245 NULL
++enlarge_skb_44248 enlarge_skb 2 44248 NULL
++ufs_clusteracct_44293 ufs_clusteracct 3 44293 NULL
++ocfs2_zero_range_for_truncate_44294 ocfs2_zero_range_for_truncate 3 44294 NULL
++ath6kl_keepalive_read_44303 ath6kl_keepalive_read 3 44303 NULL
++bitmap_scnprintf_44318 bitmap_scnprintf 2 44318 NULL
++dispatch_proc_write_44320 dispatch_proc_write 3 44320 NULL
++ubi_eba_write_leb_st_44343 ubi_eba_write_leb_st 5 44343 NULL
++nfs_fscache_get_super_cookie_44355 nfs_fscache_get_super_cookie 3 44355 NULL nohasharray
++blk_queue_init_tags_44355 blk_queue_init_tags 2 44355 &nfs_fscache_get_super_cookie_44355
++__is_discarded_44359 __is_discarded 2 44359 NULL
++rts_threshold_read_44384 rts_threshold_read 3 44384 NULL
++aoedev_flush_44398 aoedev_flush 2 44398 NULL
++drm_buffer_alloc_44405 drm_buffer_alloc 2 44405 NULL
++osst_do_scsi_44410 osst_do_scsi 4 44410 NULL
++check_user_page_hwpoison_44412 check_user_page_hwpoison 1 44412 NULL
++ieee80211_if_read_rc_rateidx_mcs_mask_5ghz_44423 ieee80211_if_read_rc_rateidx_mcs_mask_5ghz 3 44423 NULL
++prandom_u32_state_44445 prandom_u32_state 0 44445 NULL
++___alloc_bootmem_node_nopanic_44461 ___alloc_bootmem_node_nopanic 2-3 44461 NULL
++btrfs_chunk_item_size_44478 btrfs_chunk_item_size 0-1 44478 NULL
++sdio_align_size_44489 sdio_align_size 0-2 44489 NULL
++bio_advance_44496 bio_advance 2 44496 NULL
++ieee80211_if_read_dropped_frames_ttl_44500 ieee80211_if_read_dropped_frames_ttl 3 44500 NULL
++security_getprocattr_44505 security_getprocattr 0 44505 NULL nohasharray
++iwl_dbgfs_sram_read_44505 iwl_dbgfs_sram_read 3 44505 &security_getprocattr_44505
++spidev_write_44510 spidev_write 3 44510 NULL
++sys_msgsnd_44537 sys_msgsnd 3 44537 NULL nohasharray
++comm_write_44537 comm_write 3 44537 &sys_msgsnd_44537
++hash_ipport4_expire_44564 hash_ipport4_expire 3 44564 NULL
++dgrp_config_proc_write_44571 dgrp_config_proc_write 3 44571 NULL
++snd_pcm_alloc_vmalloc_buffer_44595 snd_pcm_alloc_vmalloc_buffer 2 44595 NULL
++slip_compat_ioctl_44599 slip_compat_ioctl 4 44599 NULL
++brcmf_sdbrcm_glom_len_44618 brcmf_sdbrcm_glom_len 0 44618 NULL
++cfpkt_add_body_44630 cfpkt_add_body 3 44630 NULL
++ext2_new_block_44645 ext2_new_block 2 44645 NULL
++alloc_ctrl_packet_44667 alloc_ctrl_packet 1 44667 NULL
++mpi_resize_44674 mpi_resize 2 44674 NULL
++ts_read_44687 ts_read 3 44687 NULL
++qib_get_user_pages_44689 qib_get_user_pages 1-2 44689 NULL
++xfer_to_user_44713 xfer_to_user 3 44713 NULL
++_zd_iowrite32v_locked_44725 _zd_iowrite32v_locked 3 44725 NULL
++clusterip_proc_write_44729 clusterip_proc_write 3 44729 NULL
++fib_count_nexthops_44730 fib_count_nexthops 0 44730 NULL
++key_tx_rx_count_read_44742 key_tx_rx_count_read 3 44742 NULL
++WIL_GET_BITS_44747 WIL_GET_BITS 0-1-2-3 44747 NULL
++set_brk_44749 set_brk 1 44749 NULL
++tnode_new_44757 tnode_new 3 44757 NULL nohasharray
++pty_write_44757 pty_write 3 44757 &tnode_new_44757
++__videobuf_copy_stream_44769 __videobuf_copy_stream 4 44769 NULL
++handsfree_ramp_44777 handsfree_ramp 2 44777 NULL
++sctp_setsockopt_44788 sctp_setsockopt 5 44788 NULL
++rx_dropped_read_44799 rx_dropped_read 3 44799 NULL
++qla4xxx_alloc_work_44813 qla4xxx_alloc_work 2 44813 NULL
++mei_cl_read_start_44824 mei_cl_read_start 2 44824 NULL
++rmap_write_protect_44833 rmap_write_protect 2 44833 NULL
++sisusb_write_44834 sisusb_write 3 44834 NULL
++nl80211_send_unprot_disassoc_44846 nl80211_send_unprot_disassoc 4 44846 NULL
++kvm_read_hva_44847 kvm_read_hva 3 44847 NULL
++cubic_root_44848 cubic_root 1 44848 NULL
++copydesc_user_44855 copydesc_user 3 44855 NULL
++skb_availroom_44883 skb_availroom 0 44883 NULL
++nf_bridge_encap_header_len_44890 nf_bridge_encap_header_len 0 44890 NULL
++do_tty_write_44896 do_tty_write 5 44896 NULL
++tx_queue_status_read_44978 tx_queue_status_read 3 44978 NULL
++nf_nat_seq_adjust_44989 nf_nat_seq_adjust 4 44989 NULL
++map_index_to_lba_44993 map_index_to_lba 0-1 44993 NULL
++bytepos_delta_45017 bytepos_delta 0 45017 NULL
++read_block_bitmap_45021 read_block_bitmap 2 45021 NULL nohasharray
++ptrace_writedata_45021 ptrace_writedata 4-3 45021 &read_block_bitmap_45021
++vhci_get_user_45039 vhci_get_user 3 45039 NULL
++sel_write_user_45060 sel_write_user 3 45060 NULL
++vmscan_swappiness_45062 vmscan_swappiness 0 45062 NULL
++snd_mixart_BA0_read_45069 snd_mixart_BA0_read 5 45069 NULL nohasharray
++do_video_ioctl_45069 do_video_ioctl 3 45069 &snd_mixart_BA0_read_45069
++kvm_mmu_page_get_gfn_45110 kvm_mmu_page_get_gfn 0-2 45110 NULL
++pwr_missing_bcns_cnt_read_45113 pwr_missing_bcns_cnt_read 3 45113 NULL
++usbdev_read_45114 usbdev_read 3 45114 NULL
++isku_sysfs_write_reset_45133 isku_sysfs_write_reset 6 45133 NULL
++send_to_tty_45141 send_to_tty 3 45141 NULL
++stmpe_irq_map_45146 stmpe_irq_map 2 45146 NULL
++crypto_aead_blocksize_45148 crypto_aead_blocksize 0 45148 NULL
++gen_bitmask_string_45149 gen_bitmask_string 6 45149 NULL
++ocfs2_remove_inode_range_45156 ocfs2_remove_inode_range 3-4 45156 NULL nohasharray
++device_write_45156 device_write 3 45156 &ocfs2_remove_inode_range_45156
++ocfs2_dq_frozen_trigger_45159 ocfs2_dq_frozen_trigger 4 45159 NULL
++tomoyo_write_self_45161 tomoyo_write_self 3 45161 NULL
++sta_agg_status_write_45164 sta_agg_status_write 3 45164 NULL
++snd_sb_csp_load_user_45190 snd_sb_csp_load_user 3 45190 NULL
++num_clusters_in_group_45194 num_clusters_in_group 2 45194 NULL
++add_child_45201 add_child 4 45201 NULL
++iso_alloc_urb_45206 iso_alloc_urb 4-5 45206 NULL
++spi_alloc_master_45223 spi_alloc_master 2 45223 NULL
++__dirty_45228 __dirty 2 45228 NULL
++ieee80211_if_read_peer_45233 ieee80211_if_read_peer 3 45233 NULL
++prism2_pda_proc_read_45246 prism2_pda_proc_read 3 45246 NULL
++input_mt_init_slots_45279 input_mt_init_slots 2 45279 NULL
++vcc_compat_ioctl_45291 vcc_compat_ioctl 3 45291 NULL
++snd_pcm_oss_sync1_45298 snd_pcm_oss_sync1 2 45298 NULL
++pte_val_45313 pte_val 0 45313 NULL
++__i2c_hid_command_45321 __i2c_hid_command 0 45321 NULL
++copy_vm86_regs_from_user_45340 copy_vm86_regs_from_user 3 45340 NULL
++lane2_associate_req_45398 lane2_associate_req 4 45398 NULL
++keymap_store_45406 keymap_store 4 45406 NULL
++paging64_gva_to_gpa_45421 paging64_gva_to_gpa 2 45421 NULL nohasharray
++ieee80211_if_fmt_dot11MeshHWMProotInterval_45421 ieee80211_if_fmt_dot11MeshHWMProotInterval 3 45421 &paging64_gva_to_gpa_45421
++tty_buffer_alloc_45437 tty_buffer_alloc 2 45437 NULL
++intel_render_ring_init_dri_45446 intel_render_ring_init_dri 2-3 45446 NULL nohasharray
++SYSC_mremap_45446 SYSC_mremap 5-1-2 45446 &intel_render_ring_init_dri_45446
++__node_remap_45458 __node_remap 4 45458 NULL
++rds_ib_set_wr_signal_state_45463 rds_ib_set_wr_signal_state 0 45463 NULL
++udp_manip_pkt_45467 udp_manip_pkt 4 45467 NULL
++tracing_read_dyn_info_45468 tracing_read_dyn_info 3 45468 NULL
++arizona_init_fll_45503 arizona_init_fll 5 45503 NULL
++rds_message_copy_from_user_45510 rds_message_copy_from_user 3 45510 NULL
++sys_lgetxattr_45531 sys_lgetxattr 4 45531 NULL
++cgroup_read_u64_45532 cgroup_read_u64 5 45532 NULL
++copy_macs_45534 copy_macs 4 45534 NULL
++nla_attr_size_45545 nla_attr_size 0-1 45545 NULL
++v9fs_direct_read_45546 v9fs_direct_read 3 45546 NULL
++cx18_copy_mdl_to_user_45549 cx18_copy_mdl_to_user 4 45549 NULL
++atomic_long_sub_return_45551 atomic_long_sub_return 1 45551 NULL
++ext3_group_first_block_no_45555 ext3_group_first_block_no 0-2 45555 NULL
++stats_dot11ACKFailureCount_read_45558 stats_dot11ACKFailureCount_read 3 45558 NULL
++_regmap_bus_raw_write_45559 _regmap_bus_raw_write 2 45559 NULL
++posix_acl_xattr_size_45561 posix_acl_xattr_size 0-1 45561 NULL
++venus_rmdir_45564 venus_rmdir 4 45564 NULL
++ipath_create_cq_45586 ipath_create_cq 2 45586 NULL
++rdma_set_ib_paths_45592 rdma_set_ib_paths 3 45592 NULL
++hidraw_get_report_45609 hidraw_get_report 3 45609 NULL
++audit_log_n_hex_45617 audit_log_n_hex 3 45617 NULL
++ebitmap_next_positive_45651 ebitmap_next_positive 3 45651 NULL
++dma_map_cont_45668 dma_map_cont 5 45668 NULL
++compat_mpctl_ioctl_45671 compat_mpctl_ioctl 2 45671 NULL
++dgram_sendmsg_45679 dgram_sendmsg 4 45679 NULL
++smk_write_ambient_45691 smk_write_ambient 3 45691 NULL
++dm_compat_ctl_ioctl_45692 dm_compat_ctl_ioctl 3 45692 NULL
++unix_dgram_sendmsg_45699 unix_dgram_sendmsg 4 45699 NULL nohasharray
++bscnl_emit_45699 bscnl_emit 2-5-0 45699 &unix_dgram_sendmsg_45699
++dvb_ca_en50221_init_45718 dvb_ca_en50221_init 4 45718 NULL
++snd_cs46xx_io_read_45734 snd_cs46xx_io_read 5 45734 NULL
++rw_copy_check_uvector_45748 rw_copy_check_uvector 3 45748 NULL nohasharray
++v4l2_ctrl_new_std_45748 v4l2_ctrl_new_std 5 45748 &rw_copy_check_uvector_45748
++lkdtm_debugfs_read_45752 lkdtm_debugfs_read 3 45752 NULL
++nilfs_compat_ioctl_45769 nilfs_compat_ioctl 3 45769 NULL
++alloc_ts_config_45775 alloc_ts_config 1 45775 NULL
++raw_setsockopt_45800 raw_setsockopt 5 45800 NULL
++lbs_rdbbp_read_45805 lbs_rdbbp_read 3 45805 NULL
++pcpu_alloc_alloc_info_45813 pcpu_alloc_alloc_info 1-2 45813 NULL
++fm_v4l2_init_video_device_45821 fm_v4l2_init_video_device 2 45821 NULL
++memcg_update_cache_size_45828 memcg_update_cache_size 2 45828 NULL
++x509_process_extension_45854 x509_process_extension 5 45854 NULL
++isdn_write_45863 isdn_write 3 45863 NULL
++unpack_orig_pfns_45867 unpack_orig_pfns 0 45867 NULL
++get_rdac_req_45882 get_rdac_req 3 45882 NULL
++ocfs2_xattr_block_find_45891 ocfs2_xattr_block_find 0 45891 NULL
++wm_adsp_region_to_reg_45915 wm_adsp_region_to_reg 0-2 45915 NULL
++dbgfs_frame_45917 dbgfs_frame 3 45917 NULL
++nf_nat_ftp_fmt_cmd_45926 nf_nat_ftp_fmt_cmd 0 45926 NULL
++smp_scan_config_45934 smp_scan_config 1 45934 NULL
++alloc_mr_45935 alloc_mr 1 45935 NULL
++split_large_page_45941 split_large_page 2 45941 NULL
++rb_simple_read_45972 rb_simple_read 3 45972 NULL
++ezusb_writememory_45976 ezusb_writememory 4 45976 NULL
++ioat2_dca_count_dca_slots_45984 ioat2_dca_count_dca_slots 0 45984 NULL
++ore_calc_stripe_info_46023 ore_calc_stripe_info 2 46023 NULL
++sierra_setup_urb_46029 sierra_setup_urb 5 46029 NULL
++get_free_entries_46030 get_free_entries 1 46030 NULL
++__access_remote_vm_46031 __access_remote_vm 0-5-3 46031 NULL
++snd_emu10k1x_ptr_read_46049 snd_emu10k1x_ptr_read 0 46049 NULL
++acpi_register_gsi_xen_hvm_46052 acpi_register_gsi_xen_hvm 2 46052 NULL
++line6_midibuf_bytes_used_46059 line6_midibuf_bytes_used 0 46059 NULL
++__ocfs2_move_extent_46060 __ocfs2_move_extent 5-6 46060 NULL nohasharray
++dma_tx_errors_read_46060 dma_tx_errors_read 3 46060 &__ocfs2_move_extent_46060
++slhc_toss_46066 slhc_toss 0 46066 NULL
++sel_commit_bools_write_46077 sel_commit_bools_write 3 46077 NULL
++vfio_config_do_rw_46091 vfio_config_do_rw 3 46091 NULL
++ata_host_alloc_46094 ata_host_alloc 2 46094 NULL
++arizona_set_irq_wake_46101 arizona_set_irq_wake 2 46101 NULL
++pkt_ctl_compat_ioctl_46110 pkt_ctl_compat_ioctl 3 46110 NULL
++memcg_update_array_size_46111 memcg_update_array_size 1 46111 NULL nohasharray
++il3945_ucode_general_stats_read_46111 il3945_ucode_general_stats_read 3 46111 &memcg_update_array_size_46111
++C_SYSC_writev_46113 C_SYSC_writev 3 46113 NULL
++mlx4_ib_alloc_fast_reg_page_list_46119 mlx4_ib_alloc_fast_reg_page_list 2 46119 NULL
++paging32_walk_addr_nested_46121 paging32_walk_addr_nested 3 46121 NULL
++vb2_dma_sg_get_userptr_46146 vb2_dma_sg_get_userptr 2 46146 NULL
++__netlink_change_ngroups_46156 __netlink_change_ngroups 2 46156 NULL
++twl_direction_out_46182 twl_direction_out 2 46182 NULL
++vxge_os_dma_malloc_46184 vxge_os_dma_malloc 2 46184 NULL
++add_conn_list_46197 add_conn_list 3 46197 NULL
++i2400m_op_msg_from_user_46213 i2400m_op_msg_from_user 4 46213 NULL
++tm6000_i2c_recv_regs_46215 tm6000_i2c_recv_regs 5 46215 NULL
++dsp_write_46218 dsp_write 2 46218 NULL
++mpi_read_raw_data_46248 mpi_read_raw_data 2 46248 NULL
++nf_nat_ftp_46265 nf_nat_ftp 6 46265 NULL
++ReadReg_46277 ReadReg 0 46277 NULL
++batadv_iv_ogm_queue_add_46319 batadv_iv_ogm_queue_add 3 46319 NULL
++qlcnic_83xx_sysfs_flash_bulk_write_46320 qlcnic_83xx_sysfs_flash_bulk_write 4 46320 NULL
++__hwahc_dev_set_key_46328 __hwahc_dev_set_key 5 46328 NULL
++iwl_dbgfs_chain_noise_read_46355 iwl_dbgfs_chain_noise_read 3 46355 NULL
++smk_write_direct_46363 smk_write_direct 3 46363 NULL
++__iommu_calculate_agaw_46366 __iommu_calculate_agaw 2 46366 NULL
++ubi_dump_flash_46381 ubi_dump_flash 4 46381 NULL
++fuse_file_aio_write_46399 fuse_file_aio_write 4 46399 NULL
++crypto_ablkcipher_reqsize_46411 crypto_ablkcipher_reqsize 0 46411 NULL
++hash_ipportip6_expire_46443 hash_ipportip6_expire 3 46443 NULL
++cp210x_set_config_46447 cp210x_set_config 4 46447 NULL
++filldir64_46469 filldir64 3 46469 NULL
++fill_in_write_vector_46498 fill_in_write_vector 0 46498 NULL
++pin_code_reply_46510 pin_code_reply 4 46510 NULL
++mthca_alloc_cq_buf_46512 mthca_alloc_cq_buf 3 46512 NULL
++kmsg_read_46514 kmsg_read 3 46514 NULL
++bdx_rxdb_create_46525 bdx_rxdb_create 1 46525 NULL
++nl80211_send_rx_assoc_46538 nl80211_send_rx_assoc 4 46538 NULL
++pm860x_irq_domain_map_46553 pm860x_irq_domain_map 2 46553 NULL
++mv_get_hc_count_46554 mv_get_hc_count 0 46554 NULL
++link_send_sections_long_46556 link_send_sections_long 4 46556 NULL
++irq_domain_associate_46564 irq_domain_associate 2 46564 NULL
++dn_current_mss_46574 dn_current_mss 0 46574 NULL
++serverworks_create_gatt_pages_46582 serverworks_create_gatt_pages 1 46582 NULL
++snd_compr_write_data_46592 snd_compr_write_data 3 46592 NULL
++il3945_stats_flag_46606 il3945_stats_flag 0-3 46606 NULL
++vscnprintf_46617 vscnprintf 0-2 46617 NULL
++__kfifo_out_r_46623 __kfifo_out_r 0-3 46623 NULL
++request_key_async_with_auxdata_46624 request_key_async_with_auxdata 4 46624 NULL
++vfs_getxattr_alloc_46649 vfs_getxattr_alloc 0 46649 NULL
++av7110_ipack_init_46655 av7110_ipack_init 2 46655 NULL
++alloc_data_packet_46698 alloc_data_packet 1 46698 NULL
++__ilog2_u32_46706 __ilog2_u32 0 46706 NULL
++erst_dbg_write_46715 erst_dbg_write 3 46715 NULL
++wl1271_rx_filter_alloc_field_46721 wl1271_rx_filter_alloc_field 5 46721 NULL
++prepare_copy_46725 prepare_copy 2 46725 NULL
++irq_domain_add_simple_46734 irq_domain_add_simple 2-3 46734 NULL
++set_memory_wc_46747 set_memory_wc 1 46747 NULL
++ext4_count_free_46754 ext4_count_free 2 46754 NULL nohasharray
++pte_pfn_46754 pte_pfn 0 46754 &ext4_count_free_46754
++hest_ghes_dev_register_46766 hest_ghes_dev_register 1 46766 NULL
++int_hw_irq_en_46776 int_hw_irq_en 3 46776 NULL
++regcache_lzo_sync_46777 regcache_lzo_sync 2 46777 NULL
++_sys_packet_req_46793 _sys_packet_req 4 46793 NULL
++_xfs_buf_get_pages_46811 _xfs_buf_get_pages 2 46811 NULL
++xfs_iroot_realloc_46826 xfs_iroot_realloc 2 46826 NULL
++shmem_pwrite_fast_46842 shmem_pwrite_fast 3 46842 NULL
++spi_async_46857 spi_async 0 46857 NULL
++vsnprintf_46863 vsnprintf 0 46863 NULL nohasharray
++SyS_move_pages_46863 SyS_move_pages 2 46863 &vsnprintf_46863
++nvme_alloc_queue_46865 nvme_alloc_queue 3 46865 NULL
++sip_sprintf_addr_46872 sip_sprintf_addr 0 46872 NULL
++rvmalloc_46873 rvmalloc 1 46873 NULL
++qp_memcpy_from_queue_iov_46874 qp_memcpy_from_queue_iov 4-5 46874 NULL
++hpi_read_word_nolock_46881 hpi_read_word_nolock 0 46881 NULL
++stmpe_gpio_irq_unmap_46884 stmpe_gpio_irq_unmap 2 46884 NULL
++ixgbe_dbg_reg_ops_write_46895 ixgbe_dbg_reg_ops_write 3 46895 NULL
++sk_mem_pages_46896 sk_mem_pages 0-1 46896 NULL
++ol_dqblk_off_46904 ol_dqblk_off 3-2 46904 NULL
++ieee80211_if_fmt_power_mode_46906 ieee80211_if_fmt_power_mode 3 46906 NULL
++wlcore_alloc_hw_46917 wlcore_alloc_hw 1 46917 NULL
++fb_write_46924 fb_write 3 46924 NULL
++btmrvl_curpsmode_read_46939 btmrvl_curpsmode_read 3 46939 NULL
++kvm_register_read_46948 kvm_register_read 0 46948 NULL
++__sctp_setsockopt_connectx_46949 __sctp_setsockopt_connectx 3 46949 NULL
++qla4xxx_post_aen_work_46953 qla4xxx_post_aen_work 3 46953 NULL
++crypto_tfm_alg_alignmask_46971 crypto_tfm_alg_alignmask 0 46971 NULL
++mgmt_pending_add_46976 mgmt_pending_add 5 46976 NULL
++gfs2_xattr_system_set_46996 gfs2_xattr_system_set 4 46996 NULL nohasharray
++sel_write_bool_46996 sel_write_bool 3 46996 &gfs2_xattr_system_set_46996
++ttm_bo_io_47000 ttm_bo_io 5 47000 NULL
++blk_rq_map_kern_47004 blk_rq_map_kern 4 47004 NULL
++add_free_space_entry_47005 add_free_space_entry 2 47005 NULL
++__map_single_47020 __map_single 3-4-7 47020 NULL
++cx231xx_init_bulk_47024 cx231xx_init_bulk 3-2 47024 NULL
++swiotlb_sync_single_47031 swiotlb_sync_single 2 47031 NULL
++set_dis_bypass_pfs_47038 set_dis_bypass_pfs 3 47038 NULL
++fs_path_len_47060 fs_path_len 0 47060 NULL
++ufs_new_fragments_47070 ufs_new_fragments 3-5-4 47070 NULL
++pipeline_dec_packet_in_read_47076 pipeline_dec_packet_in_read 3 47076 NULL
++scsi_deactivate_tcq_47086 scsi_deactivate_tcq 2 47086 NULL
++iwl_dump_nic_event_log_47089 iwl_dump_nic_event_log 0 47089 NULL
++mousedev_read_47123 mousedev_read 3 47123 NULL
++ses_recv_diag_47143 ses_recv_diag 4 47143 NULL nohasharray
++acpi_ut_initialize_buffer_47143 acpi_ut_initialize_buffer 2 47143 &ses_recv_diag_47143
++persistent_ram_iomap_47156 persistent_ram_iomap 1-2 47156 NULL
++mxms_headerlen_47161 mxms_headerlen 0 47161 NULL
++rs_sta_dbgfs_rate_scale_data_read_47165 rs_sta_dbgfs_rate_scale_data_read 3 47165 NULL
++rts51x_ms_rw_47171 rts51x_ms_rw 3-4 47171 NULL
++svc_pool_map_alloc_arrays_47181 svc_pool_map_alloc_arrays 2 47181 NULL
++can_set_system_xattr_47182 can_set_system_xattr 4 47182 NULL
++ioremap_cache_47189 ioremap_cache 1-2 47189 NULL
++gnttab_set_map_op_47206 gnttab_set_map_op 2 47206 NULL
++l2headersize_47238 l2headersize 0 47238 NULL
++options_write_47243 options_write 3 47243 NULL
++portcntrs_1_read_47253 portcntrs_1_read 3 47253 NULL
++da9052_disable_irq_nosync_47260 da9052_disable_irq_nosync 2 47260 NULL
++ablkcipher_next_slow_47274 ablkcipher_next_slow 4-3 47274 NULL
++tty_audit_log_47280 tty_audit_log 8 47280 NULL
++gfs2_readpages_47285 gfs2_readpages 4 47285 NULL
++vsnprintf_47291 vsnprintf 0 47291 NULL
++SYSC_semop_47292 SYSC_semop 3 47292 NULL
++tx_internal_desc_overflow_read_47300 tx_internal_desc_overflow_read 3 47300 NULL
++SyS_madvise_47354 SyS_madvise 1 47354 NULL
++ieee80211_if_read_dot11MeshHoldingTimeout_47356 ieee80211_if_read_dot11MeshHoldingTimeout 3 47356 NULL
++avc_get_hash_stats_47359 avc_get_hash_stats 0 47359 NULL
++find_first_zero_bit_le_47369 find_first_zero_bit_le 2 47369 NULL
++__bio_map_kern_47379 __bio_map_kern 3 47379 NULL
++nv_rd32_47390 nv_rd32 0 47390 NULL nohasharray
++trace_options_core_read_47390 trace_options_core_read 3 47390 &nv_rd32_47390
++nametbl_list_47391 nametbl_list 2 47391 NULL
++dgrp_net_write_47392 dgrp_net_write 3 47392 NULL
++pfkey_sendmsg_47394 pfkey_sendmsg 4 47394 NULL
++gfn_to_pfn_prot_47398 gfn_to_pfn_prot 2 47398 NULL
++ocfs2_resv_end_47408 ocfs2_resv_end 0 47408 NULL
++sta_vht_capa_read_47409 sta_vht_capa_read 3 47409 NULL
++crypto_ablkcipher_alignmask_47410 crypto_ablkcipher_alignmask 0 47410 NULL
++vzalloc_47421 vzalloc 1 47421 NULL
++hash_ipportip4_expire_47426 hash_ipportip4_expire 3 47426 NULL
++posix_acl_from_disk_47445 posix_acl_from_disk 2 47445 NULL
++__load_mapping_47460 __load_mapping 2 47460 NULL
++nvme_trans_send_fw_cmd_47479 nvme_trans_send_fw_cmd 4 47479 NULL
++wb_force_mapping_47485 wb_force_mapping 2 47485 NULL nohasharray
++newpart_47485 newpart 6 47485 &wb_force_mapping_47485
++core_sys_select_47494 core_sys_select 1 47494 NULL
++alloc_arraycache_47505 alloc_arraycache 2 47505 NULL
++unlink_simple_47506 unlink_simple 3 47506 NULL
++ufs_inode_getblock_47512 ufs_inode_getblock 4 47512 NULL
++vscnprintf_47533 vscnprintf 0-2 47533 NULL nohasharray
++process_vm_rw_47533 process_vm_rw 3-5 47533 &vscnprintf_47533
++oz_events_read_47535 oz_events_read 3 47535 NULL
++ieee80211_if_fmt_min_discovery_timeout_47539 ieee80211_if_fmt_min_discovery_timeout 3 47539 NULL
++read_ldt_47570 read_ldt 2 47570 NULL
++_rtl_rx_get_padding_47572 _rtl_rx_get_padding 0 47572 NULL nohasharray
++isku_sysfs_read_last_set_47572 isku_sysfs_read_last_set 6 47572 &_rtl_rx_get_padding_47572
++pci_iomap_47575 pci_iomap 3 47575 NULL
++rpipe_get_idx_47579 rpipe_get_idx 2 47579 NULL
++SYSC_fcntl64_47581 SYSC_fcntl64 3 47581 NULL
++ext4_kvzalloc_47605 ext4_kvzalloc 1 47605 NULL
++sctp_ssnmap_new_47608 sctp_ssnmap_new 1-2 47608 NULL
++uea_request_47613 uea_request 4 47613 NULL
++cache_read_pipefs_47615 cache_read_pipefs 3 47615 NULL
++twl4030_clear_set_47624 twl4030_clear_set 4 47624 NULL
++irq_set_chip_47638 irq_set_chip 1 47638 NULL
++__build_packet_message_47643 __build_packet_message 3-9 47643 NULL
++global_rt_runtime_47712 global_rt_runtime 0 47712 NULL
++save_microcode_47717 save_microcode 3 47717 NULL
++bits_to_user_47733 bits_to_user 2-3 47733 NULL
++carl9170_debugfs_read_47738 carl9170_debugfs_read 3 47738 NULL
++ir_prepare_write_buffer_47747 ir_prepare_write_buffer 3 47747 NULL
++mvumi_alloc_mem_resource_47750 mvumi_alloc_mem_resource 3 47750 NULL
++ext3_find_near_47752 ext3_find_near 0 47752 NULL
++alloc_sched_domains_47756 alloc_sched_domains 1 47756 NULL
++i915_wedged_write_47771 i915_wedged_write 3 47771 NULL
++uwb_ie_dump_hex_47774 uwb_ie_dump_hex 4 47774 NULL
++SyS_setgroups16_47780 SyS_setgroups16 1 47780 NULL
++error_error_numll_frame_cts_start_read_47781 error_error_numll_frame_cts_start_read 3 47781 NULL
++posix_acl_fix_xattr_from_user_47793 posix_acl_fix_xattr_from_user 2 47793 NULL
++stmmac_set_bfsize_47834 stmmac_set_bfsize 0 47834 NULL
++KEY_SIZE_47855 KEY_SIZE 0 47855 NULL
++ubifs_unpack_nnode_47866 ubifs_unpack_nnode 0 47866 NULL
++vhci_read_47878 vhci_read 3 47878 NULL
++keyctl_instantiate_key_common_47889 keyctl_instantiate_key_common 4 47889 NULL
++load_mapping_47904 load_mapping 3 47904 NULL
++osd_req_read_sg_47905 osd_req_read_sg 5 47905 NULL
++comedi_write_47926 comedi_write 3 47926 NULL
++nvme_trans_get_blk_desc_len_47946 nvme_trans_get_blk_desc_len 0-2 47946 NULL
++lp8788_irq_map_47964 lp8788_irq_map 2 47964 NULL
++iwl_dbgfs_ucode_tracing_read_47983 iwl_dbgfs_ucode_tracing_read 3 47983 NULL nohasharray
++mempool_resize_47983 mempool_resize 2 47983 &iwl_dbgfs_ucode_tracing_read_47983
++dbg_port_buf_47990 dbg_port_buf 2 47990 NULL
++ib_umad_write_47993 ib_umad_write 3 47993 NULL
++ffs_epfile_write_48014 ffs_epfile_write 3 48014 NULL
++bio_integrity_set_tag_48035 bio_integrity_set_tag 3 48035 NULL
++pppoe_sendmsg_48039 pppoe_sendmsg 4 48039 NULL
++SYSC_writev_48040 SYSC_writev 3 48040 NULL
++wpan_phy_alloc_48056 wpan_phy_alloc 1 48056 NULL
++posix_acl_alloc_48063 posix_acl_alloc 1 48063 NULL
++palmas_bulk_write_48068 palmas_bulk_write 2-3-5 48068 NULL
++disc_write_48070 disc_write 3 48070 NULL
++mmc_alloc_host_48097 mmc_alloc_host 1 48097 NULL
++skb_copy_datagram_const_iovec_48102 skb_copy_datagram_const_iovec 4-2-5 48102 NULL
++radio_isa_common_probe_48107 radio_isa_common_probe 3 48107 NULL
++vmw_framebuffer_surface_dirty_48132 vmw_framebuffer_surface_dirty 6 48132 NULL
++set_discoverable_48141 set_discoverable 4 48141 NULL
++dn_fib_count_nhs_48145 dn_fib_count_nhs 0 48145 NULL
++bitmap_onto_48152 bitmap_onto 4 48152 NULL
++isr_dma1_done_read_48159 isr_dma1_done_read 3 48159 NULL
++c4iw_id_table_alloc_48163 c4iw_id_table_alloc 3 48163 NULL
++ocfs2_find_next_zero_bit_unaligned_48170 ocfs2_find_next_zero_bit_unaligned 2-3 48170 NULL nohasharray
++rbd_obj_method_sync_48170 rbd_obj_method_sync 8 48170 &ocfs2_find_next_zero_bit_unaligned_48170
++alloc_cc770dev_48186 alloc_cc770dev 1 48186 NULL
++init_ipath_48187 init_ipath 1 48187 NULL
++brcmf_sdio_chip_cm3_exitdl_48192 brcmf_sdio_chip_cm3_exitdl 4 48192 NULL
++snd_seq_dump_var_event_48209 snd_seq_dump_var_event 0 48209 NULL
++is_block_in_journal_48223 is_block_in_journal 3 48223 NULL
++uv_blade_nr_possible_cpus_48226 uv_blade_nr_possible_cpus 0 48226 NULL
++nilfs_readpages_48229 nilfs_readpages 4 48229 NULL
++read_file_recv_48232 read_file_recv 3 48232 NULL
++unaccount_shadowed_48233 unaccount_shadowed 2 48233 NULL
++nfsctl_transaction_read_48250 nfsctl_transaction_read 3 48250 NULL
++cache_write_pipefs_48270 cache_write_pipefs 3 48270 NULL
++send_set_info_48288 send_set_info 7 48288 NULL
++set_disc_pwup_pfs_48300 set_disc_pwup_pfs 3 48300 NULL
++lpfc_idiag_extacc_read_48301 lpfc_idiag_extacc_read 3 48301 NULL
++timblogiw_read_48305 timblogiw_read 3 48305 NULL
++hash_setkey_48310 hash_setkey 3 48310 NULL
++__alloc_fd_48356 __alloc_fd 2 48356 NULL
++skb_add_data_48363 skb_add_data 3 48363 NULL
++tx_frag_init_called_read_48377 tx_frag_init_called_read 3 48377 NULL
++ixgbe_pci_sriov_enable_48410 ixgbe_pci_sriov_enable 2 48410 NULL
++lbs_debugfs_write_48413 lbs_debugfs_write 3 48413 NULL
++pwr_tx_without_ps_read_48423 pwr_tx_without_ps_read 3 48423 NULL
++nfs4_alloc_pages_48426 nfs4_alloc_pages 1 48426 NULL
++print_filtered_48442 print_filtered 2-0 48442 NULL
++tun_recvmsg_48463 tun_recvmsg 4 48463 NULL
++r8712_usbctrl_vendorreq_48489 r8712_usbctrl_vendorreq 6 48489 NULL
++send_control_msg_48498 send_control_msg 6 48498 NULL
++mlx4_en_create_tx_ring_48501 mlx4_en_create_tx_ring 4 48501 NULL
++count_masked_bytes_48507 count_masked_bytes 0-1 48507 NULL
++diva_os_copy_to_user_48508 diva_os_copy_to_user 4 48508 NULL
++brcmf_sdio_trap_info_48510 brcmf_sdio_trap_info 4 48510 NULL
++phantom_get_free_48514 phantom_get_free 0 48514 NULL
++wiimote_hid_send_48528 wiimote_hid_send 3 48528 NULL
++ext3_splice_branch_48531 ext3_splice_branch 6 48531 NULL
++named_distribute_48544 named_distribute 4 48544 NULL
++raid10_size_48571 raid10_size 0-2-3 48571 NULL
++ufs_dtogd_48616 ufs_dtogd 0-2 48616 NULL
++do_ip_vs_set_ctl_48641 do_ip_vs_set_ctl 4 48641 NULL
++mtd_read_48655 mtd_read 0 48655 NULL
++aes_encrypt_packets_read_48666 aes_encrypt_packets_read 3 48666 NULL
++ore_get_rw_state_48667 ore_get_rw_state 4 48667 NULL
++sm501_create_subdev_48668 sm501_create_subdev 3-4 48668 NULL nohasharray
++sys_setgroups_48668 sys_setgroups 1 48668 &sm501_create_subdev_48668
++altera_drscan_48698 altera_drscan 2 48698 NULL
++kvm_set_irq_routing_48704 kvm_set_irq_routing 3 48704 NULL
++ath6kl_usb_bmi_read_48745 ath6kl_usb_bmi_read 3 48745 NULL
++ath6kl_regwrite_read_48747 ath6kl_regwrite_read 3 48747 NULL
++l2cap_segment_sdu_48772 l2cap_segment_sdu 4 48772 NULL
++lua_sysfs_write_48797 lua_sysfs_write 6 48797 NULL
++il3945_sta_dbgfs_stats_table_read_48802 il3945_sta_dbgfs_stats_table_read 3 48802 NULL
++twa_change_queue_depth_48808 twa_change_queue_depth 2 48808 NULL
++atomic_counters_read_48827 atomic_counters_read 3 48827 NULL
++efi_memory_uc_48828 efi_memory_uc 1 48828 NULL
++azx_get_position_48841 azx_get_position 0 48841 NULL
++vc_do_resize_48842 vc_do_resize 3-4 48842 NULL
++C_SYSC_pwritev64_48864 C_SYSC_pwritev64 3 48864 NULL nohasharray
++viafb_dvp1_proc_write_48864 viafb_dvp1_proc_write 3 48864 &C_SYSC_pwritev64_48864
++__ffs_ep0_read_events_48868 __ffs_ep0_read_events 3 48868 NULL
++sys_setgroups16_48882 sys_setgroups16 1 48882 NULL
++ext2_alloc_branch_48889 ext2_alloc_branch 4 48889 NULL
++crypto_cipher_ctxsize_48890 crypto_cipher_ctxsize 0 48890 NULL
++xdi_copy_to_user_48900 xdi_copy_to_user 4 48900 NULL
++msg_hdr_sz_48908 msg_hdr_sz 0 48908 NULL
++gdth_isa_probe_one_48925 gdth_isa_probe_one 1 48925 NULL
++sep_crypto_dma_48937 sep_crypto_dma 0 48937 NULL
++event_heart_beat_read_48961 event_heart_beat_read 3 48961 NULL
++nand_ecc_test_run_48966 nand_ecc_test_run 1 48966 NULL
++vmci_handle_arr_create_48971 vmci_handle_arr_create 1 48971 NULL
++batadv_orig_hash_del_if_48972 batadv_orig_hash_del_if 2 48972 NULL
++_alloc_set_attr_list_48991 _alloc_set_attr_list 4 48991 NULL
++rds_rm_size_48996 rds_rm_size 0-2 48996 NULL
++sel_write_enforce_48998 sel_write_enforce 3 48998 NULL
++filemap_check_errors_49022 filemap_check_errors 0 49022 NULL
++transient_status_49027 transient_status 4 49027 NULL
++ipath_reg_user_mr_49038 ipath_reg_user_mr 2-3 49038 NULL
++setup_msi_irq_49052 setup_msi_irq 3-4 49052 NULL
++ubi_read_49061 ubi_read 0 49061 NULL
++scsi_register_49094 scsi_register 2 49094 NULL
++paging64_walk_addr_nested_49100 paging64_walk_addr_nested 3 49100 NULL
++compat_do_readv_writev_49102 compat_do_readv_writev 4 49102 NULL
++check_exists_49119 check_exists 2 49119 NULL nohasharray
++xfrm_replay_state_esn_len_49119 xfrm_replay_state_esn_len 0 49119 &check_exists_49119
++pt_read_49136 pt_read 3 49136 NULL
++tipc_multicast_49144 tipc_multicast 5 49144 NULL
++atyfb_setup_generic_49151 atyfb_setup_generic 3 49151 NULL
++ipwireless_tty_received_49154 ipwireless_tty_received 3 49154 NULL
++f2fs_acl_count_49155 f2fs_acl_count 0-1 49155 NULL
++ipw_queue_tx_init_49161 ipw_queue_tx_init 3 49161 NULL
++ext4_free_clusters_after_init_49174 ext4_free_clusters_after_init 2 49174 NULL
++dvb_dvr_ioctl_49182 dvb_dvr_ioctl 2 49182 NULL
++iwl_dbgfs_ucode_general_stats_read_49199 iwl_dbgfs_ucode_general_stats_read 3 49199 NULL
++il4965_rs_sta_dbgfs_stats_table_read_49206 il4965_rs_sta_dbgfs_stats_table_read 3 49206 NULL
++do_jffs2_getxattr_49210 do_jffs2_getxattr 0 49210 NULL
++resp_write_same_49217 resp_write_same 2 49217 NULL
++nouveau_therm_create__49228 nouveau_therm_create_ 4 49228 NULL
++nouveau_i2c_port_create__49237 nouveau_i2c_port_create_ 6 49237 NULL
++hugetlb_cgroup_read_49259 hugetlb_cgroup_read 5 49259 NULL
++ieee80211_if_read_rssi_threshold_49260 ieee80211_if_read_rssi_threshold 3 49260 NULL
++isku_sysfs_read_keys_media_49268 isku_sysfs_read_keys_media 6 49268 NULL
++osd_req_add_get_attr_list_49278 osd_req_add_get_attr_list 3 49278 NULL
++rx_filter_beacon_filter_read_49279 rx_filter_beacon_filter_read 3 49279 NULL
++uio_read_49300 uio_read 3 49300 NULL
++ocfs2_resmap_find_free_bits_49301 ocfs2_resmap_find_free_bits 3 49301 NULL
++isku_sysfs_read_keys_macro_49312 isku_sysfs_read_keys_macro 6 49312 NULL
++SYSC_mincore_49319 SYSC_mincore 1 49319 NULL
++fwtty_port_handler_49327 fwtty_port_handler 9 49327 NULL
++srpt_alloc_ioctx_ring_49330 srpt_alloc_ioctx_ring 2-3-4 49330 NULL
++cfpkt_setlen_49343 cfpkt_setlen 2 49343 NULL
++joydev_ioctl_common_49359 joydev_ioctl_common 2 49359 NULL
++ocfs2_remove_btree_range_49370 ocfs2_remove_btree_range 4-5-3 49370 NULL
++px_raw_event_49371 px_raw_event 4 49371 NULL
++iscsi_alloc_session_49390 iscsi_alloc_session 3 49390 NULL
++applesmc_create_nodes_49392 applesmc_create_nodes 2 49392 NULL
++rx_streaming_always_read_49401 rx_streaming_always_read 3 49401 NULL
++tnode_alloc_49407 tnode_alloc 1 49407 NULL
++samples_to_bytes_49426 samples_to_bytes 0-2 49426 NULL
++md_domain_init_49432 md_domain_init 2 49432 NULL
++compat_do_msg_fill_49440 compat_do_msg_fill 3 49440 NULL
++get_lru_size_49441 get_lru_size 0 49441 NULL
++agp_3_5_isochronous_node_enable_49465 agp_3_5_isochronous_node_enable 3 49465 NULL
++xfs_iformat_local_49472 xfs_iformat_local 4 49472 NULL
++savu_sysfs_read_49473 savu_sysfs_read 6 49473 NULL
++isr_decrypt_done_read_49490 isr_decrypt_done_read 3 49490 NULL
++SyS_listxattr_49519 SyS_listxattr 3 49519 NULL
++emulator_write_phys_49520 emulator_write_phys 2-4 49520 NULL
++acpi_os_ioremap_49523 acpi_os_ioremap 1-2 49523 NULL
++smk_write_access_49561 smk_write_access 3 49561 NULL
++ntfs_malloc_nofs_49572 ntfs_malloc_nofs 1 49572 NULL
++alloc_chunk_49575 alloc_chunk 1 49575 NULL
++sctp_setsockopt_default_send_param_49578 sctp_setsockopt_default_send_param 3 49578 NULL
++tap_write_49595 tap_write 3 49595 NULL
++isr_wakeups_read_49607 isr_wakeups_read 3 49607 NULL
++btrfs_mksubvol_49616 btrfs_mksubvol 3 49616 NULL
++heap_init_49617 heap_init 2 49617 NULL
++smk_write_doi_49621 smk_write_doi 3 49621 NULL
++btrfsic_cmp_log_and_dev_bytenr_49628 btrfsic_cmp_log_and_dev_bytenr 2 49628 NULL
++aa_simple_write_to_buffer_49683 aa_simple_write_to_buffer 3-4 49683 NULL
++SyS_pwritev_49688 SyS_pwritev 3 49688 NULL
++sys_gethostname_49698 sys_gethostname 2 49698 NULL
++cx2341x_ctrl_new_menu_49700 cx2341x_ctrl_new_menu 3 49700 NULL
++dm_thin_insert_block_49720 dm_thin_insert_block 2-3 49720 NULL
++sep_create_dcb_dmatables_context_kernel_49728 sep_create_dcb_dmatables_context_kernel 6 49728 NULL
++sys_fsetxattr_49736 sys_fsetxattr 4 49736 NULL
++check_frame_49741 check_frame 0 49741 NULL
++zd_usb_iowrite16v_49744 zd_usb_iowrite16v 3 49744 NULL
++btrfs_chunk_num_stripes_49751 btrfs_chunk_num_stripes 0 49751 NULL
++fuse_wr_pages_49753 fuse_wr_pages 0-1-2 49753 NULL
++key_conf_keylen_read_49758 key_conf_keylen_read 3 49758 NULL
++fuse_conn_waiting_read_49762 fuse_conn_waiting_read 3 49762 NULL
++isku_sysfs_write_49767 isku_sysfs_write 6-5 49767 NULL
++ceph_osdc_readpages_49789 ceph_osdc_readpages 10-4 49789 NULL
++nfs4_acl_new_49806 nfs4_acl_new 1 49806 NULL
++arch_gnttab_map_status_49812 arch_gnttab_map_status 3 49812 NULL
++ntfs_copy_from_user_iovec_49829 ntfs_copy_from_user_iovec 3-6-0 49829 NULL
++add_uuid_49831 add_uuid 4 49831 NULL
++tcf_csum_ipv4_tcp_49834 tcf_csum_ipv4_tcp 3 49834 NULL
++ath6kl_fwlog_block_read_49836 ath6kl_fwlog_block_read 3 49836 NULL
++twl4030_write_49846 twl4030_write 2 49846 NULL
++scsi_dispatch_cmd_entry_49848 scsi_dispatch_cmd_entry 3 49848 NULL
++timeradd_entry_49850 timeradd_entry 3 49850 NULL
++btrfs_subvolume_reserve_metadata_49859 btrfs_subvolume_reserve_metadata 3 49859 NULL
++sctp_setsockopt_bindx_49870 sctp_setsockopt_bindx 3 49870 NULL
++ceph_get_caps_49890 ceph_get_caps 0 49890 NULL
++__cow_file_range_49901 __cow_file_range 5 49901 NULL
++__copy_from_user_inatomic_nocache_49921 __copy_from_user_inatomic_nocache 3 49921 NULL
++batadv_tt_realloc_packet_buff_49960 batadv_tt_realloc_packet_buff 4 49960 NULL
++b43legacy_pio_read_49978 b43legacy_pio_read 0 49978 NULL
++ieee80211_if_fmt_dtim_count_49987 ieee80211_if_fmt_dtim_count 3 49987 NULL
++sta2x11_swiotlb_alloc_coherent_49994 sta2x11_swiotlb_alloc_coherent 2 49994 NULL
++l2cap_chan_send_49995 l2cap_chan_send 3 49995 NULL
++__module_alloc_50004 __module_alloc 1 50004 NULL
++dn_mss_from_pmtu_50011 dn_mss_from_pmtu 0-2 50011 NULL
++ptrace_readdata_50020 ptrace_readdata 2-4 50020 NULL
++isdn_read_50021 isdn_read 3 50021 NULL
++qp_alloc_queue_50028 qp_alloc_queue 1 50028 NULL
++alloc_ebda_hpc_50046 alloc_ebda_hpc 1-2 50046 NULL
++vmw_surface_destroy_size_50072 vmw_surface_destroy_size 0 50072 NULL
++arch_setup_ht_irq_50073 arch_setup_ht_irq 1 50073 NULL
++dev_set_alias_50084 dev_set_alias 3 50084 NULL
++pcpu_get_vm_areas_50085 pcpu_get_vm_areas 3 50085 NULL
++sock_setsockopt_50088 sock_setsockopt 5 50088 NULL
++altera_swap_dr_50090 altera_swap_dr 2 50090 NULL
++read_file_slot_50111 read_file_slot 3 50111 NULL
++SYSC_preadv_50134 SYSC_preadv 3 50134 NULL
++copy_items_50140 copy_items 6 50140 NULL
++tx_frag_need_fragmentation_read_50153 tx_frag_need_fragmentation_read 3 50153 NULL
++set_cmd_header_50155 set_cmd_header 0 50155 NULL
++reiserfs_bmap_count_50160 reiserfs_bmap_count 0 50160 NULL
++aac_nark_ioremap_50163 aac_nark_ioremap 2 50163 NULL nohasharray
++kmalloc_node_50163 kmalloc_node 1 50163 &aac_nark_ioremap_50163
++rx_filter_ibss_filter_read_50167 rx_filter_ibss_filter_read 3 50167 NULL
++odev_update_50169 odev_update 2 50169 NULL
++ieee80211_if_fmt_dot11MeshHWMPRannInterval_50172 ieee80211_if_fmt_dot11MeshHWMPRannInterval 3 50172 NULL nohasharray
++ubi_resize_volume_50172 ubi_resize_volume 2 50172 &ieee80211_if_fmt_dot11MeshHWMPRannInterval_50172
++ib_send_cm_drep_50186 ib_send_cm_drep 3 50186 NULL
++cfg80211_roamed_bss_50198 cfg80211_roamed_bss 4-6 50198 NULL
++rx_rx_timeout_wa_read_50204 rx_rx_timeout_wa_read 3 50204 NULL
++ieee80211_skb_resize_50211 ieee80211_skb_resize 3 50211 NULL
++mon_bin_compat_ioctl_50234 mon_bin_compat_ioctl 3 50234 NULL
++sg_kmalloc_50240 sg_kmalloc 1 50240 NULL
++afs_extract_data_50261 afs_extract_data 5 50261 NULL
++rxrpc_setsockopt_50286 rxrpc_setsockopt 5 50286 NULL
++soc_codec_reg_show_50302 soc_codec_reg_show 0 50302 NULL
++SYSC_flistxattr_50307 SYSC_flistxattr 3 50307 NULL
++SYSC_sched_setaffinity_50310 SYSC_sched_setaffinity 2 50310 NULL
++soc_camera_read_50319 soc_camera_read 3 50319 NULL
++do_launder_page_50329 do_launder_page 0 50329 NULL
++nouveau_engine_create__50331 nouveau_engine_create_ 7 50331 NULL
++lpfc_idiag_pcicfg_read_50334 lpfc_idiag_pcicfg_read 3 50334 NULL
++ocfs2_block_to_cluster_group_50337 ocfs2_block_to_cluster_group 2 50337 NULL nohasharray
++snd_pcm_lib_writev_50337 snd_pcm_lib_writev 0-3 50337 &ocfs2_block_to_cluster_group_50337
++roccat_common2_send_with_status_50343 roccat_common2_send_with_status 4 50343 NULL
++tpm_read_50344 tpm_read 3 50344 NULL
++sched_clock_remote_50347 sched_clock_remote 0 50347 NULL
++kvm_arch_create_memslot_50354 kvm_arch_create_memslot 2 50354 NULL
++isdn_ppp_read_50356 isdn_ppp_read 4 50356 NULL
++unpack_u16_chunk_50357 unpack_u16_chunk 0 50357 NULL
++xfrm_send_migrate_50365 xfrm_send_migrate 5 50365 NULL
++roccat_common2_receive_50369 roccat_common2_receive 4 50369 NULL
++sl_alloc_bufs_50380 sl_alloc_bufs 2 50380 NULL
++hash_ip6_expire_50390 hash_ip6_expire 3 50390 NULL
++l2tp_ip_sendmsg_50411 l2tp_ip_sendmsg 4 50411 NULL
++ceph_writepages_osd_request_50423 ceph_writepages_osd_request 5 50423 NULL
++iscsi_create_conn_50425 iscsi_create_conn 2 50425 NULL
++validate_acl_mac_addrs_50429 validate_acl_mac_addrs 0 50429 NULL
++btrfs_error_discard_extent_50444 btrfs_error_discard_extent 2 50444 NULL
++calc_csum_metadata_size_50448 calc_csum_metadata_size 0 50448 NULL
++pgctrl_write_50453 pgctrl_write 3 50453 NULL
++force_mapping_50464 force_mapping 2 50464 NULL
++cdrom_read_cdda_50478 cdrom_read_cdda 4 50478 NULL
++mei_io_cb_alloc_req_buf_50493 mei_io_cb_alloc_req_buf 2 50493 NULL
++pwr_rcvd_awake_beacons_read_50505 pwr_rcvd_awake_beacons_read 3 50505 NULL
++fwnet_receive_packet_50537 fwnet_receive_packet 9 50537 NULL
++ath6kl_set_ap_probe_resp_ies_50539 ath6kl_set_ap_probe_resp_ies 3 50539 NULL
++hme_read_desc32_50574 hme_read_desc32 0 50574 NULL
++fat_readpages_50582 fat_readpages 4 50582 NULL
++iwl_dbgfs_missed_beacon_read_50584 iwl_dbgfs_missed_beacon_read 3 50584 NULL
++build_inv_iommu_pages_50589 build_inv_iommu_pages 2-3 50589 NULL
++sge_rx_50594 sge_rx 3 50594 NULL
++rx_rx_checksum_result_read_50617 rx_rx_checksum_result_read 3 50617 NULL
++__ffs_50625 __ffs 0 50625 NULL
++regcache_rbtree_write_50629 regcache_rbtree_write 2 50629 NULL
++simple_transaction_get_50633 simple_transaction_get 3 50633 NULL
++ath6kl_tm_rx_event_50664 ath6kl_tm_rx_event 3 50664 NULL nohasharray
++sys_readv_50664 sys_readv 3 50664 &ath6kl_tm_rx_event_50664
++bnad_debugfs_read_50665 bnad_debugfs_read 3 50665 NULL
++ext2_try_to_allocate_with_rsv_50669 ext2_try_to_allocate_with_rsv 4-2 50669 NULL
++btmrvl_psstate_read_50683 btmrvl_psstate_read 3 50683 NULL
++swiotlb_free_coherent_50690 swiotlb_free_coherent 4 50690 NULL
++paging32_gva_to_gpa_50696 paging32_gva_to_gpa 2 50696 NULL
++xfs_growfs_get_hdr_buf_50697 xfs_growfs_get_hdr_buf 3 50697 NULL
++blk_check_plugged_50736 blk_check_plugged 3 50736 NULL
++__ext3_get_inode_loc_50744 __ext3_get_inode_loc 0 50744 NULL
++skb_padto_50759 skb_padto 2 50759 NULL
++ocfs2_xattr_block_get_50773 ocfs2_xattr_block_get 0 50773 NULL
++tm6000_read_write_usb_50774 tm6000_read_write_usb 7 50774 NULL
++bio_alloc_map_data_50782 bio_alloc_map_data 1-2 50782 NULL
++tpm_write_50798 tpm_write 3 50798 NULL
++write_flush_50803 write_flush 3 50803 NULL
++dvb_play_50814 dvb_play 3 50814 NULL
++dpcm_show_state_50827 dpcm_show_state 0 50827 NULL
++acpi_ev_install_gpe_block_50829 acpi_ev_install_gpe_block 2 50829 NULL
++SetArea_50835 SetArea 4 50835 NULL nohasharray
++create_mem_extents_50835 create_mem_extents 0 50835 &SetArea_50835
++videobuf_dma_init_user_50839 videobuf_dma_init_user 3 50839 NULL
++self_check_write_50856 self_check_write 5 50856 NULL
++carl9170_debugfs_write_50857 carl9170_debugfs_write 3 50857 NULL
++SyS_lgetxattr_50889 SyS_lgetxattr 4 50889 NULL
++netlbl_secattr_catmap_walk_rng_50894 netlbl_secattr_catmap_walk_rng 0-2 50894 NULL
++osd_req_write_sg_50908 osd_req_write_sg 5 50908 NULL
++xfs_iext_remove_50909 xfs_iext_remove 3 50909 NULL
++blk_rq_cur_sectors_50910 blk_rq_cur_sectors 0 50910 NULL
++hash_recvmsg_50924 hash_recvmsg 4 50924 NULL
++chd_dec_fetch_cdata_50926 chd_dec_fetch_cdata 3 50926 NULL
++ocfs2_add_refcount_flag_50952 ocfs2_add_refcount_flag 6 50952 NULL
++SyS_setxattr_50957 SyS_setxattr 4 50957 NULL
++iwl_statistics_flag_50981 iwl_statistics_flag 0-3 50981 NULL
++timeout_write_50991 timeout_write 3 50991 NULL
++wm831x_irq_map_50995 wm831x_irq_map 2 50995 NULL
++proc_write_51003 proc_write 3 51003 NULL
++snd_pcm_default_page_ops_51021 snd_pcm_default_page_ops 2 51021 NULL
++lbs_dev_info_51023 lbs_dev_info 3 51023 NULL
++ntfs_attr_find_51028 ntfs_attr_find 0 51028 NULL nohasharray
++fuse_conn_congestion_threshold_read_51028 fuse_conn_congestion_threshold_read 3 51028 &ntfs_attr_find_51028
++BcmGetSectionValEndOffset_51039 BcmGetSectionValEndOffset 0 51039 NULL
++dump_midi_51040 dump_midi 3 51040 NULL
++srpt_alloc_ioctx_51042 srpt_alloc_ioctx 2-3 51042 NULL
++do_arpt_set_ctl_51053 do_arpt_set_ctl 4 51053 NULL
++wusb_prf_64_51065 wusb_prf_64 7 51065 NULL
++jbd2_journal_init_revoke_51088 jbd2_journal_init_revoke 2 51088 NULL
++solo_enc_v4l2_init_51094 solo_enc_v4l2_init 2 51094 NULL
++__ocfs2_find_path_51096 __ocfs2_find_path 0 51096 NULL
++ti_recv_51110 ti_recv 3 51110 NULL
++dgrp_net_read_51113 dgrp_net_read 3 51113 NULL
++nfs_map_name_to_uid_51132 nfs_map_name_to_uid 3 51132 NULL
++alloc_rtllib_51136 alloc_rtllib 1 51136 NULL
++simple_xattr_set_51140 simple_xattr_set 4 51140 NULL
++set_dirty_51144 set_dirty 3 51144 NULL
++xfs_trans_get_efd_51148 xfs_trans_get_efd 3 51148 NULL
++compat_sys_pwritev64_51151 compat_sys_pwritev64 3 51151 NULL
++blk_bio_map_sg_51213 blk_bio_map_sg 0 51213 NULL
++nf_ct_ext_create_51232 nf_ct_ext_create 3 51232 NULL
++snd_pcm_write_51235 snd_pcm_write 3 51235 NULL
++tipc_send_51238 tipc_send 4 51238 NULL
++drm_property_create_51239 drm_property_create 4 51239 NULL
++st_read_51251 st_read 3 51251 NULL
++compat_dccp_setsockopt_51263 compat_dccp_setsockopt 5 51263 NULL
++dvb_audio_write_51275 dvb_audio_write 3 51275 NULL
++ipwireless_network_packet_received_51277 ipwireless_network_packet_received 4 51277 NULL
++zone_reclaimable_pages_51283 zone_reclaimable_pages 0 51283 NULL
++pvr2_std_id_to_str_51288 pvr2_std_id_to_str 2 51288 NULL
++bnad_debugfs_read_regrd_51308 bnad_debugfs_read_regrd 3 51308 NULL
++get_cell_51316 get_cell 2 51316 NULL
++init_map_ipmac_51317 init_map_ipmac 4-3-5 51317 NULL
++alloc_hippi_dev_51320 alloc_hippi_dev 1 51320 NULL
++ext2_xattr_get_51327 ext2_xattr_get 0 51327 NULL
++alloc_smp_req_51337 alloc_smp_req 1 51337 NULL nohasharray
++compat_arch_ptrace_51337 compat_arch_ptrace 3-4 51337 &alloc_smp_req_51337
++ipw_get_event_log_len_51341 ipw_get_event_log_len 0 51341 NULL
++ieee80211_if_fmt_estab_plinks_51370 ieee80211_if_fmt_estab_plinks 3 51370 NULL
++radeon_kms_compat_ioctl_51371 radeon_kms_compat_ioctl 2 51371 NULL
++ieee80211_wx_set_gen_ie_51399 ieee80211_wx_set_gen_ie 3 51399 NULL
++ceph_sync_read_51410 ceph_sync_read 3 51410 NULL
++x86_swiotlb_free_coherent_51421 x86_swiotlb_free_coherent 4 51421 NULL
++blk_register_region_51424 blk_register_region 1-2 51424 NULL
++mwifiex_rdeeprom_read_51429 mwifiex_rdeeprom_read 3 51429 NULL
++ieee80211_if_read_dot11MeshHWMPRootMode_51441 ieee80211_if_read_dot11MeshHWMPRootMode 3 51441 NULL
++print_devstats_dot11ACKFailureCount_51443 print_devstats_dot11ACKFailureCount 3 51443 NULL
++____alloc_ei_netdev_51475 ____alloc_ei_netdev 1 51475 NULL
++xfs_buf_get_uncached_51477 xfs_buf_get_uncached 2 51477 NULL
++vaddr_51480 vaddr 0 51480 NULL
++skb_inner_mac_header_51482 skb_inner_mac_header 0 51482 NULL nohasharray
++btrfs_find_space_cluster_51482 btrfs_find_space_cluster 5 51482 &skb_inner_mac_header_51482
++__cpa_process_fault_51502 __cpa_process_fault 2 51502 NULL
++ieee80211_if_write_uapsd_queues_51526 ieee80211_if_write_uapsd_queues 3 51526 NULL
++load_pdptrs_51541 load_pdptrs 3 51541 NULL
++__alloc_eip_netdev_51549 __alloc_eip_netdev 1 51549 NULL
++icmp_manip_pkt_51560 icmp_manip_pkt 4 51560 NULL
++ixgb_get_eeprom_len_51586 ixgb_get_eeprom_len 0 51586 NULL
++aac_convert_sgraw2_51598 aac_convert_sgraw2 4 51598 NULL
++raw_ioctl_51607 raw_ioctl 3 51607 NULL
++table_size_to_number_of_entries_51613 table_size_to_number_of_entries 0-1 51613 NULL
++gnttab_end_foreign_access_51617 gnttab_end_foreign_access 3 51617 NULL
++dns_resolve_server_name_to_ip_51632 dns_resolve_server_name_to_ip 0 51632 NULL
++sctp_auth_create_key_51641 sctp_auth_create_key 1 51641 NULL
++iscsi_create_session_51647 iscsi_create_session 3 51647 NULL
++get_new_cssid_51665 get_new_cssid 2 51665 NULL
++ps_upsd_utilization_read_51669 ps_upsd_utilization_read 3 51669 NULL
++sctp_setsockopt_associnfo_51684 sctp_setsockopt_associnfo 3 51684 NULL
++sfi_sysfs_install_table_51688 sfi_sysfs_install_table 1 51688 NULL
++host_mapping_level_51696 host_mapping_level 2 51696 NULL
++sel_write_access_51704 sel_write_access 3 51704 NULL
++tty_cdev_add_51714 tty_cdev_add 2-4 51714 NULL
++v9fs_alloc_rdir_buf_51716 v9fs_alloc_rdir_buf 2 51716 NULL
++drm_compat_ioctl_51717 drm_compat_ioctl 2 51717 NULL
++sg_read_oxfer_51724 sg_read_oxfer 3 51724 NULL
++hid_parse_report_51737 hid_parse_report 3 51737 NULL
++get_user_pages_fast_51751 get_user_pages_fast 0-1-2 51751 NULL
++ifx_spi_insert_flip_string_51752 ifx_spi_insert_flip_string 3 51752 NULL
++if_write_51756 if_write 3 51756 NULL
++ioremap_prot_51764 ioremap_prot 1-2 51764 NULL
++iio_buffer_add_channel_sysfs_51766 iio_buffer_add_channel_sysfs 0 51766 NULL
++to_ratio_51809 to_ratio 2-1 51809 NULL
++qib_alloc_devdata_51819 qib_alloc_devdata 2 51819 NULL
++buffer_from_user_51826 buffer_from_user 3 51826 NULL
++ioread32_51847 ioread32 0 51847 NULL nohasharray
++read_file_tgt_tx_stats_51847 read_file_tgt_tx_stats 3 51847 &ioread32_51847
++do_readv_writev_51849 do_readv_writev 4 51849 NULL
++SYSC_sendto_51852 SYSC_sendto 6 51852 NULL
++pointer_size_read_51863 pointer_size_read 3 51863 NULL
++mlx4_alloc_db_from_pgdir_51865 mlx4_alloc_db_from_pgdir 3 51865 NULL
++get_indirect_ea_51869 get_indirect_ea 4 51869 NULL
++user_read_51881 user_read 3 51881 NULL
++memblock_alloc_51884 memblock_alloc 1-2 51884 NULL
++dbAdjCtl_51888 dbAdjCtl 0 51888 NULL
++virt_to_phys_51896 virt_to_phys 0 51896 NULL
++wmi_set_ie_51919 wmi_set_ie 3 51919 NULL
++dbg_status_buf_51930 dbg_status_buf 2 51930 NULL
++__tcp_mtu_to_mss_51938 __tcp_mtu_to_mss 0-2 51938 NULL
++xfrm_alg_len_51940 xfrm_alg_len 0 51940 NULL
++irq_dispose_mapping_51941 irq_dispose_mapping 1 51941 NULL
++scsi_get_vpd_page_51951 scsi_get_vpd_page 4 51951 NULL
++arizona_free_irq_51969 arizona_free_irq 2 51969 NULL nohasharray
++snd_mask_min_51969 snd_mask_min 0 51969 &arizona_free_irq_51969
++ath6kl_sdio_alloc_prep_scat_req_51986 ath6kl_sdio_alloc_prep_scat_req 2 51986 NULL
++dwc3_mode_write_51997 dwc3_mode_write 3 51997 NULL
++skb_copy_datagram_from_iovec_52014 skb_copy_datagram_from_iovec 4-2-5 52014 NULL
++rdmalt_52022 rdmalt 0 52022 NULL
++vxge_rx_alloc_52024 vxge_rx_alloc 3 52024 NULL
++override_release_52032 override_release 2 52032 NULL
++end_port_52042 end_port 0 52042 NULL
++dma_rx_errors_read_52045 dma_rx_errors_read 3 52045 NULL
++msnd_fifo_write_52052 msnd_fifo_write 0-3 52052 NULL
++dvb_ringbuffer_avail_52057 dvb_ringbuffer_avail 0 52057 NULL
++__fuse_request_alloc_52060 __fuse_request_alloc 1 52060 NULL
++isofs_readpages_52067 isofs_readpages 4 52067 NULL
++nsm_get_handle_52089 nsm_get_handle 4 52089 NULL
++o2net_debug_read_52105 o2net_debug_read 3 52105 NULL
++smsdvb_stats_read_52114 smsdvb_stats_read 3 52114 NULL
++retry_count_read_52129 retry_count_read 3 52129 NULL
++zram_meta_alloc_52140 zram_meta_alloc 1 52140 NULL
++hysdn_conf_write_52145 hysdn_conf_write 3 52145 NULL nohasharray
++ext2_alloc_blocks_52145 ext2_alloc_blocks 2 52145 &hysdn_conf_write_52145
++htable_size_52148 htable_size 0-1 52148 NULL
++__le16_to_cpup_52155 __le16_to_cpup 0 52155 NULL nohasharray
++smk_write_load2_52155 smk_write_load2 3 52155 &__le16_to_cpup_52155
++alix_present_52165 alix_present 1 52165 NULL
++ieee80211_if_read_dot11MeshRetryTimeout_52168 ieee80211_if_read_dot11MeshRetryTimeout 3 52168 NULL
++mga_compat_ioctl_52170 mga_compat_ioctl 2 52170 NULL
++print_prefix_52176 print_prefix 0 52176 NULL
++proc_pid_readlink_52186 proc_pid_readlink 3 52186 NULL
++vmci_qp_broker_alloc_52216 vmci_qp_broker_alloc 5-6 52216 NULL
++do_dmabuf_dirty_ldu_52241 do_dmabuf_dirty_ldu 6 52241 NULL
++fuse_request_alloc_52243 fuse_request_alloc 1 52243 NULL
++pm80x_request_irq_52250 pm80x_request_irq 2 52250 NULL
++mdiobus_alloc_size_52259 mdiobus_alloc_size 1 52259 NULL
++shrink_slab_52261 shrink_slab 2-3 52261 NULL
++hva_to_pfn_slow_52262 hva_to_pfn_slow 1 52262 NULL
++sisusbcon_do_font_op_52271 sisusbcon_do_font_op 9 52271 NULL
++atomic64_read_52300 atomic64_read 0 52300 NULL
++ath6kl_wmi_get_new_buf_52304 ath6kl_wmi_get_new_buf 1 52304 NULL
++read_file_reset_52310 read_file_reset 3 52310 NULL
++request_asymmetric_key_52317 request_asymmetric_key 2-4 52317 NULL
++hwflags_read_52318 hwflags_read 3 52318 NULL
++ntfs_rl_split_52328 ntfs_rl_split 2-4 52328 NULL
++test_unaligned_bulk_52333 test_unaligned_bulk 3 52333 NULL
++compat_SyS_preadv64_52351 compat_SyS_preadv64 3 52351 NULL
++bytes_to_frames_52362 bytes_to_frames 0-2 52362 NULL
++copy_entries_to_user_52367 copy_entries_to_user 1 52367 NULL
++mq_emit_config_values_52378 mq_emit_config_values 3 52378 NULL
++isdn_writebuf_stub_52383 isdn_writebuf_stub 4 52383 NULL
++jfs_setxattr_52389 jfs_setxattr 4 52389 NULL
++aer_inject_write_52399 aer_inject_write 3 52399 NULL
++aac_rx_ioremap_52410 aac_rx_ioremap 2 52410 NULL
++cgroup_file_write_52417 cgroup_file_write 3 52417 NULL
++line6_midibuf_init_52425 line6_midibuf_init 2 52425 NULL
++delay_status_52431 delay_status 5 52431 NULL
++ieee80211_if_fmt_num_sta_ps_52438 ieee80211_if_fmt_num_sta_ps 3 52438 NULL
++nl80211_send_mgmt_tx_status_52445 nl80211_send_mgmt_tx_status 5 52445 NULL
++ieee80211_alloc_txb_52477 ieee80211_alloc_txb 1-2 52477 NULL
++ocfs2_extend_no_holes_52483 ocfs2_extend_no_holes 3-4 52483 NULL
++fd_do_rw_52495 fd_do_rw 3 52495 NULL nohasharray
++skb_cow_head_52495 skb_cow_head 2 52495 &fd_do_rw_52495
++qib_user_sdma_pin_pages_52498 qib_user_sdma_pin_pages 3-5 52498 NULL
++int_tasklet_entry_52500 int_tasklet_entry 3 52500 NULL
++qlcnic_83xx_sysfs_flash_write_52507 qlcnic_83xx_sysfs_flash_write 4 52507 NULL
++pm_qos_power_write_52513 pm_qos_power_write 3 52513 NULL
++dup_variable_bug_52525 dup_variable_bug 3 52525 NULL
++from_oblock_52546 from_oblock 0-1 52546 NULL
++dccpprobe_read_52549 dccpprobe_read 3 52549 NULL
++ocfs2_make_right_split_rec_52562 ocfs2_make_right_split_rec 3 52562 NULL
++emit_code_52583 emit_code 0-3 52583 NULL
++isku_sysfs_read_macro_52587 isku_sysfs_read_macro 6 52587 NULL
++tps80031_writes_52638 tps80031_writes 3-4 52638 NULL
++brcmf_sdio_assert_info_52653 brcmf_sdio_assert_info 4 52653 NULL
++SYSC_gethostname_52677 SYSC_gethostname 2 52677 NULL
++nvd0_disp_pioc_create__52693 nvd0_disp_pioc_create_ 5 52693 NULL
++nouveau_client_create__52715 nouveau_client_create_ 5 52715 NULL
++cx25840_ir_rx_read_52724 cx25840_ir_rx_read 3 52724 NULL
++blkcipher_next_slow_52733 blkcipher_next_slow 3-4 52733 NULL
++relay_alloc_page_array_52735 relay_alloc_page_array 1 52735 NULL
++carl9170_debugfs_vif_dump_read_52755 carl9170_debugfs_vif_dump_read 3 52755 NULL
++ieee80211_if_read_beacon_timeout_52756 ieee80211_if_read_beacon_timeout 3 52756 NULL
++copy_ctr_args_52761 copy_ctr_args 2 52761 NULL
++pwr_rcvd_beacons_read_52836 pwr_rcvd_beacons_read 3 52836 NULL
++ext2_xattr_set_acl_52857 ext2_xattr_set_acl 4 52857 NULL
++mon_bin_get_event_52863 mon_bin_get_event 4 52863 NULL
++twlreg_write_52880 twlreg_write 3 52880 NULL
++pvr2_ctrl_value_to_sym_internal_52881 pvr2_ctrl_value_to_sym_internal 5 52881 NULL
++cache_read_procfs_52882 cache_read_procfs 3 52882 NULL
++kvm_kvzalloc_52894 kvm_kvzalloc 1 52894 NULL
++arizona_request_irq_52908 arizona_request_irq 2 52908 NULL
++__kfifo_out_peek_r_52919 __kfifo_out_peek_r 3 52919 NULL
++iblock_get_bio_52936 iblock_get_bio 3 52936 NULL nohasharray
++__iio_device_attr_init_52936 __iio_device_attr_init 0 52936 &iblock_get_bio_52936
++__nodes_remap_52951 __nodes_remap 5 52951 NULL
++send_packet_52960 send_packet 4 52960 NULL
++ieee80211_if_fmt_fwded_mcast_52961 ieee80211_if_fmt_fwded_mcast 3 52961 NULL
++hx8357_spi_write_then_read_52964 hx8357_spi_write_then_read 3 52964 NULL nohasharray
++compat_sock_ioctl_52964 compat_sock_ioctl 3 52964 &hx8357_spi_write_then_read_52964
++tx_tx_exch_read_52986 tx_tx_exch_read 3 52986 NULL
++num_node_state_52989 num_node_state 0 52989 NULL
++batadv_check_management_packet_52993 batadv_check_management_packet 3 52993 NULL
++efivarfs_file_write_53000 efivarfs_file_write 3 53000 NULL
++btrfs_free_and_pin_reserved_extent_53016 btrfs_free_and_pin_reserved_extent 2 53016 NULL
++tx_tx_exch_pending_read_53018 tx_tx_exch_pending_read 3 53018 NULL
++ext4_meta_bg_first_group_53031 ext4_meta_bg_first_group 0-2 53031 NULL
++bio_cur_bytes_53037 bio_cur_bytes 0 53037 NULL
++regcache_lzo_block_count_53056 regcache_lzo_block_count 0 53056 NULL
++cfi_read_query_53066 cfi_read_query 0 53066 NULL
++mwifiex_debug_read_53074 mwifiex_debug_read 3 53074 NULL
++qib_resize_cq_53090 qib_resize_cq 2 53090 NULL
++verity_status_53120 verity_status 5 53120 NULL
++brcmf_usb_dl_cmd_53130 brcmf_usb_dl_cmd 4 53130 NULL
++ps_poll_ps_poll_max_ap_turn_read_53140 ps_poll_ps_poll_max_ap_turn_read 3 53140 NULL
++ieee80211_bss_info_update_53170 ieee80211_bss_info_update 4 53170 NULL
++btrfs_io_bio_alloc_53179 btrfs_io_bio_alloc 2 53179 NULL
++clear_capture_buf_53192 clear_capture_buf 2 53192 NULL
++mtdoops_erase_block_53206 mtdoops_erase_block 2 53206 NULL
++fixup_user_fault_53210 fixup_user_fault 3 53210 NULL
++tx_tx_start_data_read_53219 tx_tx_start_data_read 3 53219 NULL
++rbd_obj_method_sync_53252 rbd_obj_method_sync 8 53252 NULL
++xfs_trans_read_buf_map_53258 xfs_trans_read_buf_map 5 53258 NULL
++wil_write_file_ssid_53266 wil_write_file_ssid 3 53266 NULL
++btrfs_file_extent_num_bytes_53269 btrfs_file_extent_num_bytes 0 53269 NULL
++isku_sysfs_write_key_mask_53305 isku_sysfs_write_key_mask 6 53305 NULL
++batadv_interface_rx_53325 batadv_interface_rx 4 53325 NULL
++gsm_control_reply_53333 gsm_control_reply 4 53333 NULL
++vm_mmap_53339 vm_mmap 0 53339 NULL
++sock_setbindtodevice_53369 sock_setbindtodevice 3 53369 NULL
++get_random_bytes_arch_53370 get_random_bytes_arch 2 53370 NULL
++iwl_pcie_txq_alloc_53413 iwl_pcie_txq_alloc 3 53413 NULL
++isr_cmd_cmplt_read_53439 isr_cmd_cmplt_read 3 53439 NULL
++mwifiex_info_read_53447 mwifiex_info_read 3 53447 NULL
++apei_exec_run_optional_53452 apei_exec_run_optional 0 53452 NULL
++acpi_tb_parse_root_table_53455 acpi_tb_parse_root_table 1 53455 NULL
++n2_run_53459 n2_run 3 53459 NULL
++paging64_prefetch_gpte_53468 paging64_prefetch_gpte 4 53468 NULL
++rds_tcp_data_recv_53476 rds_tcp_data_recv 3 53476 NULL
++iowarrior_read_53483 iowarrior_read 3 53483 NULL
++osd_req_write_kern_53486 osd_req_write_kern 5 53486 NULL
++do_verify_xattr_datum_53499 do_verify_xattr_datum 0 53499 NULL
++snd_pcm_format_physical_width_53505 snd_pcm_format_physical_width 0 53505 NULL
++dbAllocNext_53506 dbAllocNext 0 53506 NULL
++ocfs2_xattr_set_acl_53508 ocfs2_xattr_set_acl 4 53508 NULL
++check_acl_53512 check_acl 0 53512 NULL
++alloc_pages_exact_nid_53515 alloc_pages_exact_nid 2 53515 NULL
++SYSC_bind_53582 SYSC_bind 3 53582 NULL nohasharray
++set_registers_53582 set_registers 3 53582 &SYSC_bind_53582
++cifs_utf16_bytes_53593 cifs_utf16_bytes 0 53593 NULL
++gfn_to_pfn_async_53597 gfn_to_pfn_async 2 53597 NULL
++___alloc_bootmem_nopanic_53626 ___alloc_bootmem_nopanic 1-2 53626 NULL
++xd_write_multiple_pages_53633 xd_write_multiple_pages 6-5 53633 NULL
++ccid_getsockopt_builtin_ccids_53634 ccid_getsockopt_builtin_ccids 2 53634 NULL
++nr_sendmsg_53656 nr_sendmsg 4 53656 NULL
++_preload_range_53676 _preload_range 2-3 53676 NULL
++lowpan_fragment_xmit_53680 lowpan_fragment_xmit 3-4 53680 NULL
++fuse_fill_write_pages_53682 fuse_fill_write_pages 4 53682 NULL
++v4l2_event_subscribe_53687 v4l2_event_subscribe 3 53687 NULL
++bdev_logical_block_size_53690 bdev_logical_block_size 0 53690 NULL nohasharray
++igb_alloc_q_vector_53690 igb_alloc_q_vector 4-6 53690 &bdev_logical_block_size_53690
++find_overflow_devnum_53711 find_overflow_devnum 0 53711 NULL
++bio_integrity_split_53714 bio_integrity_split 3 53714 NULL
++__ocfs2_resv_find_window_53721 __ocfs2_resv_find_window 3 53721 NULL
++wdm_write_53735 wdm_write 3 53735 NULL
++ext3_try_to_allocate_with_rsv_53737 ext3_try_to_allocate_with_rsv 5-3 53737 NULL
++da9052_disable_irq_53745 da9052_disable_irq 2 53745 NULL
++lpfc_idiag_queacc_read_qe_53755 lpfc_idiag_queacc_read_qe 0-2 53755 NULL nohasharray
++amdtp_out_stream_get_max_payload_53755 amdtp_out_stream_get_max_payload 0 53755 &lpfc_idiag_queacc_read_qe_53755
++ext2_acl_count_53773 ext2_acl_count 0-1 53773 NULL
++__kfifo_dma_in_prepare_r_53792 __kfifo_dma_in_prepare_r 4-5 53792 NULL
++qp_alloc_host_work_53798 qp_alloc_host_work 3-5 53798 NULL
++__tty_alloc_driver_53799 __tty_alloc_driver 1 53799 NULL
++regmap_raw_write_53803 regmap_raw_write 2-4 53803 NULL
++lpfc_idiag_ctlacc_read_reg_53809 lpfc_idiag_ctlacc_read_reg 0-3 53809 NULL
++nls_nullsize_53815 nls_nullsize 0 53815 NULL
++pms_read_53873 pms_read 3 53873 NULL
++ieee80211_if_fmt_dropped_frames_congestion_53883 ieee80211_if_fmt_dropped_frames_congestion 3 53883 NULL
++ocfs2_rm_xattr_cluster_53900 ocfs2_rm_xattr_cluster 5-4-3 53900 NULL nohasharray
++SyS_setgroups_53900 SyS_setgroups 1 53900 &ocfs2_rm_xattr_cluster_53900
++proc_file_read_53905 proc_file_read 3 53905 NULL
++early_reserve_e820_53915 early_reserve_e820 1-2 53915 NULL
++ocfs2_make_clusters_writable_53938 ocfs2_make_clusters_writable 4 53938 NULL
++mthca_setup_cmd_doorbells_53954 mthca_setup_cmd_doorbells 2 53954 NULL
++mlx4_num_eq_uar_53965 mlx4_num_eq_uar 0 53965 NULL
++idetape_chrdev_write_53976 idetape_chrdev_write 3 53976 NULL
++mthca_reg_user_mr_53980 mthca_reg_user_mr 2-3 53980 NULL
++__ocfs2_xattr_set_value_outside_53981 __ocfs2_xattr_set_value_outside 5 53981 NULL
++ieee80211_if_fmt_dot11MeshHWMPperrMinInterval_53998 ieee80211_if_fmt_dot11MeshHWMPperrMinInterval 3 53998 NULL
++snd_pcm_lib_write_transfer_54018 snd_pcm_lib_write_transfer 4-2-5 54018 NULL
++cmpk_message_handle_tx_54024 cmpk_message_handle_tx 4 54024 NULL
++ipxrtr_route_packet_54036 ipxrtr_route_packet 4 54036 NULL
++pipeline_dec_packet_out_read_54052 pipeline_dec_packet_out_read 3 54052 NULL
++nl80211_send_disconnected_54056 nl80211_send_disconnected 5 54056 NULL
++rproc_state_read_54057 rproc_state_read 3 54057 NULL
++btrfs_start_transaction_54066 btrfs_start_transaction 2 54066 NULL
++_malloc_54077 _malloc 1 54077 NULL
++bitmap_bitremap_54096 bitmap_bitremap 4 54096 NULL
++altera_set_ir_pre_54103 altera_set_ir_pre 2 54103 NULL
++create_xattr_54106 create_xattr 5 54106 NULL
++inc_zcache_pers_zbytes_54107 inc_zcache_pers_zbytes 1 54107 NULL
++strn_len_54122 strn_len 0 54122 NULL
++isku_receive_54130 isku_receive 4 54130 NULL
++isr_host_acknowledges_read_54136 isr_host_acknowledges_read 3 54136 NULL
++i2400m_zrealloc_2x_54166 i2400m_zrealloc_2x 3 54166 NULL nohasharray
++memcpy_toiovec_54166 memcpy_toiovec 3 54166 &i2400m_zrealloc_2x_54166
++nouveau_falcon_create__54169 nouveau_falcon_create_ 8 54169 NULL
++acpi_os_read_memory_54186 acpi_os_read_memory 1-3 54186 NULL
++SyS_ipc_54206 SyS_ipc 3 54206 NULL
++__register_chrdev_54223 __register_chrdev 2-3 54223 NULL
++_format_mac_addr_54229 _format_mac_addr 2-0 54229 NULL
++pi_read_regr_54231 pi_read_regr 0 54231 NULL
++reada_add_block_54247 reada_add_block 2 54247 NULL
++xfs_dir2_sf_addname_hard_54254 xfs_dir2_sf_addname_hard 3 54254 NULL
++ceph_msgpool_get_54258 ceph_msgpool_get 2 54258 NULL
++wusb_prf_54261 wusb_prf 7 54261 NULL nohasharray
++audio_write_54261 audio_write 4 54261 &wusb_prf_54261
++mwifiex_getlog_read_54269 mwifiex_getlog_read 3 54269 NULL
++ubi_calc_data_len_54279 ubi_calc_data_len 0-3 54279 NULL
++altera_set_dr_post_54291 altera_set_dr_post 2 54291 NULL
++dlm_alloc_pagevec_54296 dlm_alloc_pagevec 1 54296 NULL
++get_iovec_page_array_54298 get_iovec_page_array 6 54298 NULL
++sprintf_54306 sprintf 0 54306 NULL
++irq_domain_associate_many_54307 irq_domain_associate_many 2 54307 NULL
++br_fdb_fillbuf_54339 br_fdb_fillbuf 0 54339 NULL
++__alloc_dev_table_54343 __alloc_dev_table 2 54343 NULL
++__get_free_pages_54352 __get_free_pages 0 54352 NULL nohasharray
++_osd_realloc_seg_54352 _osd_realloc_seg 3 54352 &__get_free_pages_54352
++tcf_hash_create_54360 tcf_hash_create 4 54360 NULL
++read_file_credit_dist_stats_54367 read_file_credit_dist_stats 3 54367 NULL
++vfs_readlink_54368 vfs_readlink 3 54368 NULL
++do_dccp_setsockopt_54377 do_dccp_setsockopt 5 54377 NULL nohasharray
++intel_sdvo_write_cmd_54377 intel_sdvo_write_cmd 4 54377 &do_dccp_setsockopt_54377
++ah_alloc_tmp_54378 ah_alloc_tmp 3-2 54378 NULL
++gart_unmap_page_54379 gart_unmap_page 2-3 54379 NULL
++snd_pcm_oss_read2_54387 snd_pcm_oss_read2 0-3 54387 NULL
++i386_mmap_check_54388 i386_mmap_check 0 54388 NULL
++__do_krealloc_54389 __do_krealloc 2 54389 NULL
++iwl_dbgfs_power_save_status_read_54392 iwl_dbgfs_power_save_status_read 3 54392 NULL
++copy_gadget_strings_54417 copy_gadget_strings 2-3 54417 NULL
++swiotlb_tbl_sync_single_54486 swiotlb_tbl_sync_single 2 54486 NULL
++simple_strtoull_54493 simple_strtoull 0 54493 NULL
++swiotlb_tbl_map_single_54495 swiotlb_tbl_map_single 3-0 54495 NULL
++btrfs_ordered_sum_size_54509 btrfs_ordered_sum_size 0-2 54509 NULL
++cgroup_write_X64_54514 cgroup_write_X64 5 54514 NULL nohasharray
++xen_bus_to_phys_54514 xen_bus_to_phys 0 54514 &cgroup_write_X64_54514
++rfc4106_set_key_54519 rfc4106_set_key 3 54519 NULL
++vmci_transport_dgram_enqueue_54525 vmci_transport_dgram_enqueue 4 54525 NULL
++viacam_read_54526 viacam_read 3 54526 NULL
++unix_dgram_connect_54535 unix_dgram_connect 3 54535 NULL
++setsockopt_54539 setsockopt 5 54539 NULL
++mwifiex_usb_submit_rx_urb_54558 mwifiex_usb_submit_rx_urb 2 54558 NULL
++SYSC_setsockopt_54561 SYSC_setsockopt 5 54561 NULL
++nfsd_vfs_write_54577 nfsd_vfs_write 6 54577 NULL
++fw_iso_buffer_init_54582 fw_iso_buffer_init 3 54582 NULL
++nvme_npages_54601 nvme_npages 0-1 54601 NULL
++fwSendNullPacket_54618 fwSendNullPacket 2 54618 NULL
++irq_timeout_read_54653 irq_timeout_read 3 54653 NULL
++dns_resolver_read_54658 dns_resolver_read 3 54658 NULL
++twl6030_interrupt_mask_54659 twl6030_interrupt_mask 2 54659 NULL
++kvm_read_cr3_54662 kvm_read_cr3 0 54662 NULL
++bio_kmalloc_54672 bio_kmalloc 2 54672 NULL
++vring_new_virtqueue_54673 vring_new_virtqueue 2 54673 NULL
++evm_read_key_54674 evm_read_key 3 54674 NULL
++resource_string_54699 resource_string 0 54699 NULL
++platform_get_irq_byname_54700 platform_get_irq_byname 0 54700 NULL
++rfkill_fop_read_54711 rfkill_fop_read 3 54711 NULL nohasharray
++compat_SyS_readv_54711 compat_SyS_readv 3 54711 &rfkill_fop_read_54711
++_add_sg_continuation_descriptor_54721 _add_sg_continuation_descriptor 3 54721 NULL
++ocfs2_control_write_54737 ocfs2_control_write 3 54737 NULL
++kzalloc_54740 kzalloc 1 54740 NULL
++wep_iv_read_54744 wep_iv_read 3 54744 NULL
++iio_event_chrdev_read_54757 iio_event_chrdev_read 3 54757 NULL
++batadv_iv_ogm_aggregate_new_54761 batadv_iv_ogm_aggregate_new 2 54761 NULL
++adis16480_show_firmware_date_54762 adis16480_show_firmware_date 3 54762 NULL
++flexcop_device_kmalloc_54793 flexcop_device_kmalloc 1 54793 NULL
++domain_init_54797 domain_init 2 54797 NULL
++ext3_find_goal_54801 ext3_find_goal 0 54801 NULL
++get_dev_size_54807 get_dev_size 0 54807 NULL
++nfsd_write_54809 nfsd_write 6 54809 NULL
++aes_decrypt_fail_read_54815 aes_decrypt_fail_read 3 54815 NULL nohasharray
++crypto_tfm_ctx_alignment_54815 crypto_tfm_ctx_alignment 0 54815 &aes_decrypt_fail_read_54815
++generic_perform_write_54832 generic_perform_write 3 54832 NULL
++write_rio_54837 write_rio 3 54837 NULL
++nouveau_engctx_create__54839 nouveau_engctx_create_ 8 54839 NULL nohasharray
++ext3_acl_from_disk_54839 ext3_acl_from_disk 2 54839 &nouveau_engctx_create__54839
++ufx_ops_write_54848 ufx_ops_write 3 54848 NULL
++printer_read_54851 printer_read 3 54851 NULL
++qib_reg_user_mr_54858 qib_reg_user_mr 2-3 54858 NULL
++alloc_ep_req_54860 alloc_ep_req 2 54860 NULL
++broadsheet_spiflash_rewrite_sector_54864 broadsheet_spiflash_rewrite_sector 2 54864 NULL
++prism_build_supp_rates_54865 prism_build_supp_rates 0 54865 NULL
++tcf_csum_ipv6_tcp_54877 tcf_csum_ipv6_tcp 4 54877 NULL
++iscsi_pool_init_54913 iscsi_pool_init 2-4 54913 NULL
++btrfs_stack_chunk_num_stripes_54923 btrfs_stack_chunk_num_stripes 0 54923 NULL
++mxms_structlen_54939 mxms_structlen 0 54939 NULL
++add_port_54941 add_port 2 54941 NULL
++virtblk_add_buf_wait_54943 virtblk_add_buf_wait 3-4 54943 NULL
++wl12xx_cmd_build_probe_req_54946 wl12xx_cmd_build_probe_req 6-8 54946 NULL
++ath9k_dump_btcoex_54949 ath9k_dump_btcoex 0 54949 NULL
++c4_add_card_54968 c4_add_card 3 54968 NULL
++iwl_pcie_dump_fh_54975 iwl_pcie_dump_fh 0 54975 NULL
++__proc_file_read_54978 __proc_file_read 3 54978 NULL
++ext3_xattr_get_54989 ext3_xattr_get 0 54989 NULL
++Bus_to_Virtual_54991 Bus_to_Virtual 1 54991 NULL
++mem_cgroup_get_lru_size_55008 mem_cgroup_get_lru_size 0 55008 NULL
++cx231xx_v4l2_read_55014 cx231xx_v4l2_read 3 55014 NULL
++paging32_get_level1_sp_gpa_55022 paging32_get_level1_sp_gpa 0 55022 NULL
++error_error_null_Frame_tx_start_read_55024 error_error_null_Frame_tx_start_read 3 55024 NULL
++__netdev_alloc_skb_ip_align_55067 __netdev_alloc_skb_ip_align 2 55067 NULL
++apei_exec_run_55075 apei_exec_run 0 55075 NULL
++bitmap_storage_alloc_55077 bitmap_storage_alloc 2 55077 NULL
++hx8357_spi_write_array_55095 hx8357_spi_write_array 3 55095 NULL
++rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read_55106 rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read 3 55106 NULL
++corrupt_data_55120 corrupt_data 0 55120 NULL
++crypto_ahash_setkey_55134 crypto_ahash_setkey 3 55134 NULL
++ocfs2_prepare_refcount_change_for_del_55137 ocfs2_prepare_refcount_change_for_del 3 55137 NULL nohasharray
++filldir_55137 filldir 3 55137 &ocfs2_prepare_refcount_change_for_del_55137
++ocfs2_truncate_file_55148 ocfs2_truncate_file 3 55148 NULL
++ieee80211_if_read_uapsd_queues_55150 ieee80211_if_read_uapsd_queues 3 55150 NULL
++mtd_get_fact_prot_info_55186 mtd_get_fact_prot_info 0 55186 NULL
++sel_write_relabel_55195 sel_write_relabel 3 55195 NULL
++sched_feat_write_55202 sched_feat_write 3 55202 NULL
++ht40allow_map_read_55209 ht40allow_map_read 3 55209 NULL
++__kfifo_dma_out_prepare_r_55211 __kfifo_dma_out_prepare_r 4-5 55211 NULL
++do_raw_setsockopt_55215 do_raw_setsockopt 5 55215 NULL
++qxl_alloc_client_monitors_config_55216 qxl_alloc_client_monitors_config 2 55216 NULL
++nouveau_mc_create__55217 nouveau_mc_create_ 4 55217 NULL
++dump_command_55220 dump_command 1 55220 NULL
++dbAllocDmap_55227 dbAllocDmap 0 55227 NULL
++tipc_port_reject_sections_55229 tipc_port_reject_sections 5 55229 NULL
++hash_netport6_expire_55232 hash_netport6_expire 3 55232 NULL
++register_unifi_sdio_55239 register_unifi_sdio 2 55239 NULL
++memcpy_fromiovec_55247 memcpy_fromiovec 3 55247 NULL
++persistent_ram_new_55286 persistent_ram_new 1-2 55286 NULL
++ptrace_request_55288 ptrace_request 3-4 55288 NULL
++rx_streaming_interval_read_55291 rx_streaming_interval_read 3 55291 NULL
++gsm_control_modem_55303 gsm_control_modem 3 55303 NULL
++qp_alloc_guest_work_55305 qp_alloc_guest_work 3-5 55305 NULL nohasharray
++__get_vm_area_node_55305 __get_vm_area_node 1 55305 &qp_alloc_guest_work_55305
++do_shmat_55336 do_shmat 5 55336 NULL
++vme_user_read_55338 vme_user_read 3 55338 NULL
++sctp_datamsg_from_user_55342 sctp_datamsg_from_user 4 55342 NULL nohasharray
++__wa_xfer_setup_sizes_55342 __wa_xfer_setup_sizes 0 55342 &sctp_datamsg_from_user_55342
++__memblock_alloc_base_55359 __memblock_alloc_base 1-2 55359 NULL
++acpi_system_read_event_55362 acpi_system_read_event 3 55362 NULL
++nf_nat_ipv4_manip_pkt_55387 nf_nat_ipv4_manip_pkt 2 55387 NULL
++iwl_dbgfs_plcp_delta_read_55407 iwl_dbgfs_plcp_delta_read 3 55407 NULL
++si476x_radio_read_rds_blckcnt_blob_55427 si476x_radio_read_rds_blckcnt_blob 3 55427 NULL
++alloc_skb_55439 alloc_skb 1 55439 NULL
++__vxge_hw_channel_allocate_55462 __vxge_hw_channel_allocate 3 55462 NULL
++isdnhdlc_decode_55466 isdnhdlc_decode 0 55466 NULL
++cx23888_ir_rx_read_55473 cx23888_ir_rx_read 3 55473 NULL
++batadv_unicast_push_and_fill_skb_55474 batadv_unicast_push_and_fill_skb 2 55474 NULL
++snd_pcm_lib_write_55483 snd_pcm_lib_write 0-3 55483 NULL
++i2o_pool_alloc_55485 i2o_pool_alloc 4 55485 NULL
++ocfs2_rec_clusters_55501 ocfs2_rec_clusters 0 55501 NULL
++ext4_flex_bg_size_55502 ext4_flex_bg_size 0 55502 NULL
++cfpkt_pad_trail_55511 cfpkt_pad_trail 2 55511 NULL nohasharray
++tx_tx_done_int_template_read_55511 tx_tx_done_int_template_read 3 55511 &cfpkt_pad_trail_55511
++ea_get_55522 ea_get 0 55522 NULL
++buffer_size_55534 buffer_size 0 55534 NULL
++set_msr_interception_55538 set_msr_interception 2 55538 NULL
++tty_port_register_device_55543 tty_port_register_device 3 55543 NULL
++hash_netport4_expire_55584 hash_netport4_expire 3 55584 NULL
++add_partition_55588 add_partition 2 55588 NULL
++SyS_keyctl_55602 SyS_keyctl 4 55602 NULL
++free_pages_55603 free_pages 1 55603 NULL
++macvtap_put_user_55609 macvtap_put_user 4 55609 NULL
++selinux_setprocattr_55611 selinux_setprocattr 4 55611 NULL
++edge_tty_recv_55622 edge_tty_recv 3 55622 NULL
++reiserfs_xattr_get_55628 reiserfs_xattr_get 0 55628 NULL nohasharray
++pktgen_if_write_55628 pktgen_if_write 3 55628 &reiserfs_xattr_get_55628
++dvb_dmxdev_set_buffer_size_55643 dvb_dmxdev_set_buffer_size 2 55643 NULL
++mlx4_buddy_alloc_55647 mlx4_buddy_alloc 2 55647 NULL
++xfs_bmbt_maxrecs_55649 xfs_bmbt_maxrecs 0-2 55649 NULL
++ib_umad_compat_ioctl_55650 ib_umad_compat_ioctl 3 55650 NULL
++cfg80211_send_rx_assoc_55651 cfg80211_send_rx_assoc 4 55651 NULL
++read_oldmem_55658 read_oldmem 3 55658 NULL
++lpfc_idiag_queinfo_read_55662 lpfc_idiag_queinfo_read 3 55662 NULL
++il_dbgfs_tx_queue_read_55668 il_dbgfs_tx_queue_read 3 55668 NULL
++get_info_55681 get_info 3 55681 NULL
++wil_vring_alloc_skb_55703 wil_vring_alloc_skb 4 55703 NULL
++__videobuf_alloc_uncached_55711 __videobuf_alloc_uncached 1 55711 NULL
++pm8001_store_update_fw_55716 pm8001_store_update_fw 4 55716 NULL
++mtdswap_init_55719 mtdswap_init 2 55719 NULL
++tap_pwup_write_55723 tap_pwup_write 3 55723 NULL
++__iio_allocate_kfifo_55738 __iio_allocate_kfifo 2 55738 NULL
++set_local_name_55757 set_local_name 4 55757 NULL
++strlen_55778 strlen 0 55778 NULL
++set_spte_55783 set_spte 5-4 55783 NULL
++req_bio_endio_55786 req_bio_endio 3 55786 NULL nohasharray
++conf_read_55786 conf_read 3 55786 &req_bio_endio_55786
++uwb_rc_neh_grok_event_55799 uwb_rc_neh_grok_event 3 55799 NULL
++sb16_copy_from_user_55836 sb16_copy_from_user 10-6-7 55836 NULL
++ip_hdrlen_55849 ip_hdrlen 0 55849 NULL
++hcd_alloc_coherent_55862 hcd_alloc_coherent 5 55862 NULL
++shmem_setxattr_55867 shmem_setxattr 4 55867 NULL
++hsc_write_55875 hsc_write 3 55875 NULL
++pm_qos_power_read_55891 pm_qos_power_read 3 55891 NULL
++snd_pcm_hw_param_value_min_55917 snd_pcm_hw_param_value_min 0 55917 NULL
++paging64_page_fault_55942 paging64_page_fault 2 55942 NULL
++sel_read_policy_55947 sel_read_policy 3 55947 NULL
++ceph_get_direct_page_vector_55956 ceph_get_direct_page_vector 2 55956 NULL
++simple_read_from_buffer_55957 simple_read_from_buffer 2-5 55957 NULL
++tx_tx_imm_resp_read_55964 tx_tx_imm_resp_read 3 55964 NULL
++ssb_bus_pcmciabus_register_56020 ssb_bus_pcmciabus_register 3 56020 NULL
++nvme_alloc_iod_56027 nvme_alloc_iod 1-2 56027 NULL
++dccp_sendmsg_56058 dccp_sendmsg 4 56058 NULL
++__set_discard_56081 __set_discard 2 56081 NULL
++pscsi_get_bio_56103 pscsi_get_bio 1 56103 NULL
++usb_alloc_stream_buffers_56123 usb_alloc_stream_buffers 3 56123 NULL
++kmem_zalloc_large_56128 kmem_zalloc_large 1 56128 NULL
++sel_read_handle_status_56139 sel_read_handle_status 3 56139 NULL
++map_addr_56144 map_addr 7 56144 NULL
++__i2c_transfer_56162 __i2c_transfer 0 56162 NULL
++rawv6_setsockopt_56165 rawv6_setsockopt 5 56165 NULL
++create_irq_nr_56180 create_irq_nr 1 56180 NULL
++ath9k_dump_legacy_btcoex_56194 ath9k_dump_legacy_btcoex 0 56194 NULL
++skb_headroom_56200 skb_headroom 0 56200 NULL
++usb_dump_iad_descriptor_56204 usb_dump_iad_descriptor 0 56204 NULL
++ncp_read_bounce_size_56221 ncp_read_bounce_size 0-1 56221 NULL
++vring_add_indirect_56222 vring_add_indirect 4 56222 NULL
++ocfs2_find_xe_in_bucket_56224 ocfs2_find_xe_in_bucket 0 56224 NULL
++cp210x_get_config_56229 cp210x_get_config 4 56229 NULL
++do_ipt_set_ctl_56238 do_ipt_set_ctl 4 56238 NULL
++fd_copyin_56247 fd_copyin 3 56247 NULL
++sk_rmem_schedule_56255 sk_rmem_schedule 3 56255 NULL
++il4965_ucode_general_stats_read_56277 il4965_ucode_general_stats_read 3 56277 NULL
++ieee80211_if_fmt_user_power_level_56283 ieee80211_if_fmt_user_power_level 3 56283 NULL
++RESIZE_IF_NEEDED_56286 RESIZE_IF_NEEDED 2 56286 NULL
++dvb_aplay_56296 dvb_aplay 3 56296 NULL
++btmrvl_hscfgcmd_read_56303 btmrvl_hscfgcmd_read 3 56303 NULL
++compat_cdrom_read_audio_56304 compat_cdrom_read_audio 4 56304 NULL
++pipeline_pre_to_defrag_swi_read_56321 pipeline_pre_to_defrag_swi_read 3 56321 NULL
++journal_init_revoke_table_56331 journal_init_revoke_table 1 56331 NULL
++snd_rawmidi_read_56337 snd_rawmidi_read 3 56337 NULL
++sixpack_compat_ioctl_56346 sixpack_compat_ioctl 4 56346 NULL
++vxge_os_dma_malloc_async_56348 vxge_os_dma_malloc_async 3 56348 NULL
++iov_iter_copy_from_user_atomic_56368 iov_iter_copy_from_user_atomic 4 56368 NULL
++dev_read_56369 dev_read 3 56369 NULL
++write_gssp_56404 write_gssp 3 56404 NULL
++ocfs2_control_read_56405 ocfs2_control_read 3 56405 NULL
++__get_vm_area_caller_56416 __get_vm_area_caller 1 56416 NULL nohasharray
++acpi_os_write_memory_56416 acpi_os_write_memory 1-3 56416 &__get_vm_area_caller_56416
++store_msg_56417 store_msg 3 56417 NULL
++pppol2tp_sendmsg_56420 pppol2tp_sendmsg 4 56420 NULL
++fl_create_56435 fl_create 5 56435 NULL
++gnttab_map_56439 gnttab_map 2 56439 NULL
++cx231xx_init_isoc_56453 cx231xx_init_isoc 3-2 56453 NULL
++set_connectable_56458 set_connectable 4 56458 NULL
++osd_req_list_partition_objects_56464 osd_req_list_partition_objects 5 56464 NULL
++putused_user_56467 putused_user 3 56467 NULL
++calc_linear_pos_56472 calc_linear_pos 0-3 56472 NULL
++global_rt_period_56476 global_rt_period 0 56476 NULL
++crypto_shash_alignmask_56486 crypto_shash_alignmask 0 56486 NULL
++ieee80211_rx_mgmt_probe_beacon_56491 ieee80211_rx_mgmt_probe_beacon 3 56491 NULL
++init_map_ip_56508 init_map_ip 5 56508 NULL
++cfg80211_connect_result_56515 cfg80211_connect_result 4-6 56515 NULL
++ip_options_get_56538 ip_options_get 4 56538 NULL
++ocfs2_change_extent_flag_56549 ocfs2_change_extent_flag 5 56549 NULL
++alloc_apertures_56561 alloc_apertures 1 56561 NULL
++rs_sta_dbgfs_stats_table_read_56573 rs_sta_dbgfs_stats_table_read 3 56573 NULL
++portcntrs_2_read_56586 portcntrs_2_read 3 56586 NULL
++event_filter_write_56609 event_filter_write 3 56609 NULL
++gather_array_56641 gather_array 3 56641 NULL
++uvc_debugfs_stats_read_56651 uvc_debugfs_stats_read 3 56651 NULL
++snd_gus_dram_read_56686 snd_gus_dram_read 4 56686 NULL nohasharray
++da9055_gpio_to_irq_56686 da9055_gpio_to_irq 2 56686 &snd_gus_dram_read_56686
++build_map_info_56696 build_map_info 2 56696 NULL
++dvb_ringbuffer_read_user_56702 dvb_ringbuffer_read_user 3 56702 NULL
++sta_flags_read_56710 sta_flags_read 3 56710 NULL
++ipv6_getsockopt_sticky_56711 ipv6_getsockopt_sticky 5 56711 NULL
++__wa_xfer_setup_segs_56725 __wa_xfer_setup_segs 2 56725 NULL
++pcpu_populate_chunk_56741 pcpu_populate_chunk 2-3 56741 NULL
++drm_agp_bind_pages_56748 drm_agp_bind_pages 3 56748 NULL
++btrfsic_map_block_56751 btrfsic_map_block 2 56751 NULL
++alloc_iommu_56778 alloc_iommu 2-3 56778 NULL
++__carl9170_rx_56784 __carl9170_rx 3 56784 NULL
++hash_lookup_56792 hash_lookup 2 56792 NULL
++do_syslog_56807 do_syslog 3 56807 NULL
++mtdchar_write_56831 mtdchar_write 3 56831 NULL nohasharray
++ntfs_rl_realloc_56831 ntfs_rl_realloc 3 56831 &mtdchar_write_56831
++snd_rawmidi_kernel_write1_56847 snd_rawmidi_kernel_write1 4 56847 NULL
++si476x_radio_read_agc_blob_56849 si476x_radio_read_agc_blob 3 56849 NULL
++wb_lookup_56858 wb_lookup 2 56858 NULL
++ext3_xattr_ibody_get_56880 ext3_xattr_ibody_get 0 56880 NULL
++pvr2_debugifc_print_status_56890 pvr2_debugifc_print_status 3 56890 NULL
++debug_debug3_read_56894 debug_debug3_read 3 56894 NULL
++batadv_tt_update_changes_56895 batadv_tt_update_changes 3 56895 NULL
++strcspn_56913 strcspn 0 56913 NULL
++__kfifo_out_56927 __kfifo_out 0-3 56927 NULL
++check_header_56930 check_header 2 56930 NULL
++journal_init_revoke_56933 journal_init_revoke 2 56933 NULL
++diva_get_driver_info_56967 diva_get_driver_info 0 56967 NULL
++nouveau_device_create__56984 nouveau_device_create_ 6 56984 NULL
++vlsi_alloc_ring_57003 vlsi_alloc_ring 3-4 57003 NULL
++btrfs_super_csum_size_57004 btrfs_super_csum_size 0 57004 NULL
++aircable_process_packet_57027 aircable_process_packet 4 57027 NULL
++skb_network_offset_57043 skb_network_offset 0 57043 NULL nohasharray
++ieee80211_if_fmt_state_57043 ieee80211_if_fmt_state 3 57043 &skb_network_offset_57043
++bytes_to_samples_57049 bytes_to_samples 0-2 57049 NULL
++xfs_buf_read_map_57053 xfs_buf_read_map 3 57053 NULL
++autofs_dev_ioctl_compat_57059 autofs_dev_ioctl_compat 3 57059 NULL
++cx2341x_ctrl_new_std_57061 cx2341x_ctrl_new_std 4 57061 NULL
++sca3000_read_data_57064 sca3000_read_data 4 57064 NULL
++pcmcia_replace_cis_57066 pcmcia_replace_cis 3 57066 NULL
++sis190_try_rx_copy_57069 sis190_try_rx_copy 3 57069 NULL
++tracing_set_trace_write_57096 tracing_set_trace_write 3 57096 NULL
++crypto_compress_ctxsize_57109 crypto_compress_ctxsize 0 57109 NULL
++sysfs_write_file_57116 sysfs_write_file 3 57116 NULL
++cipso_v4_gentag_loc_57119 cipso_v4_gentag_loc 0 57119 NULL
++nl80211_send_deauth_57136 nl80211_send_deauth 4 57136 NULL nohasharray
++rds_ib_sub_signaled_57136 rds_ib_sub_signaled 2 57136 &nl80211_send_deauth_57136 nohasharray
++ima_show_htable_value_57136 ima_show_htable_value 2 57136 &rds_ib_sub_signaled_57136
++snd_sonicvibes_getdmac_57140 snd_sonicvibes_getdmac 0 57140 NULL
++udl_prime_create_57159 udl_prime_create 2 57159 NULL
++__ipath_get_user_pages_57166 __ipath_get_user_pages 1-2 57166 NULL
++stk_prepare_sio_buffers_57168 stk_prepare_sio_buffers 2 57168 NULL
++rx_hw_stuck_read_57179 rx_hw_stuck_read 3 57179 NULL
++tt3650_ci_msg_57219 tt3650_ci_msg 4 57219 NULL
++dma_fifo_alloc_57236 dma_fifo_alloc 5-3-2 57236 NULL
++flush_space_57241 flush_space 3 57241 NULL
++ieee80211_if_fmt_tsf_57249 ieee80211_if_fmt_tsf 3 57249 NULL
++oprofilefs_ulong_from_user_57251 oprofilefs_ulong_from_user 3 57251 NULL
++alloc_flex_gd_57259 alloc_flex_gd 1 57259 NULL
++security_mmap_file_57268 security_mmap_file 0 57268 NULL
++pstore_file_read_57288 pstore_file_read 3 57288 NULL
++snd_pcm_read_57289 snd_pcm_read 3 57289 NULL
++ath6kl_buf_alloc_57304 ath6kl_buf_alloc 1 57304 NULL
++fw_file_size_57307 fw_file_size 0 57307 NULL
++ftdi_elan_write_57309 ftdi_elan_write 3 57309 NULL
++__mxt_write_reg_57326 __mxt_write_reg 3 57326 NULL
++ocfs2_xattr_shrink_size_57328 ocfs2_xattr_shrink_size 3 57328 NULL
++check_mirror_57342 check_mirror 1-2 57342 NULL nohasharray
++usblp_read_57342 usblp_read 3 57342 &check_mirror_57342
++print_devstats_dot11RTSFailureCount_57347 print_devstats_dot11RTSFailureCount 3 57347 NULL
++tipc_bclink_stats_57372 tipc_bclink_stats 2 57372 NULL
++max8997_irq_domain_map_57375 max8997_irq_domain_map 2 57375 NULL
++tty_register_device_attr_57381 tty_register_device_attr 2 57381 NULL
++read_file_blob_57406 read_file_blob 3 57406 NULL
++enclosure_register_57412 enclosure_register 3 57412 NULL
++gre_manip_pkt_57416 gre_manip_pkt 4 57416 NULL
++compat_keyctl_instantiate_key_iov_57431 compat_keyctl_instantiate_key_iov 3 57431 NULL nohasharray
++alloc_ftrace_hash_57431 alloc_ftrace_hash 1 57431 &compat_keyctl_instantiate_key_iov_57431
++copy_to_user_fromio_57432 copy_to_user_fromio 3 57432 NULL
++sys_pselect6_57449 sys_pselect6 1 57449 NULL
++ReadReg_57453 ReadReg 0 57453 NULL
++__roundup_pow_of_two_57461 __roundup_pow_of_two 0 57461 NULL
++crypto_tfm_alg_blocksize_57463 crypto_tfm_alg_blocksize 0 57463 NULL nohasharray
++send_midi_async_57463 send_midi_async 3 57463 &crypto_tfm_alg_blocksize_57463
++sisusb_clear_vram_57466 sisusb_clear_vram 3-2 57466 NULL
++ieee80211_if_read_flags_57470 ieee80211_if_read_flags 3 57470 NULL nohasharray
++sep_lock_user_pages_57470 sep_lock_user_pages 2-3 57470 &ieee80211_if_read_flags_57470
++ocfs2_write_cluster_57483 ocfs2_write_cluster 8-2-9 57483 NULL
++bnad_debugfs_write_regwr_57500 bnad_debugfs_write_regwr 3 57500 NULL
++skb_headlen_57501 skb_headlen 0 57501 NULL
++copy_in_user_57502 copy_in_user 3 57502 NULL
++ks8842_read32_57505 ks8842_read32 0 57505 NULL nohasharray
++ckhdid_printf_57505 ckhdid_printf 2 57505 &ks8842_read32_57505
++init_tag_map_57515 init_tag_map 3 57515 NULL
++wil_read_file_ssid_57517 wil_read_file_ssid 3 57517 NULL nohasharray
++il_dbgfs_force_reset_read_57517 il_dbgfs_force_reset_read 3 57517 &wil_read_file_ssid_57517
++inode_permission_57531 inode_permission 0 57531 NULL
++acpi_dev_get_resources_57534 acpi_dev_get_resources 0 57534 NULL nohasharray
++DoC_Probe_57534 DoC_Probe 1 57534 &acpi_dev_get_resources_57534
++ext4_group_first_block_no_57559 ext4_group_first_block_no 0-2 57559 NULL
++snd_pcm_playback_ioctl1_57569 snd_pcm_playback_ioctl1 0 57569 NULL
++uio_find_mem_index_57584 uio_find_mem_index 0 57584 NULL
++read_file_spectral_fft_period_57593 read_file_spectral_fft_period 3 57593 NULL
++wm831x_gpio_to_irq_57614 wm831x_gpio_to_irq 2 57614 NULL
++sk_stream_alloc_skb_57622 sk_stream_alloc_skb 2 57622 NULL
++tx_tx_retry_template_read_57623 tx_tx_retry_template_read 3 57623 NULL
++osdmap_set_max_osd_57630 osdmap_set_max_osd 2 57630 NULL nohasharray
++sisusbcon_putcs_57630 sisusbcon_putcs 3 57630 &osdmap_set_max_osd_57630
++mem_read_57631 mem_read 3 57631 NULL
++tc3589x_irq_map_57639 tc3589x_irq_map 2 57639 NULL
++sys_mq_timedsend_57661 sys_mq_timedsend 3 57661 NULL
++r3964_write_57662 r3964_write 4 57662 NULL
++proc_ns_readlink_57664 proc_ns_readlink 3 57664 NULL
++__lgwrite_57669 __lgwrite 4 57669 NULL
++ieee80211_MFIE_rate_len_57692 ieee80211_MFIE_rate_len 0 57692 NULL
++f1x_match_to_this_node_57695 f1x_match_to_this_node 3 57695 NULL
++check_prefree_segments_57702 check_prefree_segments 2 57702 NULL
++i2400m_rx_stats_read_57706 i2400m_rx_stats_read 3 57706 NULL
++ieee80211_if_read_dot11MeshHWMPconfirmationInterval_57722 ieee80211_if_read_dot11MeshHWMPconfirmationInterval 3 57722 NULL
++nouveau_gpio_create__57735 nouveau_gpio_create_ 4-5 57735 NULL
++compat_sys_set_mempolicy_57742 compat_sys_set_mempolicy 3 57742 NULL
++ieee80211_if_fmt_dot11MeshHWMPpreqMinInterval_57762 ieee80211_if_fmt_dot11MeshHWMPpreqMinInterval 3 57762 NULL
++SYSC_process_vm_writev_57776 SYSC_process_vm_writev 3-5 57776 NULL
++ld2_57794 ld2 0 57794 NULL
++ivtv_read_57796 ivtv_read 3 57796 NULL
++generic_ptrace_peekdata_57806 generic_ptrace_peekdata 2 57806 NULL
++ipath_user_sdma_num_pages_57813 ipath_user_sdma_num_pages 0 57813 NULL
++usb_dump_config_57817 usb_dump_config 0 57817 NULL
++bfad_debugfs_read_regrd_57830 bfad_debugfs_read_regrd 3 57830 NULL
++copy_to_user_57835 copy_to_user 3 57835 NULL
++flash_read_57843 flash_read 3 57843 NULL
++xt_alloc_table_info_57903 xt_alloc_table_info 1 57903 NULL
++emi26_writememory_57908 emi26_writememory 4 57908 NULL
++iio_read_first_n_kfifo_57910 iio_read_first_n_kfifo 2 57910 NULL
++memcg_caches_array_size_57918 memcg_caches_array_size 0-1 57918 NULL
++twl_i2c_write_57923 twl_i2c_write 4-3 57923 NULL
++__snd_gf1_look16_57925 __snd_gf1_look16 0 57925 NULL
++sel_read_handle_unknown_57933 sel_read_handle_unknown 3 57933 NULL
++key_algorithm_read_57946 key_algorithm_read 3 57946 NULL
++ip_set_alloc_57953 ip_set_alloc 1 57953 NULL nohasharray
++ioat3_dca_count_dca_slots_57953 ioat3_dca_count_dca_slots 0 57953 &ip_set_alloc_57953
++i915_cache_sharing_write_57961 i915_cache_sharing_write 3 57961 NULL
++hfc_empty_fifo_57972 hfc_empty_fifo 2 57972 NULL
++c2_reg_user_mr_57982 c2_reg_user_mr 2-3 57982 NULL
++rx_reset_counter_read_58001 rx_reset_counter_read 3 58001 NULL
++regcache_rbtree_insert_to_block_58009 regcache_rbtree_insert_to_block 5 58009 NULL
++iwl_dbgfs_ucode_rx_stats_read_58023 iwl_dbgfs_ucode_rx_stats_read 3 58023 NULL
++io_playback_transfer_58030 io_playback_transfer 4 58030 NULL
++mce_async_out_58056 mce_async_out 3 58056 NULL
++ocfs2_find_leaf_58065 ocfs2_find_leaf 0 58065 NULL
++dt3155_alloc_coherent_58073 dt3155_alloc_coherent 2 58073 NULL
++cm4040_write_58079 cm4040_write 3 58079 NULL
++udi_log_event_58105 udi_log_event 3 58105 NULL
++savemem_58129 savemem 3 58129 NULL
++ipv6_flowlabel_opt_58135 ipv6_flowlabel_opt 3 58135 NULL nohasharray
++slhc_init_58135 slhc_init 1-2 58135 &ipv6_flowlabel_opt_58135
++garmin_write_bulk_58191 garmin_write_bulk 3 58191 NULL
++asix_write_cmd_58192 asix_write_cmd 5 58192 NULL
++ieee80211_if_fmt_flags_58205 ieee80211_if_fmt_flags 3 58205 NULL
++hva_to_pfn_58241 hva_to_pfn 1 58241 NULL
++btrfsic_create_link_to_next_block_58246 btrfsic_create_link_to_next_block 4 58246 NULL
++read_file_debug_58256 read_file_debug 3 58256 NULL
++cfg80211_mgmt_tx_status_58266 cfg80211_mgmt_tx_status 4 58266 NULL
++profile_load_58267 profile_load 3 58267 NULL
++acpi_ds_build_internal_package_obj_58271 acpi_ds_build_internal_package_obj 3 58271 NULL
++r100_mm_rreg_58276 r100_mm_rreg 0 58276 NULL
++iscsi_decode_text_input_58292 iscsi_decode_text_input 4 58292 NULL
++ieee80211_if_read_dot11MeshTTL_58307 ieee80211_if_read_dot11MeshTTL 3 58307 NULL
++tx_tx_start_int_templates_read_58324 tx_tx_start_int_templates_read 3 58324 NULL
++pcim_iomap_58334 pcim_iomap 3 58334 NULL
++diva_init_dma_map_58336 diva_init_dma_map 3 58336 NULL
++next_pidmap_58347 next_pidmap 2 58347 NULL
++SyS_migrate_pages_58348 SyS_migrate_pages 2 58348 NULL
++vmalloc_to_sg_58354 vmalloc_to_sg 2 58354 NULL
++save_hint_58359 save_hint 2 58359 NULL
++brcmf_debugfs_sdio_counter_read_58369 brcmf_debugfs_sdio_counter_read 3 58369 NULL
++hash_ipportnet6_expire_58379 hash_ipportnet6_expire 3 58379 NULL
++il_dbgfs_status_read_58388 il_dbgfs_status_read 3 58388 NULL
++kvm_mmu_write_protect_pt_masked_58406 kvm_mmu_write_protect_pt_masked 3 58406 NULL
++i2400m_pld_size_58415 i2400m_pld_size 0 58415 NULL
++__mlx4_alloc_mtt_range_58418 __mlx4_alloc_mtt_range 2 58418 NULL
++__iio_add_chan_devattr_58451 __iio_add_chan_devattr 0 58451 NULL
++capabilities_read_58457 capabilities_read 3 58457 NULL
++batadv_iv_ogm_aggr_packet_58462 batadv_iv_ogm_aggr_packet 3 58462 NULL
++lpfc_idiag_baracc_read_58466 lpfc_idiag_baracc_read 3 58466 NULL nohasharray
++compat_do_ipt_set_ctl_58466 compat_do_ipt_set_ctl 4 58466 &lpfc_idiag_baracc_read_58466
++snd_gf1_read_addr_58483 snd_gf1_read_addr 0 58483 NULL
++snd_rme96_capture_copy_58484 snd_rme96_capture_copy 5 58484 NULL
++batadv_bla_is_backbone_gw_58488 batadv_bla_is_backbone_gw 3 58488 NULL
++memblock_alloc_try_nid_58493 memblock_alloc_try_nid 1-2 58493 NULL
++rndis_add_response_58544 rndis_add_response 2 58544 NULL
++__clear_discard_58546 __clear_discard 2 58546 NULL
++wrap_max_58548 wrap_max 0-1-2 58548 NULL
++wep_decrypt_fail_read_58567 wep_decrypt_fail_read 3 58567 NULL
++sip_sprintf_addr_port_58574 sip_sprintf_addr_port 0 58574 NULL
++scnprint_mac_oui_58578 scnprint_mac_oui 3-0 58578 NULL
++ea_read_inline_58589 ea_read_inline 0 58589 NULL
++isku_sysfs_read_keys_thumbster_58590 isku_sysfs_read_keys_thumbster 6 58590 NULL
++xip_file_read_58592 xip_file_read 3 58592 NULL
++gdth_search_isa_58595 gdth_search_isa 1 58595 NULL
++ebt_buf_count_58607 ebt_buf_count 0 58607 NULL
++skb_copy_to_page_nocache_58624 skb_copy_to_page_nocache 6 58624 NULL
++module_alloc_update_bounds_rx_58634 module_alloc_update_bounds_rx 1 58634 NULL nohasharray
++efi_ioremap_58634 efi_ioremap 1-2 58634 &module_alloc_update_bounds_rx_58634
++tx_tx_start_fw_gen_read_58648 tx_tx_start_fw_gen_read 3 58648 NULL
++ocfs2_block_to_cluster_start_58653 ocfs2_block_to_cluster_start 2 58653 NULL
++__gfn_to_pfn_58671 __gfn_to_pfn 2 58671 NULL
++iwl_trans_send_cmd_58681 iwl_trans_send_cmd 0 58681 NULL
++find_zero_58685 find_zero 0-1 58685 NULL nohasharray
++mcs7830_set_reg_async_58685 mcs7830_set_reg_async 3 58685 &find_zero_58685
++uwb_bce_print_IEs_58686 uwb_bce_print_IEs 4 58686 NULL
++pci_alloc_consistent_58688 pci_alloc_consistent 0 58688 NULL
++tps6586x_writes_58689 tps6586x_writes 3-2 58689 NULL
++vmalloc_node_58700 vmalloc_node 1 58700 NULL
++acpi_map_58725 acpi_map 1-2 58725 NULL
++da9052_gpio_to_irq_58729 da9052_gpio_to_irq 2 58729 NULL
++csum_exist_in_range_58730 csum_exist_in_range 2 58730 NULL
++frames_to_bytes_58741 frames_to_bytes 0-2 58741 NULL
++ieee80211_if_write_tkip_mic_test_58748 ieee80211_if_write_tkip_mic_test 3 58748 NULL
++agp_allocate_memory_58761 agp_allocate_memory 2 58761 NULL
++oblock_to_dblock_58762 oblock_to_dblock 0-2 58762 NULL
++__do_config_autodelink_58763 __do_config_autodelink 3 58763 NULL
++regmap_calc_reg_len_58795 regmap_calc_reg_len 0 58795 NULL
++raw_send_hdrinc_58803 raw_send_hdrinc 4 58803 NULL
++isku_sysfs_read_58806 isku_sysfs_read 6-5 58806 NULL
++ep_read_58813 ep_read 3 58813 NULL
++command_write_58841 command_write 3 58841 NULL
++ocfs2_truncate_log_append_58850 ocfs2_truncate_log_append 3 58850 NULL
++ath6kl_wmi_send_action_cmd_58860 ath6kl_wmi_send_action_cmd 7 58860 NULL
++gs_alloc_req_58883 gs_alloc_req 2 58883 NULL
++cs553x_init_one_58886 cs553x_init_one 3 58886 NULL
++raw_ctl_compat_ioctl_58905 raw_ctl_compat_ioctl 3 58905 NULL
++print_devstats_dot11FCSErrorCount_58919 print_devstats_dot11FCSErrorCount 3 58919 NULL
++tun_chr_compat_ioctl_58921 tun_chr_compat_ioctl 3 58921 NULL
++pipeline_cs_rx_packet_out_read_58926 pipeline_cs_rx_packet_out_read 3 58926 NULL
++st5481_isoc_flatten_58952 st5481_isoc_flatten 0 58952 NULL
++ieee80211_if_fmt_dot11MeshHWMPactivePathToRootTimeout_58965 ieee80211_if_fmt_dot11MeshHWMPactivePathToRootTimeout 3 58965 NULL
++idx_to_kaddr_58968 idx_to_kaddr 0 58968 NULL
++crypto_aead_ivsize_58970 crypto_aead_ivsize 0 58970 NULL
++remap_to_cache_dirty_58991 remap_to_cache_dirty 4-3 58991 NULL
++handle_rx_packet_58993 handle_rx_packet 3 58993 NULL
++edac_align_ptr_59003 edac_align_ptr 0 59003 NULL
++ep_write_59008 ep_write 3 59008 NULL
++i915_ring_stop_write_59010 i915_ring_stop_write 3 59010 NULL
++SyS_preadv_59029 SyS_preadv 3 59029 NULL
++init_pci_cap_msi_perm_59033 init_pci_cap_msi_perm 2 59033 NULL
++selinux_transaction_write_59038 selinux_transaction_write 3 59038 NULL
++crypto_aead_reqsize_59039 crypto_aead_reqsize 0 59039 NULL
++regmap_bulk_write_59049 regmap_bulk_write 4-2 59049 NULL
++vfio_device_fops_compat_ioctl_59111 vfio_device_fops_compat_ioctl 3 59111 NULL
++mmc_sd_num_wr_blocks_59112 mmc_sd_num_wr_blocks 0 59112 NULL
++scsi_io_completion_59122 scsi_io_completion 2 59122 NULL
++nfc_llcp_send_i_frame_59130 nfc_llcp_send_i_frame 3 59130 NULL
++__iio_add_event_config_attrs_59136 __iio_add_event_config_attrs 0 59136 NULL
++print_devstats_dot11RTSSuccessCount_59145 print_devstats_dot11RTSSuccessCount 3 59145 NULL nohasharray
++framebuffer_alloc_59145 framebuffer_alloc 1 59145 &print_devstats_dot11RTSSuccessCount_59145
++radeon_compat_ioctl_59150 radeon_compat_ioctl 2 59150 NULL
++pvr2_hdw_report_clients_59152 pvr2_hdw_report_clients 3 59152 NULL
++setup_window_59178 setup_window 4-2-5-7 59178 NULL
++ocfs2_move_extent_59187 ocfs2_move_extent 3 59187 NULL
++xfs_iext_realloc_indirect_59211 xfs_iext_realloc_indirect 2 59211 NULL
++fast_rx_path_59214 fast_rx_path 3 59214 NULL
++inftl_partscan_59216 inftl_partscan 0 59216 NULL
++skb_transport_header_59223 skb_transport_header 0 59223 NULL
++dt3155_read_59226 dt3155_read 3 59226 NULL
++paging64_gpte_to_gfn_lvl_59229 paging64_gpte_to_gfn_lvl 0-1-2 59229 NULL
++tty_prepare_flip_string_flags_59240 tty_prepare_flip_string_flags 4 59240 NULL
++solo_v4l2_read_59247 solo_v4l2_read 3 59247 NULL
++nla_len_59258 nla_len 0 59258 NULL
++btrfs_insert_dir_item_59304 btrfs_insert_dir_item 4 59304 NULL
++fd_copyout_59323 fd_copyout 3 59323 NULL
++read_9287_modal_eeprom_59327 read_9287_modal_eeprom 3 59327 NULL
++rx_defrag_in_process_called_read_59338 rx_defrag_in_process_called_read 3 59338 NULL
++paging64_get_level1_sp_gpa_59346 paging64_get_level1_sp_gpa 0 59346 NULL nohasharray
++xfs_attrmulti_attr_set_59346 xfs_attrmulti_attr_set 4 59346 &paging64_get_level1_sp_gpa_59346
++xfs_dir2_sf_entsize_59366 xfs_dir2_sf_entsize 0-2 59366 NULL
++pvr2_debugifc_print_info_59380 pvr2_debugifc_print_info 3 59380 NULL
++fc_frame_alloc_fill_59394 fc_frame_alloc_fill 2 59394 NULL
++isku_sysfs_read_keys_function_59412 isku_sysfs_read_keys_function 6 59412 NULL
++vxge_hw_ring_rxds_per_block_get_59425 vxge_hw_ring_rxds_per_block_get 0 59425 NULL
++squashfs_read_data_59440 squashfs_read_data 6 59440 NULL
++SyS_sched_setaffinity_59442 SyS_sched_setaffinity 2 59442 NULL
++fs_path_ensure_buf_59445 fs_path_ensure_buf 2 59445 NULL
++descriptor_loc_59446 descriptor_loc 3 59446 NULL
++do_compat_semctl_59449 do_compat_semctl 4 59449 NULL
++virtqueue_add_buf_59470 virtqueue_add_buf 3-4 59470 NULL
++ib_copy_from_udata_59502 ib_copy_from_udata 3 59502 NULL
++nfsd_nrpools_59503 nfsd_nrpools 0 59503 NULL
++rds_pin_pages_59507 rds_pin_pages 0-1-2 59507 NULL
++mpi_get_nbits_59551 mpi_get_nbits 0 59551 NULL
++tunables_write_59563 tunables_write 3 59563 NULL
++memdup_user_59590 memdup_user 2 59590 NULL
++tps6586x_irq_get_virq_59601 tps6586x_irq_get_virq 2 59601 NULL
++mem_fwlog_free_mem_blks_read_59616 mem_fwlog_free_mem_blks_read 3 59616 NULL
++mtrr_write_59622 mtrr_write 3 59622 NULL
++find_first_zero_bit_59636 find_first_zero_bit 0-2 59636 NULL
++ubifs_setxattr_59650 ubifs_setxattr 4 59650 NULL nohasharray
++hidraw_read_59650 hidraw_read 3 59650 &ubifs_setxattr_59650
++v9fs_xattr_set_acl_59651 v9fs_xattr_set_acl 4 59651 NULL
++paravirt_sched_clock_59660 paravirt_sched_clock 0 59660 NULL
++__devcgroup_check_permission_59665 __devcgroup_check_permission 0 59665 NULL
++iwl_dbgfs_mac_params_read_59666 iwl_dbgfs_mac_params_read 3 59666 NULL
++alloc_dca_provider_59670 alloc_dca_provider 2 59670 NULL
++can_nocow_odirect_59681 can_nocow_odirect 3 59681 NULL
++sriov_enable_59689 sriov_enable 2 59689 NULL
++mic_calc_failure_read_59700 mic_calc_failure_read 3 59700 NULL
++prism2_info_scanresults_59729 prism2_info_scanresults 3 59729 NULL
++ieee80211_if_read_fwded_unicast_59740 ieee80211_if_read_fwded_unicast 3 59740 NULL
++qib_decode_7220_sdma_errs_59745 qib_decode_7220_sdma_errs 4 59745 NULL
++strnlen_59746 strnlen 0 59746 NULL
++sctp_manip_pkt_59749 sctp_manip_pkt 4 59749 NULL
++ext3_acl_count_59754 ext3_acl_count 0-1 59754 NULL
++long_retry_limit_read_59766 long_retry_limit_read 3 59766 NULL
++venus_remove_59781 venus_remove 4 59781 NULL
++mei_nfc_recv_59784 mei_nfc_recv 3 59784 NULL
++C_SYSC_preadv_59801 C_SYSC_preadv 3 59801 NULL
++ipw_write_59807 ipw_write 3 59807 NULL
++rtllib_wx_set_gen_ie_59808 rtllib_wx_set_gen_ie 3 59808 NULL
++scsi_init_shared_tag_map_59812 scsi_init_shared_tag_map 2 59812 NULL
++ieee80211_if_read_dot11MeshHWMPmaxPREQretries_59829 ieee80211_if_read_dot11MeshHWMPmaxPREQretries 3 59829 NULL
++gspca_dev_probe2_59833 gspca_dev_probe2 4 59833 NULL
++regmap_raw_write_async_59849 regmap_raw_write_async 2-4 59849 NULL
++pvr2_ioread_set_sync_key_59882 pvr2_ioread_set_sync_key 3 59882 NULL
++shmem_zero_setup_59885 shmem_zero_setup 0 59885 NULL nohasharray
++start_transaction_59885 start_transaction 2 59885 &shmem_zero_setup_59885
++ffs_prepare_buffer_59892 ffs_prepare_buffer 2 59892 NULL
++swiotlb_map_page_59909 swiotlb_map_page 3 59909 NULL
++il_dbgfs_rxon_flags_read_59950 il_dbgfs_rxon_flags_read 3 59950 NULL nohasharray
++dapm_widget_power_read_file_59950 dapm_widget_power_read_file 3 59950 &il_dbgfs_rxon_flags_read_59950
++lookup_node_59953 lookup_node 2 59953 NULL
++il_dbgfs_missed_beacon_read_59956 il_dbgfs_missed_beacon_read 3 59956 NULL nohasharray
++compat_ipmi_ioctl_59956 compat_ipmi_ioctl 3 59956 &il_dbgfs_missed_beacon_read_59956
++kvm_set_cr3_59965 kvm_set_cr3 2 59965 NULL
++fb_getput_cmap_59971 fb_getput_cmap 3 59971 NULL
++__arch_hweight16_59975 __arch_hweight16 0 59975 NULL
++osd_req_read_kern_59990 osd_req_read_kern 5 59990 NULL
++ghash_async_setkey_60001 ghash_async_setkey 3 60001 NULL
++ieee80211_if_fmt_dot11MeshAwakeWindowDuration_60006 ieee80211_if_fmt_dot11MeshAwakeWindowDuration 3 60006 NULL
++rawsock_sendmsg_60010 rawsock_sendmsg 4 60010 NULL
++mthca_init_cq_60011 mthca_init_cq 2 60011 NULL
++register_device_60015 register_device 2-3 60015 NULL
++osd_req_list_dev_partitions_60027 osd_req_list_dev_partitions 4 60027 NULL
++xlog_bread_offset_60030 xlog_bread_offset 3 60030 NULL
++sys_sched_getaffinity_60033 sys_sched_getaffinity 2 60033 NULL
++bio_integrity_hw_sectors_60039 bio_integrity_hw_sectors 0-2 60039 NULL
++do_ip6t_set_ctl_60040 do_ip6t_set_ctl 4 60040 NULL
++pin_2_irq_60050 pin_2_irq 0-3 60050 NULL nohasharray
++vcs_size_60050 vcs_size 0 60050 &pin_2_irq_60050
++gru_alloc_gts_60056 gru_alloc_gts 3-2 60056 NULL
++compat_writev_60063 compat_writev 3 60063 NULL
++ieee80211_build_probe_req_60064 ieee80211_build_probe_req 6-8 60064 NULL
++c4iw_num_stags_60073 c4iw_num_stags 0 60073 NULL
++mp_register_gsi_60079 mp_register_gsi 2 60079 NULL
++rxrpc_kernel_send_data_60083 rxrpc_kernel_send_data 3 60083 NULL
++ieee80211_if_fmt_fwded_frames_60103 ieee80211_if_fmt_fwded_frames 3 60103 NULL
++SYSC_msgsnd_60113 SYSC_msgsnd 3 60113 NULL
++ttm_bo_kmap_60118 ttm_bo_kmap 3-2 60118 NULL
++jmb38x_ms_count_slots_60164 jmb38x_ms_count_slots 0 60164 NULL
++init_state_60165 init_state 2 60165 NULL
++sg_build_sgat_60179 sg_build_sgat 3 60179 NULL nohasharray
++jffs2_alloc_full_dirent_60179 jffs2_alloc_full_dirent 1 60179 &sg_build_sgat_60179
++fuse_async_req_send_60183 fuse_async_req_send 0-3 60183 NULL
++rx_rx_tkip_replays_read_60193 rx_rx_tkip_replays_read 3 60193 NULL
++svc_compat_ioctl_60194 svc_compat_ioctl 3 60194 NULL
++ib_send_cm_mra_60202 ib_send_cm_mra 4 60202 NULL nohasharray
++qib_reg_phys_mr_60202 qib_reg_phys_mr 3 60202 &ib_send_cm_mra_60202
++set_tap_pfs_60203 set_tap_pfs 3 60203 NULL
++ieee80211_mgmt_tx_60209 ieee80211_mgmt_tx 7 60209 NULL
++btrfs_get_token_16_60220 btrfs_get_token_16 0 60220 NULL
++arizona_map_irq_60230 arizona_map_irq 2 60230 NULL nohasharray
++__phys_addr_nodebug_60230 __phys_addr_nodebug 0-1 60230 &arizona_map_irq_60230
++wm831x_irq_60254 wm831x_irq 2 60254 NULL
++compat_sys_fcntl64_60256 compat_sys_fcntl64 3 60256 NULL
++printer_write_60276 printer_write 3 60276 NULL
++__pskb_pull_tail_60287 __pskb_pull_tail 2 60287 NULL
++do_xip_mapping_read_60297 do_xip_mapping_read 5 60297 NULL
++getDataLength_60301 getDataLength 0 60301 NULL
++ceph_parse_server_name_60318 ceph_parse_server_name 2 60318 NULL
++__kfifo_from_user_r_60345 __kfifo_from_user_r 3-5 60345 NULL
++dccp_setsockopt_60367 dccp_setsockopt 5 60367 NULL
++ubi_eba_atomic_leb_change_60379 ubi_eba_atomic_leb_change 5 60379 NULL
++instruction_pointer_60384 instruction_pointer 0 60384 NULL
++drop_outstanding_extent_60390 drop_outstanding_extent 0 60390 NULL
++mthca_alloc_resize_buf_60394 mthca_alloc_resize_buf 3 60394 NULL
++ocfs2_zero_extend_60396 ocfs2_zero_extend 3 60396 NULL
++driver_names_read_60399 driver_names_read 3 60399 NULL
++paging32_walk_addr_generic_60415 paging32_walk_addr_generic 4 60415 NULL
++simple_alloc_urb_60420 simple_alloc_urb 3 60420 NULL
++excessive_retries_read_60425 excessive_retries_read 3 60425 NULL
++tstats_write_60432 tstats_write 3 60432 NULL nohasharray
++kmalloc_60432 kmalloc 1 60432 &tstats_write_60432
++tipc_buf_acquire_60437 tipc_buf_acquire 1 60437 NULL
++rx_data_60442 rx_data 4 60442 NULL nohasharray
++scaled_div32_60442 scaled_div32 1-2 60442 &rx_data_60442
++tcf_csum_ipv4_igmp_60446 tcf_csum_ipv4_igmp 3 60446 NULL
++snd_hda_get_num_raw_conns_60462 snd_hda_get_num_raw_conns 0 60462 NULL
++crypto_shash_setkey_60483 crypto_shash_setkey 3 60483 NULL
++ath_tx_init_60515 ath_tx_init 2 60515 NULL
++hysdn_sched_rx_60533 hysdn_sched_rx 3 60533 NULL
++v9fs_fid_readn_60544 v9fs_fid_readn 4 60544 NULL
++nonpaging_map_60551 nonpaging_map 4 60551 NULL
++nfsd_hashsize_60562 nfsd_hashsize 0 60562 NULL
++hash_net6_expire_60598 hash_net6_expire 3 60598 NULL
++skb_transport_offset_60619 skb_transport_offset 0 60619 NULL
++wl1273_fm_fops_write_60621 wl1273_fm_fops_write 3 60621 NULL
++acl_alloc_stack_init_60630 acl_alloc_stack_init 1 60630 NULL
++ubifs_recover_leb_60639 ubifs_recover_leb 3 60639 NULL
++fb_get_fscreeninfo_60640 fb_get_fscreeninfo 3 60640 NULL
++if_sdio_host_to_card_60666 if_sdio_host_to_card 4 60666 NULL
++ieee80211_if_read_dot11MeshConfirmTimeout_60670 ieee80211_if_read_dot11MeshConfirmTimeout 3 60670 NULL
++read_vbt_r10_60679 read_vbt_r10 1 60679 NULL
++init_data_container_60709 init_data_container 1 60709 NULL
++snd_ice1712_ds_read_60754 snd_ice1712_ds_read 0 60754 NULL
++raid_status_60755 raid_status 5 60755 NULL
++sel_write_checkreqprot_60774 sel_write_checkreqprot 3 60774 NULL
++opticon_write_60775 opticon_write 4 60775 NULL
++acl_alloc_num_60778 acl_alloc_num 1-2 60778 NULL
++snd_pcm_oss_readv3_60792 snd_pcm_oss_readv3 3 60792 NULL
++pwr_tx_with_ps_read_60851 pwr_tx_with_ps_read 3 60851 NULL
++alloc_buf_60864 alloc_buf 3-2 60864 NULL
++generic_writepages_60871 generic_writepages 0 60871 NULL
++ext4_update_inline_data_60888 ext4_update_inline_data 3 60888 NULL
++iio_debugfs_read_reg_60908 iio_debugfs_read_reg 3 60908 NULL
++mgt_set_varlen_60916 mgt_set_varlen 4 60916 NULL
++scrub_chunk_60926 scrub_chunk 5 60926 NULL
++sys_mlock_60932 sys_mlock 1 60932 NULL
++pti_char_write_60960 pti_char_write 3 60960 NULL
++mwifiex_alloc_sdio_mpa_buffers_60961 mwifiex_alloc_sdio_mpa_buffers 2-3 60961 NULL
++__a2mp_build_60987 __a2mp_build 3 60987 NULL
++hsc_msg_alloc_60990 hsc_msg_alloc 1 60990 NULL
++ath6kl_lrssi_roam_read_61022 ath6kl_lrssi_roam_read 3 61022 NULL
++symtab_init_61050 symtab_init 2 61050 NULL
++fuse_send_write_61053 fuse_send_write 0-4 61053 NULL
++bitmap_scnlistprintf_61062 bitmap_scnlistprintf 0-4-2 61062 NULL
++ahash_align_buffer_size_61070 ahash_align_buffer_size 0-1-2 61070 NULL
++get_derived_key_61100 get_derived_key 4 61100 NULL
++alloc_chrdev_region_61112 alloc_chrdev_region 0 61112 NULL
++p80211_headerlen_61119 p80211_headerlen 0 61119 NULL nohasharray
++__probe_kernel_read_61119 __probe_kernel_read 3 61119 &p80211_headerlen_61119
++vmemmap_alloc_block_buf_61126 vmemmap_alloc_block_buf 1 61126 NULL
++afs_proc_cells_write_61139 afs_proc_cells_write 3 61139 NULL
++brcmf_sdio_chip_cr4_exitdl_61143 brcmf_sdio_chip_cr4_exitdl 4 61143 NULL
++__vmalloc_61168 __vmalloc 1 61168 NULL
++event_oom_late_read_61175 event_oom_late_read 3 61175 NULL nohasharray
++pair_device_61175 pair_device 4 61175 &event_oom_late_read_61175
++sys_lsetxattr_61177 sys_lsetxattr 4 61177 NULL
++SyS_prctl_61202 SyS_prctl 4 61202 NULL
++arch_hibernation_header_save_61212 arch_hibernation_header_save 0 61212 NULL
++smk_read_ambient_61220 smk_read_ambient 3 61220 NULL
++btrfs_bio_alloc_61270 btrfs_bio_alloc 3 61270 NULL
++vortex_adbdma_getlinearpos_61283 vortex_adbdma_getlinearpos 0 61283 NULL
++sys_add_key_61288 sys_add_key 4 61288 NULL nohasharray
++nvme_trans_copy_to_user_61288 nvme_trans_copy_to_user 3 61288 &sys_add_key_61288
++ext4_issue_discard_61305 ext4_issue_discard 2 61305 NULL
++xfer_from_user_61307 xfer_from_user 3 61307 NULL
++timespec_to_ns_61317 timespec_to_ns 0 61317 NULL
++xfrm_user_sec_ctx_size_61320 xfrm_user_sec_ctx_size 0 61320 NULL
++C_SYSC_msgsnd_61330 C_SYSC_msgsnd 2-3 61330 NULL
++st5481_setup_isocpipes_61340 st5481_setup_isocpipes 6-4 61340 NULL
++rx_rx_wa_ba_not_expected_read_61341 rx_rx_wa_ba_not_expected_read 3 61341 NULL
++f1x_map_sysaddr_to_csrow_61344 f1x_map_sysaddr_to_csrow 2 61344 NULL
++debug_debug4_read_61367 debug_debug4_read 3 61367 NULL
++sys_ptrace_61369 sys_ptrace 3 61369 NULL
++change_xattr_61390 change_xattr 5 61390 NULL
++size_entry_mwt_61400 size_entry_mwt 0 61400 NULL
++dma_ops_area_alloc_61440 dma_ops_area_alloc 3-4-5 61440 NULL
++tc3589x_irq_unmap_61447 tc3589x_irq_unmap 2 61447 NULL
++unix_stream_sendmsg_61455 unix_stream_sendmsg 4 61455 NULL
++snd_pcm_lib_writev_transfer_61483 snd_pcm_lib_writev_transfer 4-2-5 61483 NULL
++btrfs_item_size_61485 btrfs_item_size 0 61485 NULL
++erst_errno_61526 erst_errno 0 61526 NULL
++ntfs_attr_lookup_61539 ntfs_attr_lookup 0 61539 NULL
++get_ohm_of_thermistor_61545 get_ohm_of_thermistor 2 61545 NULL
++o2hb_pop_count_61553 o2hb_pop_count 2 61553 NULL
++dvb_net_ioctl_61559 dvb_net_ioctl 2 61559 NULL
++ieee80211_if_read_rc_rateidx_mask_2ghz_61570 ieee80211_if_read_rc_rateidx_mask_2ghz 3 61570 NULL
++seq_open_private_61589 seq_open_private 3 61589 NULL
++__get_vm_area_61599 __get_vm_area 1 61599 NULL
++nfs4_init_uniform_client_string_61601 nfs4_init_uniform_client_string 3 61601 NULL
++ncp_compat_ioctl_61608 ncp_compat_ioctl 3 61608 NULL
++configfs_write_file_61621 configfs_write_file 3 61621 NULL
++ieee80211_if_fmt_hw_queues_61629 ieee80211_if_fmt_hw_queues 3 61629 NULL
++i2o_parm_table_get_61635 i2o_parm_table_get 6 61635 NULL
++snd_pcm_oss_read3_61643 snd_pcm_oss_read3 0-3 61643 NULL
++resize_stripes_61650 resize_stripes 2 61650 NULL
++ttm_page_pool_free_61661 ttm_page_pool_free 2 61661 NULL
++insert_one_name_61668 insert_one_name 7 61668 NULL
++lock_loop_61681 lock_loop 1 61681 NULL
++__do_tune_cpucache_61684 __do_tune_cpucache 2 61684 NULL
++filter_read_61692 filter_read 3 61692 NULL
++iov_length_61716 iov_length 0 61716 NULL
++fragmentation_threshold_read_61718 fragmentation_threshold_read 3 61718 NULL
++read_file_interrupt_61742 read_file_interrupt 3 61742 NULL nohasharray
++read_file_regval_61742 read_file_regval 3 61742 &read_file_interrupt_61742
++batadv_dat_snoop_incoming_arp_reply_61801 batadv_dat_snoop_incoming_arp_reply 3 61801 NULL
++tps80031_irq_init_61830 tps80031_irq_init 3 61830 NULL
++bfad_debugfs_write_regwr_61841 bfad_debugfs_write_regwr 3 61841 NULL
++fs_path_prepare_for_add_61854 fs_path_prepare_for_add 2 61854 NULL
++evdev_compute_buffer_size_61863 evdev_compute_buffer_size 0 61863 NULL
++SYSC_lsetxattr_61869 SYSC_lsetxattr 4 61869 NULL
++get_fw_name_61874 get_fw_name 3 61874 NULL
++free_init_pages_61875 free_init_pages 2 61875 NULL
++twl4030_sih_setup_61878 twl4030_sih_setup 3 61878 NULL
++ieee80211_rtl_auth_challenge_61897 ieee80211_rtl_auth_challenge 3 61897 NULL
++ax25_addr_size_61899 ax25_addr_size 0 61899 NULL nohasharray
++cxgb4_pktgl_to_skb_61899 cxgb4_pktgl_to_skb 2 61899 &ax25_addr_size_61899
++clear_refs_write_61904 clear_refs_write 3 61904 NULL
++rx_filter_arp_filter_read_61914 rx_filter_arp_filter_read 3 61914 NULL
++au0828_init_isoc_61917 au0828_init_isoc 3-2 61917 NULL
++sctp_sendmsg_61919 sctp_sendmsg 4 61919 NULL
++send_bulk_static_data_61932 send_bulk_static_data 3 61932 NULL
++gfn_to_pfn_memslot_atomic_61947 gfn_to_pfn_memslot_atomic 2 61947 NULL
++il4965_ucode_rx_stats_read_61948 il4965_ucode_rx_stats_read 3 61948 NULL
++squashfs_read_id_index_table_61961 squashfs_read_id_index_table 4 61961 NULL
++mlx4_alloc_mtt_range_61966 mlx4_alloc_mtt_range 2 61966 NULL
++ocfs2_quota_write_61972 ocfs2_quota_write 5-4 61972 NULL
++cow_file_range_61979 cow_file_range 3 61979 NULL
++module_alloc_exec_61991 module_alloc_exec 1 61991 NULL
++virtnet_send_command_61993 virtnet_send_command 5-6 61993 NULL
++xt_compat_match_offset_62011 xt_compat_match_offset 0 62011 NULL
++jffs2_do_unlink_62020 jffs2_do_unlink 4 62020 NULL
++SYSC_select_62024 SYSC_select 1 62024 NULL
++pmcraid_build_passthrough_ioadls_62034 pmcraid_build_passthrough_ioadls 2 62034 NULL
++ppp_tx_cp_62044 ppp_tx_cp 5 62044 NULL
++sctp_user_addto_chunk_62047 sctp_user_addto_chunk 2-3 62047 NULL
++do_pselect_62061 do_pselect 1 62061 NULL
++pcpu_alloc_bootmem_62074 pcpu_alloc_bootmem 2-3 62074 NULL
++get_domain_for_dev_62099 get_domain_for_dev 2 62099 NULL
++ipath_user_sdma_pin_pages_62100 ipath_user_sdma_pin_pages 3-5 62100 NULL
++jffs2_security_setxattr_62107 jffs2_security_setxattr 4 62107 NULL
++llc_ui_header_len_62131 llc_ui_header_len 0 62131 NULL
++qib_diag_write_62133 qib_diag_write 3 62133 NULL
++ql_status_62135 ql_status 5 62135 NULL
++video_usercopy_62151 video_usercopy 2 62151 NULL
++prism54_wpa_bss_ie_get_62173 prism54_wpa_bss_ie_get 0 62173 NULL
++alloc_upcall_62186 alloc_upcall 2 62186 NULL
++btrfs_xattr_acl_set_62203 btrfs_xattr_acl_set 4 62203 NULL
++sock_kmalloc_62205 sock_kmalloc 2 62205 NULL
++SYSC_setgroups16_62232 SYSC_setgroups16 1 62232 NULL
++nfsd_read_file_62241 nfsd_read_file 6 62241 NULL
++allocate_partition_62245 allocate_partition 4 62245 NULL
++__qib_get_user_pages_62287 __qib_get_user_pages 1-2 62287 NULL
++il_dbgfs_sram_read_62296 il_dbgfs_sram_read 3 62296 NULL
++sparse_early_usemaps_alloc_pgdat_section_62304 sparse_early_usemaps_alloc_pgdat_section 2 62304 NULL
++subsystem_filter_read_62310 subsystem_filter_read 3 62310 NULL
++udf_sb_alloc_partition_maps_62313 udf_sb_alloc_partition_maps 2 62313 NULL
++Wb35Reg_BurstWrite_62327 Wb35Reg_BurstWrite 4 62327 NULL
++subseq_list_62332 subseq_list 3-0 62332 NULL
++flash_write_62354 flash_write 3 62354 NULL
++set_wd_exp_mode_pfs_62372 set_wd_exp_mode_pfs 3 62372 NULL
++twl_get_num_slaves_62386 twl_get_num_slaves 0 62386 NULL
++rx_rx_timeout_read_62389 rx_rx_timeout_read 3 62389 NULL
++altera_irscan_62396 altera_irscan 2 62396 NULL
++set_ssp_62411 set_ssp 4 62411 NULL
++unmap_single_62423 unmap_single 2 62423 NULL
++netdev_alloc_skb_62437 netdev_alloc_skb 2 62437 NULL
++e1000_check_copybreak_62448 e1000_check_copybreak 3 62448 NULL
++ip_vs_icmp_xmit_v6_62477 ip_vs_icmp_xmit_v6 4 62477 NULL
++ceph_dns_resolve_name_62488 ceph_dns_resolve_name 2 62488 NULL
++remove_mapping_62491 remove_mapping 2 62491 NULL
++mlx4_en_create_rx_ring_62498 mlx4_en_create_rx_ring 3 62498 NULL
++ext_rts51x_sd_execute_read_data_62501 ext_rts51x_sd_execute_read_data 9 62501 NULL
++pep_sendmsg_62524 pep_sendmsg 4 62524 NULL nohasharray
++i915_next_seqno_read_62524 i915_next_seqno_read 3 62524 &pep_sendmsg_62524
++test_iso_queue_62534 test_iso_queue 5 62534 NULL
++debugfs_read_62535 debugfs_read 3 62535 NULL
++sco_sock_sendmsg_62542 sco_sock_sendmsg 4 62542 NULL
++qib_refresh_qsfp_cache_62547 qib_refresh_qsfp_cache 0 62547 NULL
++xfrm_user_policy_62573 xfrm_user_policy 4 62573 NULL
++get_subdir_62581 get_subdir 3 62581 NULL
++prism2_send_mgmt_62605 prism2_send_mgmt 4 62605 NULL nohasharray
++nfsd_vfs_read_62605 nfsd_vfs_read 6 62605 &prism2_send_mgmt_62605
++iommu_area_alloc_62619 iommu_area_alloc 2-3-4-7 62619 NULL
++ems_pcmcia_add_card_62627 ems_pcmcia_add_card 2 62627 NULL
++compat_rangeinfo_62630 compat_rangeinfo 2 62630 NULL
++lpfc_sli4_queue_alloc_62646 lpfc_sli4_queue_alloc 3 62646 NULL
++memblock_alloc_nid_62652 memblock_alloc_nid 1-2 62652 NULL
++ima_file_mmap_62663 ima_file_mmap 0 62663 NULL
++write_62671 write 3 62671 NULL
++printer_req_alloc_62687 printer_req_alloc 2 62687 NULL
++qla4_83xx_rd_reg_62693 qla4_83xx_rd_reg 0 62693 NULL
++ioremap_wc_62695 ioremap_wc 1-2 62695 NULL
++bioset_integrity_create_62708 bioset_integrity_create 2 62708 NULL
++rdm_62719 rdm 0 62719 NULL
++key_replays_read_62746 key_replays_read 3 62746 NULL
++init_chip_wc_pat_62768 init_chip_wc_pat 2 62768 NULL
++ax25_sendmsg_62770 ax25_sendmsg 4 62770 NULL
++page_key_alloc_62771 page_key_alloc 0 62771 NULL
++C_SYSC_ipc_62776 C_SYSC_ipc 5-3-6-4 62776 NULL
++tracing_total_entries_read_62817 tracing_total_entries_read 3 62817 NULL
++__rounddown_pow_of_two_62836 __rounddown_pow_of_two 0 62836 NULL
++bio_get_nr_vecs_62838 bio_get_nr_vecs 0 62838 NULL
++xlog_recover_add_to_trans_62839 xlog_recover_add_to_trans 4 62839 NULL
++rx_fcs_err_read_62844 rx_fcs_err_read 3 62844 NULL
++set_swbp_62853 set_swbp 3 62853 NULL
++hpi_read_word_62862 hpi_read_word 0 62862 NULL
++aoechr_write_62883 aoechr_write 3 62883 NULL
++resize_info_buffer_62889 resize_info_buffer 2 62889 NULL
++if_spi_host_to_card_62890 if_spi_host_to_card 4 62890 NULL
++mempool_create_slab_pool_62907 mempool_create_slab_pool 1 62907 NULL
++getdqbuf_62908 getdqbuf 1 62908 NULL
++try_async_pf_62914 try_async_pf 3 62914 NULL nohasharray
++SyS_remap_file_pages_62914 SyS_remap_file_pages 1 62914 &try_async_pf_62914
++agp_create_user_memory_62955 agp_create_user_memory 1 62955 NULL
++__vb2_perform_fileio_63033 __vb2_perform_fileio 3 63033 NULL
++pipeline_defrag_to_csum_swi_read_63037 pipeline_defrag_to_csum_swi_read 3 63037 NULL
++scsi_host_alloc_63041 scsi_host_alloc 2 63041 NULL
++gso_pskb_expand_head_63052 gso_pskb_expand_head 2 63052 NULL
++unlink1_63059 unlink1 3 63059 NULL
++xen_set_nslabs_63066 xen_set_nslabs 0 63066 NULL
++ocfs2_decrease_refcount_63078 ocfs2_decrease_refcount 3 63078 NULL
++sep_prepare_input_output_dma_table_in_dcb_63087 sep_prepare_input_output_dma_table_in_dcb 4-5-2-3 63087 NULL
++iwl_dbgfs_sensitivity_read_63116 iwl_dbgfs_sensitivity_read 3 63116 NULL
++alloc_cblock_63133 alloc_cblock 2 63133 NULL
++ib_send_cm_rtu_63138 ib_send_cm_rtu 3 63138 NULL
++xen_zap_pfn_range_63149 xen_zap_pfn_range 1 63149 NULL
++smk_write_revoke_subj_63173 smk_write_revoke_subj 3 63173 NULL
++vme_master_read_63221 vme_master_read 0 63221 NULL
++SyS_gethostname_63227 SyS_gethostname 2 63227 NULL
++module_alloc_update_bounds_rw_63233 module_alloc_update_bounds_rw 1 63233 NULL
++ptp_read_63251 ptp_read 4 63251 NULL
++raid5_resize_63306 raid5_resize 2 63306 NULL
++proc_info_read_63344 proc_info_read 3 63344 NULL
++ps_upsd_max_sptime_read_63362 ps_upsd_max_sptime_read 3 63362 NULL
++idmouse_read_63374 idmouse_read 3 63374 NULL
++edac_pci_alloc_ctl_info_63388 edac_pci_alloc_ctl_info 1 63388 NULL nohasharray
++usbnet_read_cmd_nopm_63388 usbnet_read_cmd_nopm 7 63388 &edac_pci_alloc_ctl_info_63388
++rxpipe_missed_beacon_host_int_trig_rx_data_read_63405 rxpipe_missed_beacon_host_int_trig_rx_data_read 3 63405 NULL
++nouveau_event_create_63411 nouveau_event_create 1 63411 NULL
++l2cap_sock_sendmsg_63427 l2cap_sock_sendmsg 4 63427 NULL
++sep_prepare_input_output_dma_table_63429 sep_prepare_input_output_dma_table 2-4-3 63429 NULL
++kone_send_63435 kone_send 4 63435 NULL
++gfn_to_hva_many_63437 gfn_to_hva_many 0-2 63437 NULL
++nfsd_symlink_63442 nfsd_symlink 6 63442 NULL
++ipv6_is_mld_63461 ipv6_is_mld 3 63461 NULL
++snd_info_entry_write_63474 snd_info_entry_write 3 63474 NULL
++reada_find_extent_63486 reada_find_extent 2 63486 NULL
++read_kcore_63488 read_kcore 3 63488 NULL
++save_hint_63497 save_hint 2 63497 NULL
++snd_pcm_plug_write_transfer_63503 snd_pcm_plug_write_transfer 0-3 63503 NULL
++ubi_more_leb_change_data_63534 ubi_more_leb_change_data 4 63534 NULL
++if_sdio_read_scratch_63540 if_sdio_read_scratch 0 63540 NULL
++append_to_buffer_63550 append_to_buffer 3 63550 NULL
++dbg_leb_write_63555 dbg_leb_write 4-5 63555 NULL nohasharray
++kvm_write_guest_page_63555 kvm_write_guest_page 5-2 63555 &dbg_leb_write_63555
++ubifs_lpt_scan_nolock_63572 ubifs_lpt_scan_nolock 0 63572 NULL
++iwch_reg_user_mr_63575 iwch_reg_user_mr 2-3 63575 NULL
++ocfs2_calc_trunc_pos_63576 ocfs2_calc_trunc_pos 4 63576 NULL
++rproc_alloc_63577 rproc_alloc 5 63577 NULL
++ext3_clear_blocks_63597 ext3_clear_blocks 4-5 63597 NULL
++module_alloc_63630 module_alloc 1 63630 NULL
++ntfs_malloc_nofs_nofail_63631 ntfs_malloc_nofs_nofail 1 63631 NULL
++symbol_build_supp_rates_63634 symbol_build_supp_rates 0 63634 NULL
++_ubh_find_next_zero_bit__63640 _ubh_find_next_zero_bit_ 4-5-3 63640 NULL
++proc_loginuid_write_63648 proc_loginuid_write 3 63648 NULL
++ValidateDSDParamsChecksum_63654 ValidateDSDParamsChecksum 3 63654 NULL
++hidraw_ioctl_63658 hidraw_ioctl 2 63658 NULL
++vbi_read_63673 vbi_read 3 63673 NULL nohasharray
++xen_register_pirq_63673 xen_register_pirq 1-2 63673 &vbi_read_63673
++alloc_tty_driver_63681 alloc_tty_driver 1 63681 NULL
++mkiss_compat_ioctl_63686 mkiss_compat_ioctl 4 63686 NULL
++arizona_irq_map_63709 arizona_irq_map 2 63709 NULL
++nouveau_object_create__63715 nouveau_object_create_ 5 63715 NULL
++btrfs_insert_delayed_dir_index_63720 btrfs_insert_delayed_dir_index 4 63720 NULL
++selinux_secctx_to_secid_63744 selinux_secctx_to_secid 2 63744 NULL
++snd_pcm_oss_read1_63771 snd_pcm_oss_read1 3 63771 NULL
++snd_opl4_mem_proc_read_63774 snd_opl4_mem_proc_read 5 63774 NULL
++spidev_compat_ioctl_63778 spidev_compat_ioctl 2-3 63778 NULL
++snapshot_compat_ioctl_63792 snapshot_compat_ioctl 3 63792 NULL
++kovaplus_sysfs_write_63795 kovaplus_sysfs_write 6 63795 NULL
++mwifiex_11n_create_rx_reorder_tbl_63806 mwifiex_11n_create_rx_reorder_tbl 4 63806 NULL
++copy_nodes_to_user_63807 copy_nodes_to_user 2 63807 NULL
++dec_zcache_eph_zbytes_63817 dec_zcache_eph_zbytes 1 63817 NULL
++prepare_copy_63826 prepare_copy 2 63826 NULL
++sel_write_load_63830 sel_write_load 3 63830 NULL
++proc_pid_attr_write_63845 proc_pid_attr_write 3 63845 NULL
++init_map_ipmac_63896 init_map_ipmac 4-3 63896 NULL
++divas_write_63901 divas_write 3 63901 NULL
++xhci_alloc_stream_info_63902 xhci_alloc_stream_info 3 63902 NULL
++uvc_alloc_urb_buffers_63922 uvc_alloc_urb_buffers 0-3-2 63922 NULL
++snd_compr_write_63923 snd_compr_write 3 63923 NULL
++acpi_ev_get_gpe_xrupt_block_63924 acpi_ev_get_gpe_xrupt_block 1 63924 NULL
++tipc_send2port_63935 tipc_send2port 5 63935 NULL
++afs_send_simple_reply_63940 afs_send_simple_reply 3 63940 NULL
++__team_options_register_63941 __team_options_register 3 63941 NULL
++macvtap_recvmsg_63949 macvtap_recvmsg 4 63949 NULL
++ieee80211_if_fmt_rc_rateidx_mcs_mask_2ghz_63968 ieee80211_if_fmt_rc_rateidx_mcs_mask_2ghz 3 63968 NULL
++ieee80211_authentication_req_63973 ieee80211_authentication_req 3 63973 NULL
++diva_xdi_write_63975 diva_xdi_write 4 63975 NULL
++read_file_frameerrors_64001 read_file_frameerrors 3 64001 NULL
++kmemdup_64015 kmemdup 2 64015 NULL
++SyS_rt_sigpending_64018 SyS_rt_sigpending 2 64018 NULL
++offset_to_vaddr_64025 offset_to_vaddr 0-2 64025 NULL nohasharray
++tcf_csum_skb_nextlayer_64025 tcf_csum_skb_nextlayer 3 64025 &offset_to_vaddr_64025
++dbAllocDmapLev_64030 dbAllocDmapLev 0 64030 NULL
++resize_async_buffer_64031 resize_async_buffer 4 64031 NULL
++sep_lli_table_secure_dma_64042 sep_lli_table_secure_dma 2-3 64042 NULL
++tfrc_calc_x_reverse_lookup_64057 tfrc_calc_x_reverse_lookup 0 64057 NULL
++get_u8_64076 get_u8 0 64076 NULL
++sl_realloc_bufs_64086 sl_realloc_bufs 2 64086 NULL
++vmci_handle_arr_get_size_64088 vmci_handle_arr_get_size 0 64088 NULL
++lbs_highrssi_read_64089 lbs_highrssi_read 3 64089 NULL
++SyS_mq_timedsend_64107 SyS_mq_timedsend 3 64107 NULL
++do_load_xattr_datum_64118 do_load_xattr_datum 0 64118 NULL
++ol_quota_entries_per_block_64122 ol_quota_entries_per_block 0 64122 NULL
++ext4_prepare_inline_data_64124 ext4_prepare_inline_data 3 64124 NULL
++init_bch_64130 init_bch 1-2 64130 NULL
++SYSC_ptrace_64136 SYSC_ptrace 3-4 64136 NULL
++uea_idma_write_64139 uea_idma_write 3 64139 NULL
++ablkcipher_copy_iv_64140 ablkcipher_copy_iv 3 64140 NULL
++dlfb_ops_write_64150 dlfb_ops_write 3 64150 NULL
++__comedi_buf_alloc_64155 __comedi_buf_alloc 3 64155 NULL
++cpumask_scnprintf_64170 cpumask_scnprintf 2 64170 NULL
++read_pulse_64227 read_pulse 0-3 64227 NULL
++header_len_64232 header_len 0 64232 NULL
++redrat3_transmit_ir_64244 redrat3_transmit_ir 3 64244 NULL
++io_capture_transfer_64276 io_capture_transfer 4 64276 NULL
++btrfs_file_extent_offset_64278 btrfs_file_extent_offset 0 64278 NULL
++sta_current_tx_rate_read_64286 sta_current_tx_rate_read 3 64286 NULL
++event_id_read_64288 event_id_read 3 64288 NULL nohasharray
++xfs_dir_cilookup_result_64288 xfs_dir_cilookup_result 3 64288 &event_id_read_64288
++ocfs2_block_check_validate_bhs_64302 ocfs2_block_check_validate_bhs 0 64302 NULL
++error_error_bar_retry_read_64305 error_error_bar_retry_read 3 64305 NULL
++ffz_64324 ffz 0 64324 NULL
++map_region_64328 map_region 1 64328 NULL
++sisusbcon_clear_64329 sisusbcon_clear 4-3-5 64329 NULL
++ts_write_64336 ts_write 3 64336 NULL
++usbtmc_write_64340 usbtmc_write 3 64340 NULL
++do_write_orph_node_64343 do_write_orph_node 2 64343 NULL
++ft1000_read_reg_64352 ft1000_read_reg 0 64352 NULL
++bnx2x_vfop_mcast_cmd_64354 bnx2x_vfop_mcast_cmd 5 64354 NULL
++wlc_phy_loadsampletable_nphy_64367 wlc_phy_loadsampletable_nphy 3 64367 NULL
++ilo_write_64378 ilo_write 3 64378 NULL
++btrfs_map_block_64379 btrfs_map_block 3 64379 NULL
++vmcs_readl_64381 vmcs_readl 0 64381 NULL
++nilfs_alloc_seg_bio_64383 nilfs_alloc_seg_bio 3 64383 NULL
++ir_lirc_transmit_ir_64403 ir_lirc_transmit_ir 3 64403 NULL
++pidlist_allocate_64404 pidlist_allocate 1 64404 NULL
++rx_hdr_overflow_read_64407 rx_hdr_overflow_read 3 64407 NULL
++snd_card_create_64418 snd_card_create 4 64418 NULL nohasharray
++keyctl_get_security_64418 keyctl_get_security 3 64418 &snd_card_create_64418
++nl80211_send_mgmt_64419 nl80211_send_mgmt 7 64419 NULL
++oom_adj_write_64428 oom_adj_write 3 64428 NULL
++ext4_trim_extent_64431 ext4_trim_extent 4 64431 NULL nohasharray
++read_file_spectral_short_repeat_64431 read_file_spectral_short_repeat 3 64431 &ext4_trim_extent_64431
++cap_capable_64462 cap_capable 0 64462 NULL
++ip_vs_create_timeout_table_64478 ip_vs_create_timeout_table 2 64478 NULL
++single_open_size_64483 single_open_size 4 64483 NULL
++p54_parse_rssical_64493 p54_parse_rssical 3 64493 NULL
++msg_data_sz_64503 msg_data_sz 0 64503 NULL
++remove_uuid_64505 remove_uuid 4 64505 NULL nohasharray
++handle_abnormal_pfn_64505 handle_abnormal_pfn 3 64505 &remove_uuid_64505
++crypto_blkcipher_alignmask_64520 crypto_blkcipher_alignmask 0 64520 NULL
++opera1_usb_i2c_msgxfer_64521 opera1_usb_i2c_msgxfer 4 64521 NULL
++ses_send_diag_64527 ses_send_diag 4 64527 NULL
++prctl_set_mm_64538 prctl_set_mm 3 64538 NULL
++SyS_bind_64544 SyS_bind 3 64544 NULL
++rbd_obj_read_sync_64554 rbd_obj_read_sync 3-4 64554 NULL
++__spi_sync_64561 __spi_sync 0 64561 NULL
++__apei_exec_run_64563 __apei_exec_run 0 64563 NULL
++fanotify_write_64623 fanotify_write 3 64623 NULL
++to_dblock_64655 to_dblock 0-1 64655 NULL
++regmap_read_debugfs_64658 regmap_read_debugfs 5 64658 NULL
++ocfs2_read_xattr_block_64661 ocfs2_read_xattr_block 0 64661 NULL nohasharray
++tlbflush_read_file_64661 tlbflush_read_file 3 64661 &ocfs2_read_xattr_block_64661
++efx_tsoh_get_buffer_64664 efx_tsoh_get_buffer 3 64664 NULL
++rx_rx_out_of_mpdu_nodes_read_64668 rx_rx_out_of_mpdu_nodes_read 3 64668 NULL
++nr_free_zone_pages_64680 nr_free_zone_pages 0 64680 NULL
++sec_bulk_write_64691 sec_bulk_write 2-3 64691 NULL
++__feat_register_sp_64712 __feat_register_sp 6 64712 NULL
++snd_pcm_oss_capture_position_fixup_64713 snd_pcm_oss_capture_position_fixup 0 64713 NULL
++dapm_bias_read_file_64715 dapm_bias_read_file 3 64715 NULL
++atomic_add_return_64720 atomic_add_return 0-1 64720 NULL
++i2400m_msg_to_dev_64722 i2400m_msg_to_dev 3 64722 NULL
++squashfs_read_inode_lookup_table_64739 squashfs_read_inode_lookup_table 4 64739 NULL
++bio_map_kern_64751 bio_map_kern 3 64751 NULL
++rt2x00debug_write_csr_64753 rt2x00debug_write_csr 3 64753 NULL
++isr_low_rssi_read_64789 isr_low_rssi_read 3 64789 NULL
++regmap_reg_ranges_read_file_64798 regmap_reg_ranges_read_file 3 64798 NULL
++nfsctl_transaction_write_64800 nfsctl_transaction_write 3 64800 NULL
++megaraid_change_queue_depth_64815 megaraid_change_queue_depth 2 64815 NULL
++ecryptfs_send_miscdev_64816 ecryptfs_send_miscdev 2 64816 NULL
++vaddr_get_pfn_64818 vaddr_get_pfn 1 64818 NULL
++gfn_to_page_64826 gfn_to_page 2 64826 NULL
++do_kimage_alloc_64827 do_kimage_alloc 3 64827 NULL
++altera_set_dr_pre_64862 altera_set_dr_pre 2 64862 NULL
++gfn_to_pfn_64870 gfn_to_pfn 2 64870 NULL
++ffs_epfile_io_64886 ffs_epfile_io 3 64886 NULL
++ieee80211_if_read_ave_beacon_64924 ieee80211_if_read_ave_beacon 3 64924 NULL
++ubifs_wbuf_write_nolock_64946 ubifs_wbuf_write_nolock 3 64946 NULL
++snd_rawmidi_ioctl_compat_64954 snd_rawmidi_ioctl_compat 3 64954 NULL
++ip_options_get_from_user_64958 ip_options_get_from_user 4 64958 NULL
++acpi_os_install_interrupt_handler_64968 acpi_os_install_interrupt_handler 1 64968 NULL
++traceprobe_probes_write_64969 traceprobe_probes_write 3 64969 NULL
++suspend_dtim_interval_read_64971 suspend_dtim_interval_read 3 64971 NULL
++ext2_group_first_block_no_64972 ext2_group_first_block_no 0-2 64972 NULL
++pskb_pull_65005 pskb_pull 2 65005 NULL
++unifi_write_65012 unifi_write 3 65012 NULL
++crypto_ahash_digestsize_65014 crypto_ahash_digestsize 0 65014 NULL
++nfs_readdata_alloc_65015 nfs_readdata_alloc 2 65015 NULL
++insert_dent_65034 insert_dent 7 65034 NULL
++compat_put_ushort_65040 compat_put_ushort 1 65040 NULL
++brcmf_sdcard_rwdata_65041 brcmf_sdcard_rwdata 5 65041 NULL
++tty_audit_log_65043 tty_audit_log 8 65043 NULL
++compat_cmdtest_65064 compat_cmdtest 2 65064 NULL
++count_run_65072 count_run 0-2-4 65072 NULL nohasharray
++bnx2fc_process_l2_frame_compl_65072 bnx2fc_process_l2_frame_compl 3 65072 &count_run_65072
++__alloc_bootmem_node_high_65076 __alloc_bootmem_node_high 2-3 65076 NULL
++ocfs2_truncate_cluster_pages_65086 ocfs2_truncate_cluster_pages 2 65086 NULL
++ath9k_dump_mci_btcoex_65090 ath9k_dump_mci_btcoex 0 65090 NULL
++C_SYSC_semctl_65091 C_SYSC_semctl 4 65091 NULL
++ssb_bus_register_65183 ssb_bus_register 3 65183 NULL
++rx_rx_done_read_65217 rx_rx_done_read 3 65217 NULL
++print_endpoint_stat_65232 print_endpoint_stat 3-4-0 65232 NULL
++whci_n_caps_65247 whci_n_caps 0 65247 NULL
++atomic_long_read_65263 atomic_long_read 0 65263 NULL
++kmem_zalloc_greedy_65268 kmem_zalloc_greedy 3-2 65268 NULL
++kmalloc_parameter_65279 kmalloc_parameter 1 65279 NULL
++compat_core_sys_select_65285 compat_core_sys_select 1 65285 NULL
++mpi_set_buffer_65294 mpi_set_buffer 3 65294 NULL
++redirected_tty_write_65297 redirected_tty_write 3 65297 NULL
++get_var_len_65304 get_var_len 0 65304 NULL
++unpack_array_65318 unpack_array 0 65318 NULL
++pci_vpd_find_tag_65325 pci_vpd_find_tag 0-2 65325 NULL
++dccp_setsockopt_service_65336 dccp_setsockopt_service 4 65336 NULL
++init_list_set_65351 init_list_set 2-3 65351 NULL
++dma_rx_requested_read_65354 dma_rx_requested_read 3 65354 NULL
++batadv_tt_save_orig_buffer_65361 batadv_tt_save_orig_buffer 4 65361 NULL
++alloc_cpu_rmap_65363 alloc_cpu_rmap 1 65363 NULL
++__ext4_new_inode_65370 __ext4_new_inode 5 65370 NULL
++strchr_65372 strchr 0 65372 NULL nohasharray
++SyS_writev_65372 SyS_writev 3 65372 &strchr_65372
++__alloc_bootmem_nopanic_65397 __alloc_bootmem_nopanic 1-2 65397 NULL
++trace_seq_to_user_65398 trace_seq_to_user 3 65398 NULL
++mtd_get_device_size_65400 mtd_get_device_size 0 65400 NULL
++iio_device_add_channel_sysfs_65406 iio_device_add_channel_sysfs 0 65406 NULL
++ocfs2_write_begin_nolock_65410 ocfs2_write_begin_nolock 3-4 65410 NULL
++drm_calloc_large_65421 drm_calloc_large 1-2 65421 NULL
++xpc_kzalloc_cacheline_aligned_65433 xpc_kzalloc_cacheline_aligned 1 65433 NULL
++usb_alloc_coherent_65444 usb_alloc_coherent 2 65444 NULL
++ath_rx_edma_init_65483 ath_rx_edma_init 2 65483 NULL
++dpcm_state_read_file_65489 dpcm_state_read_file 3 65489 NULL
++alloc_dr_65495 alloc_dr 2 65495 NULL
+diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
+new file mode 100644
+index 0000000..9db0d0e
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin.c
+@@ -0,0 +1,2114 @@
++/*
++ * Copyright 2011, 2012, 2013 by Emese Revfy <re.emese@gmail.com>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * Documentation:
++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against TYPE_MAX; on overflow an event is logged and the triggering process is killed.
++ *
++ * Usage:
++ * $ gcc -I`gcc -print-file-name=plugin`/include/c-family -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -ggdb -Wall -W -o size_overflow_plugin.so size_overflow_plugin.c
++ * $ gcc -fplugin=size_overflow_plugin.so test.c  -O2
++ */
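
For context, this is roughly how a declaration annotated for the plugin might look; the attribute
name and its argument-index parameters come from the attribute handlers defined below, while the
function itself is invented purely for illustration:

    /* hypothetical: ask the plugin to recompute and range-check argument 1 (count) */
    void *copy_into_buffer(unsigned long count, const void *src)
            __attribute__((size_overflow(1)));
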
++
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "intl.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "toplev.h"
++#include "function.h"
++#include "tree-flow.h"
++#include "plugin.h"
++#include "gimple.h"
++#include "diagnostic.h"
++#include "cfgloop.h"
++
++#if BUILDING_GCC_VERSION >= 4008
++#define TODO_dump_func 0
++#endif
++
++struct size_overflow_hash {
++      const struct size_overflow_hash * const next;
++      const char * const name;
++      const unsigned int param;
++};
++
++#include "size_overflow_hash.h"
++
++enum mark {
++      MARK_NO, MARK_YES, MARK_NOT_INTENTIONAL, MARK_TURN_OFF
++};
++
++enum err_code_conditions {
++      CAST_ONLY, FROM_CONST
++};
++
++static unsigned int call_count = 0;
++
++#define __unused __attribute__((__unused__))
++#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
++#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node))
++#define BEFORE_STMT true
++#define AFTER_STMT false
++#define CREATE_NEW_VAR NULL_TREE
++#define CODES_LIMIT 32
++#define MAX_PARAM 31
++#define MY_STMT GF_PLF_1
++#define NO_CAST_CHECK GF_PLF_2
++#define FROM_ARG true
++#define FROM_RET false
++
++#if BUILDING_GCC_VERSION == 4005
++#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
++#endif
++
++int plugin_is_GPL_compatible;
++void debug_gimple_stmt(gimple gs);
++
++static tree expand(struct pointer_set_t *visited, tree lhs);
++static enum mark pre_expand(struct pointer_set_t *visited, bool *search_err_code, const_tree lhs);
++static tree report_size_overflow_decl;
++static const_tree const_char_ptr_type_node;
++static unsigned int handle_function(void);
++static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before);
++static tree get_size_overflow_type(gimple stmt, const_tree node);
++static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
++static void print_missing_msg(tree func, unsigned int argnum);
++
++static struct plugin_info size_overflow_plugin_info = {
++      .version        = "20130410beta",
++      .help           = "no-size-overflow\tturn off size overflow checking\n",
++};
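
Given the help string above and GCC's usual -fplugin-arg-<plugin>-<key> convention for plugin
arguments, disabling the instrumentation for a build would presumably look like:

    $ gcc -fplugin=size_overflow_plugin.so -fplugin-arg-size_overflow_plugin-no-size-overflow test.c -O2
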
++
++static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
++{
++      unsigned int arg_count;
++      enum tree_code code = TREE_CODE(*node);
++
++      switch (code) {
++      case FUNCTION_DECL:
++              arg_count = type_num_arguments(TREE_TYPE(*node));
++              break;
++      case FUNCTION_TYPE:
++      case METHOD_TYPE:
++              arg_count = type_num_arguments(*node);
++              break;
++      default:
++              *no_add_attrs = true;
++              error("%s: %qE attribute only applies to functions", __func__, name);
++              return NULL_TREE;
++      }
++
++      for (; args; args = TREE_CHAIN(args)) {
++              tree position = TREE_VALUE(args);
++              if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_LOW(position) > arg_count ) {
++                      error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position));
++                      *no_add_attrs = true;
++              }
++      }
++      return NULL_TREE;
++}
++
++static const char* get_asm_name(tree node)
++{
++      return IDENTIFIER_POINTER(DECL_ASSEMBLER_NAME(node));
++}
++
++static tree handle_intentional_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
++{
++      unsigned int arg_count, arg_num;
++      enum tree_code code = TREE_CODE(*node);
++
++      switch (code) {
++      case FUNCTION_DECL:
++              arg_count = type_num_arguments(TREE_TYPE(*node));
++              break;
++      case FUNCTION_TYPE:
++      case METHOD_TYPE:
++              arg_count = type_num_arguments(*node);
++              break;
++      case FIELD_DECL:
++              arg_num = TREE_INT_CST_LOW(TREE_VALUE(args));
++              if (arg_num != 0) {
++                      *no_add_attrs = true;
++                      error("%s: %qE attribute parameter can only be 0 in structure fields", __func__, name);
++              }
++              return NULL_TREE;
++      default:
++              *no_add_attrs = true;
++              error("%qE attribute only applies to functions", name);
++              return NULL_TREE;
++      }
++
++      if (TREE_INT_CST_HIGH(TREE_VALUE(args)) != 0)
++              return NULL_TREE;
++
++      for (; args; args = TREE_CHAIN(args)) {
++              tree position = TREE_VALUE(args);
++              if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_LOW(position) > arg_count ) {
++                      error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position));
++                      *no_add_attrs = true;
++              }
++      }
++      return NULL_TREE;
++}
++
++static struct attribute_spec size_overflow_attr = {
++      .name                           = "size_overflow",
++      .min_length                     = 1,
++      .max_length                     = -1,
++      .decl_required                  = true,
++      .type_required                  = false,
++      .function_type_required         = false,
++      .handler                        = handle_size_overflow_attribute,
++#if BUILDING_GCC_VERSION >= 4007
++      .affects_type_identity          = false
++#endif
++};
++
++static struct attribute_spec intentional_overflow_attr = {
++      .name                           = "intentional_overflow",
++      .min_length                     = 1,
++      .max_length                     = -1,
++      .decl_required                  = true,
++      .type_required                  = false,
++      .function_type_required         = false,
++      .handler                        = handle_intentional_overflow_attribute,
++#if BUILDING_GCC_VERSION >= 4007
++      .affects_type_identity          = false
++#endif
++};
++
++static void register_attributes(void __unused *event_data, void __unused *data)
++{
++      register_attribute(&size_overflow_attr);
++      register_attribute(&intentional_overflow_attr);
++}
++
++// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
++static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
++{
++#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
++#define cwmixa( in ) { cwfold( in, m, k, h ); }
++#define cwmixb( in ) { cwfold( in, n, h, k ); }
++
++      unsigned int m = 0x57559429;
++      unsigned int n = 0x5052acdb;
++      const unsigned int *key4 = (const unsigned int *)key;
++      unsigned int h = len;
++      unsigned int k = len + seed + n;
++      unsigned long long p;
++
++      while (len >= 8) {
++              cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
++              len -= 8;
++      }
++      if (len >= 4) {
++              cwmixb(key4[0]) key4 += 1;
++              len -= 4;
++      }
++      if (len)
++              cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
++      cwmixb(h ^ (k + n));
++      return k ^ h;
++
++#undef cwfold
++#undef cwmixa
++#undef cwmixb
++}
++
++static inline unsigned int get_hash_num(const char *fndecl, const char *tree_codes, unsigned int len, unsigned int seed)
++{
++      unsigned int fn = CrapWow(fndecl, strlen(fndecl), seed) & 0xffff;
++      unsigned int codes = CrapWow(tree_codes, len, seed) & 0xffff;
++      return fn ^ codes;
++}
++
++static inline tree get_original_function_decl(tree fndecl)
++{
++      if (DECL_ABSTRACT_ORIGIN(fndecl))
++              return DECL_ABSTRACT_ORIGIN(fndecl);
++      return fndecl;
++}
++
++static inline gimple get_def_stmt(const_tree node)
++{
++      gcc_assert(node != NULL_TREE);
++      if (TREE_CODE(node) != SSA_NAME)
++              return NULL;
++      return SSA_NAME_DEF_STMT(node);
++}
++
++static unsigned char get_tree_code(const_tree type)
++{
++      switch (TREE_CODE(type)) {
++      case ARRAY_TYPE:
++              return 0;
++      case BOOLEAN_TYPE:
++              return 1;
++      case ENUMERAL_TYPE:
++              return 2;
++      case FUNCTION_TYPE:
++              return 3;
++      case INTEGER_TYPE:
++              return 4;
++      case POINTER_TYPE:
++              return 5;
++      case RECORD_TYPE:
++              return 6;
++      case UNION_TYPE:
++              return 7;
++      case VOID_TYPE:
++              return 8;
++      case REAL_TYPE:
++              return 9;
++      case VECTOR_TYPE:
++              return 10;
++      case REFERENCE_TYPE:
++              return 11;
++      case OFFSET_TYPE:
++              return 12;
++      case COMPLEX_TYPE:
++              return 13;
++      default:
++              debug_tree((tree)type);
++              gcc_unreachable();
++      }
++}
++
++static size_t add_type_codes(const_tree type, unsigned char *tree_codes, size_t len)
++{
++      gcc_assert(type != NULL_TREE);
++
++      while (type && len < CODES_LIMIT) {
++              tree_codes[len] = get_tree_code(type);
++              len++;
++              type = TREE_TYPE(type);
++      }
++      return len;
++}
++
++static unsigned int get_function_decl(const_tree fndecl, unsigned char *tree_codes)
++{
++      const_tree arg, result, arg_field, type = TREE_TYPE(fndecl);
++      enum tree_code code = TREE_CODE(type);
++      size_t len = 0;
++
++      gcc_assert(code == FUNCTION_TYPE || code == METHOD_TYPE);
++
++      arg = TYPE_ARG_TYPES(type);
++      // skip builtins __builtin_constant_p
++      if (!arg && DECL_BUILT_IN(fndecl))
++              return 0;
++
++      if (TREE_CODE_CLASS(code) == tcc_type)
++              result = type;
++      else
++              result = DECL_RESULT(fndecl);
++
++      gcc_assert(result != NULL_TREE);
++      len = add_type_codes(TREE_TYPE(result), tree_codes, len);
++
++      if (arg == NULL_TREE) {
++              gcc_assert(CODE_CONTAINS_STRUCT(TREE_CODE(fndecl), TS_DECL_NON_COMMON));
++              arg_field = DECL_ARGUMENT_FLD(fndecl);
++              if (arg_field == NULL_TREE)
++                      return 0;
++              arg = TREE_TYPE(arg_field);
++              len = add_type_codes(arg, tree_codes, len);
++              gcc_assert(len != 0);
++              return len;
++      }
++
++      gcc_assert(arg != NULL_TREE && TREE_CODE(arg) == TREE_LIST);
++      while (arg && len < CODES_LIMIT) {
++              len = add_type_codes(TREE_VALUE(arg), tree_codes, len);
++              arg = TREE_CHAIN(arg);
++      }
++
++      gcc_assert(len != 0);
++      return len;
++}
++
++static const struct size_overflow_hash *get_function_hash(tree fndecl)
++{
++      unsigned int hash;
++      const struct size_overflow_hash *entry;
++      unsigned char tree_codes[CODES_LIMIT];
++      size_t len;
++      const char *func_name;
++
++      fndecl = get_original_function_decl(fndecl);
++      len = get_function_decl(fndecl, tree_codes);
++      if (len == 0)
++              return NULL;
++
++      func_name = get_asm_name(fndecl);
++      hash = get_hash_num(func_name, (const char*) tree_codes, len, 0);
++
++      entry = size_overflow_hash[hash];
++      while (entry) {
++              if (!strcmp(entry->name, func_name))
++                      return entry;
++              entry = entry->next;
++      }
++
++      return NULL;
++}
++
++static bool is_bool(const_tree node)
++{
++      const_tree type;
++
++      if (node == NULL_TREE)
++              return false;
++
++      type = TREE_TYPE(node);
++      if (!INTEGRAL_TYPE_P(type))
++              return false;
++      if (TREE_CODE(type) == BOOLEAN_TYPE)
++              return true;
++      if (TYPE_PRECISION(type) == 1)
++              return true;
++      return false;
++}
++
++static bool skip_types(const_tree var)
++{
++      tree type;
++
++      if (is_gimple_constant(var))
++              return true;
++
++      switch (TREE_CODE(var)) {
++              case ADDR_EXPR:
++#if BUILDING_GCC_VERSION >= 4006
++              case MEM_REF:
++#endif
++              case ARRAY_REF:
++              case BIT_FIELD_REF:
++              case INDIRECT_REF:
++              case TARGET_MEM_REF:
++                      return true;
++              case PARM_DECL:
++              case VAR_DECL:
++              case COMPONENT_REF:
++                      return false;
++              default:
++                      break;
++      }
++
++      gcc_assert(TREE_CODE(var) == SSA_NAME);
++
++      type = TREE_TYPE(var);
++      switch (TREE_CODE(type)) {
++              case INTEGER_TYPE:
++              case ENUMERAL_TYPE:
++                      return false;
++              case BOOLEAN_TYPE:
++                      return is_bool(var);
++              default:
++                      break;
++      }
++
++      gcc_assert(TREE_CODE(type) == POINTER_TYPE);
++
++      type = TREE_TYPE(type);
++      gcc_assert(type != NULL_TREE);
++      switch (TREE_CODE(type)) {
++              case RECORD_TYPE:
++              case POINTER_TYPE:
++              case ARRAY_TYPE:
++                      return true;
++              case VOID_TYPE:
++              case INTEGER_TYPE:
++              case UNION_TYPE:
++                      return false;
++              default:
++                      break;
++      }
++
++      debug_tree((tree)var);
++      gcc_unreachable();
++}
++
++static unsigned int find_arg_number(const_tree arg, tree func)
++{
++      tree var;
++      unsigned int argnum = 1;
++
++      if (TREE_CODE(arg) == SSA_NAME)
++              arg = SSA_NAME_VAR(arg);
++
++      for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var), argnum++) {
++              if (!operand_equal_p(arg, var, 0) && strcmp(NAME(var), NAME(arg)))
++                      continue;
++              if (!skip_types(var))
++                      return argnum;
++      }
++
++      return 0;
++}
++
++static tree create_new_var(tree type)
++{
++      tree new_var = create_tmp_var(type, "cicus");
++
++#if BUILDING_GCC_VERSION <= 4007
++      add_referenced_var(new_var);
++      mark_sym_for_renaming(new_var);
++#endif
++      return new_var;
++}
++
++static gimple create_binary_assign(enum tree_code code, gimple stmt, tree rhs1, tree rhs2)
++{
++      gimple assign;
++      gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++      tree type = TREE_TYPE(rhs1);
++      tree lhs = create_new_var(type);
++
++      gcc_assert(types_compatible_p(type, TREE_TYPE(rhs2)));
++      assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
++      gimple_set_lhs(assign, make_ssa_name(lhs, assign));
++
++      gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
++      update_stmt(assign);
++      gimple_set_plf(assign, MY_STMT, true);
++      return assign;
++}
++
++static tree cast_a_tree(tree type, tree var)
++{
++      gcc_assert(type != NULL_TREE);
++      gcc_assert(var != NULL_TREE);
++      gcc_assert(fold_convertible_p(type, var));
++
++      return fold_convert(type, var);
++}
++
++static tree get_lhs(const_gimple stmt)
++{
++      switch (gimple_code(stmt)) {
++      case GIMPLE_ASSIGN:
++              return gimple_get_lhs(stmt);
++      case GIMPLE_PHI:
++              return gimple_phi_result(stmt);
++      case GIMPLE_CALL:
++              return gimple_call_lhs(stmt);
++      default:
++              return NULL_TREE;
++      }
++}
++
++static bool skip_cast(tree dst_type, const_tree rhs, bool force)
++{
++      const_gimple def_stmt = get_def_stmt(rhs);
++
++      if (force)
++              return false;
++
++      if (is_gimple_constant(rhs))
++              return false;
++
++      if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
++              return false;
++
++      if (!types_compatible_p(dst_type, TREE_TYPE(rhs)))
++              return false;
++
++      // DI type can be on 32 bit (from create_assign) but overflow type stays DI
++      if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
++              return false;
++
++      return true;
++}
++
++static gimple build_cast_stmt(tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force)
++{
++      gimple assign, def_stmt;
++
++      gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE);
++      if (gsi_end_p(*gsi) && before == AFTER_STMT)
++              gcc_unreachable();
++
++      def_stmt = get_def_stmt(rhs);
++      if (skip_cast(dst_type, rhs, force) && gimple_plf(def_stmt, MY_STMT))
++              return def_stmt;
++
++      if (lhs == CREATE_NEW_VAR)
++              lhs = create_new_var(dst_type);
++
++      assign = gimple_build_assign(lhs, cast_a_tree(dst_type, rhs));
++
++      if (!gsi_end_p(*gsi)) {
++              location_t loc = gimple_location(gsi_stmt(*gsi));
++              gimple_set_location(assign, loc);
++      }
++
++      gimple_set_lhs(assign, make_ssa_name(lhs, assign));
++
++      if (before)
++              gsi_insert_before(gsi, assign, GSI_NEW_STMT);
++      else
++              gsi_insert_after(gsi, assign, GSI_NEW_STMT);
++      update_stmt(assign);
++      gimple_set_plf(assign, MY_STMT, true);
++
++      return assign;
++}
++
++static tree cast_to_new_size_overflow_type(gimple stmt, tree rhs, tree size_overflow_type, bool before)
++{
++      gimple_stmt_iterator gsi;
++      tree lhs;
++      const_gimple new_stmt;
++
++      if (rhs == NULL_TREE)
++              return NULL_TREE;
++
++      gsi = gsi_for_stmt(stmt);
++      new_stmt = build_cast_stmt(size_overflow_type, rhs, CREATE_NEW_VAR, &gsi, before, false);
++
++      lhs = get_lhs(new_stmt);
++      gcc_assert(lhs != NULL_TREE);
++      return lhs;
++}
++
++static tree cast_to_TI_type(gimple stmt, tree node)
++{
++      gimple_stmt_iterator gsi;
++      gimple cast_stmt;
++      tree type = TREE_TYPE(node);
++
++      if (types_compatible_p(type, intTI_type_node))
++              return node;
++
++      gsi = gsi_for_stmt(stmt);
++      cast_stmt = build_cast_stmt(intTI_type_node, node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
++      return gimple_get_lhs(cast_stmt);
++}
++
++static void check_function_hash(const_gimple stmt)
++{
++      tree func;
++      const struct size_overflow_hash *hash;
++
++      if (gimple_code(stmt) != GIMPLE_CALL)
++              return;
++
++      func = gimple_call_fndecl(stmt);
++      //fs/xattr.c D.34222_15 = D.34219_14 (dentry_3(D), name_7(D), 0B, 0);
++      if (func == NULL_TREE)
++              return;
++
++      hash = get_function_hash(func);
++      if (!hash)
++              print_missing_msg(func, 0);
++}
++
++static tree create_assign(struct pointer_set_t *visited, gimple oldstmt, tree rhs1, bool before)
++{
++      tree lhs, new_lhs;
++      gimple_stmt_iterator gsi;
++
++      if (rhs1 == NULL_TREE) {
++              debug_gimple_stmt(oldstmt);
++              error("%s: rhs1 is NULL_TREE", __func__);
++              gcc_unreachable();
++      }
++
++      switch (gimple_code(oldstmt)) {
++      case GIMPLE_ASM:
++              lhs = rhs1;
++              break;
++      case GIMPLE_CALL:
++              lhs = gimple_call_lhs(oldstmt);
++              break;
++      case GIMPLE_ASSIGN:
++              lhs = gimple_get_lhs(oldstmt);
++              break;
++      default:
++              debug_gimple_stmt(oldstmt);
++              gcc_unreachable();
++      }
++
++      gsi = gsi_for_stmt(oldstmt);
++      pointer_set_insert(visited, oldstmt);
++      if (lookup_stmt_eh_lp(oldstmt) != 0) {
++              basic_block next_bb, cur_bb;
++              const_edge e;
++
++              gcc_assert(before == false);
++              gcc_assert(stmt_can_throw_internal(oldstmt));
++              gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
++              gcc_assert(!gsi_end_p(gsi));
++
++              cur_bb = gimple_bb(oldstmt);
++              next_bb = cur_bb->next_bb;
++              e = find_edge(cur_bb, next_bb);
++              gcc_assert(e != NULL);
++              gcc_assert(e->flags & EDGE_FALLTHRU);
++
++              gsi = gsi_after_labels(next_bb);
++              gcc_assert(!gsi_end_p(gsi));
++
++              before = true;
++              oldstmt = gsi_stmt(gsi);
++      }
++
++      new_lhs = cast_to_new_size_overflow_type(oldstmt, rhs1, get_size_overflow_type(oldstmt, lhs), before);
++      return new_lhs;
++}
++
++static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3)
++{
++      gimple stmt;
++      gimple_stmt_iterator gsi;
++      tree size_overflow_type, new_var, lhs = gimple_get_lhs(oldstmt);
++
++      if (gimple_plf(oldstmt, MY_STMT))
++              return lhs;
++
++      if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
++              rhs1 = gimple_assign_rhs1(oldstmt);
++              rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT);
++      }
++      if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
++              rhs2 = gimple_assign_rhs2(oldstmt);
++              rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT);
++      }
++
++      stmt = gimple_copy(oldstmt);
++      gimple_set_location(stmt, gimple_location(oldstmt));
++      gimple_set_plf(stmt, MY_STMT, true);
++
++      if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
++              gimple_assign_set_rhs_code(stmt, MULT_EXPR);
++
++      size_overflow_type = get_size_overflow_type(oldstmt, node);
++
++      new_var = create_new_var(size_overflow_type);
++      new_var = make_ssa_name(new_var, stmt);
++      gimple_set_lhs(stmt, new_var);
++
++      if (rhs1 != NULL_TREE)
++              gimple_assign_set_rhs1(stmt, rhs1);
++
++      if (rhs2 != NULL_TREE)
++              gimple_assign_set_rhs2(stmt, rhs2);
++#if BUILDING_GCC_VERSION >= 4007
++      if (rhs3 != NULL_TREE)
++              gimple_assign_set_rhs3(stmt, rhs3);
++#endif
++      gimple_set_vuse(stmt, gimple_vuse(oldstmt));
++      gimple_set_vdef(stmt, gimple_vdef(oldstmt));
++
++      gsi = gsi_for_stmt(oldstmt);
++      gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
++      update_stmt(stmt);
++      pointer_set_insert(visited, oldstmt);
++      return gimple_get_lhs(stmt);
++}
++
++static tree cast_parm_decl(tree phi_ssa_name, tree arg, tree size_overflow_type)
++{
++      basic_block first_bb;
++      gimple assign;
++      gimple_stmt_iterator gsi;
++
++      first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
++      gcc_assert(dom_info_available_p(CDI_DOMINATORS));
++      set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR);
++
++      gsi = gsi_start_bb(first_bb);
++      assign = build_cast_stmt(size_overflow_type, arg, phi_ssa_name, &gsi, BEFORE_STMT, false);
++      return gimple_get_lhs(assign);
++}
++
++static tree use_phi_ssa_name(tree phi_ssa_name, tree new_arg)
++{
++      gimple_stmt_iterator gsi;
++      const_gimple assign;
++      gimple def_stmt = get_def_stmt(new_arg);
++
++      if (gimple_code(def_stmt) == GIMPLE_PHI) {
++              gsi = gsi_after_labels(gimple_bb(def_stmt));
++              assign = build_cast_stmt(TREE_TYPE(new_arg), new_arg, phi_ssa_name, &gsi, BEFORE_STMT, true);
++      } else {
++              gsi = gsi_for_stmt(def_stmt);
++              assign = build_cast_stmt(TREE_TYPE(new_arg), new_arg, phi_ssa_name, &gsi, AFTER_STMT, true);
++      }
++
++      return gimple_get_lhs(assign);
++}
++
++static tree cast_visited_phi_arg(tree phi_ssa_name, tree arg, tree size_overflow_type)
++{
++      basic_block bb;
++      gimple_stmt_iterator gsi;
++      const_gimple assign, def_stmt;
++
++      def_stmt = get_def_stmt(arg);
++      bb = gimple_bb(def_stmt);
++      gcc_assert(bb->index != 0);
++      gsi = gsi_after_labels(bb);
++
++      assign = build_cast_stmt(size_overflow_type, arg, phi_ssa_name, &gsi, BEFORE_STMT, false);
++      return gimple_get_lhs(assign);
++}
++
++static tree create_new_phi_arg(tree phi_ssa_name, tree new_arg, tree arg, gimple oldstmt)
++{
++      tree size_overflow_type;
++      const_gimple def_stmt = get_def_stmt(arg);
++
++      if (phi_ssa_name != NULL_TREE)
++              phi_ssa_name = SSA_NAME_VAR(phi_ssa_name);
++
++      size_overflow_type = get_size_overflow_type(oldstmt, arg);
++
++      if (new_arg != NULL_TREE) {
++              gcc_assert(types_compatible_p(TREE_TYPE(new_arg), size_overflow_type));
++              return use_phi_ssa_name(phi_ssa_name, new_arg);
++      }
++
++      switch(gimple_code(def_stmt)) {
++      case GIMPLE_PHI:
++              return cast_visited_phi_arg(phi_ssa_name, arg, size_overflow_type);
++      case GIMPLE_NOP:
++              return cast_parm_decl(phi_ssa_name, arg, size_overflow_type);
++      default:
++              debug_gimple_stmt((gimple)def_stmt);
++              gcc_unreachable();
++      }
++}
++
++static gimple overflow_create_phi_node(gimple oldstmt, tree result)
++{
++      basic_block bb;
++      gimple phi;
++      gimple_seq seq;
++      gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
++
++      bb = gsi_bb(gsi);
++
++      phi = create_phi_node(result, bb);
++      gimple_phi_set_result(phi, make_ssa_name(result, phi));
++      seq = phi_nodes(bb);
++      gsi = gsi_last(seq);
++      gsi_remove(&gsi, false);
++
++      gsi = gsi_for_stmt(oldstmt);
++      gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
++      gimple_set_bb(phi, bb);
++      gimple_set_plf(phi, MY_STMT, true);
++      return phi;
++}
++
++static tree handle_phi(struct pointer_set_t *visited, tree orig_result)
++{
++      gimple new_phi = NULL;
++      gimple oldstmt = get_def_stmt(orig_result);
++      tree phi_ssa_name = NULL_TREE;
++      unsigned int i;
++
++      pointer_set_insert(visited, oldstmt);
++      for (i = 0; i < gimple_phi_num_args(oldstmt); i++) {
++              tree arg, new_arg;
++
++              arg = gimple_phi_arg_def(oldstmt, i);
++
++              new_arg = expand(visited, arg);
++              new_arg = create_new_phi_arg(phi_ssa_name, new_arg, arg, oldstmt);
++              if (i == 0) {
++                      phi_ssa_name = new_arg;
++                      new_phi = overflow_create_phi_node(oldstmt, SSA_NAME_VAR(phi_ssa_name));
++              }
++
++              gcc_assert(new_phi != NULL);
++              add_phi_arg(new_phi, new_arg, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt));
++      }
++
++      gcc_assert(new_phi != NULL);
++      update_stmt(new_phi);
++      return gimple_phi_result(new_phi);
++}
++
++static tree change_assign_rhs(gimple stmt, const_tree orig_rhs, tree new_rhs)
++{
++      const_gimple assign;
++      gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++      tree origtype = TREE_TYPE(orig_rhs);
++
++      gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
++
++      assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
++      return gimple_get_lhs(assign);
++}
++
++static bool is_a_cast_and_const_overflow(const_tree no_const_rhs)
++{
++      const_tree rhs1, lhs, rhs1_type, lhs_type;
++      enum machine_mode lhs_mode, rhs_mode;
++      gimple def_stmt = get_def_stmt(no_const_rhs);
++
++      if (!gimple_assign_cast_p(def_stmt))
++              return false;
++
++      rhs1 = gimple_assign_rhs1(def_stmt);
++      lhs = gimple_get_lhs(def_stmt);
++      rhs1_type = TREE_TYPE(rhs1);
++      lhs_type = TREE_TYPE(lhs);
++      rhs_mode = TYPE_MODE(rhs1_type);
++      lhs_mode = TYPE_MODE(lhs_type);
++      if (TYPE_UNSIGNED(lhs_type) == TYPE_UNSIGNED(rhs1_type) || lhs_mode != rhs_mode)
++              return false;
++
++      return true;
++}
++
++static tree create_cast_assign(struct pointer_set_t *visited, gimple stmt)
++{
++      tree rhs1 = gimple_assign_rhs1(stmt);
++      tree lhs = gimple_get_lhs(stmt);
++      const_tree rhs1_type = TREE_TYPE(rhs1);
++      const_tree lhs_type = TREE_TYPE(lhs);
++
++      if (TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
++              return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++      return create_assign(visited, stmt, rhs1, AFTER_STMT);
++}
++
++static bool no_uses(tree node)
++{
++      imm_use_iterator imm_iter;
++      use_operand_p use_p;
++
++      FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) {
++              const_gimple use_stmt = USE_STMT(use_p);
++              if (use_stmt == NULL)
++                      return true;
++              if (is_gimple_debug(use_stmt))
++                      continue;
++              if (!(gimple_bb(use_stmt)->flags & BB_REACHABLE))
++                      continue;
++              return false;
++      }
++      return true;
++}
++
++// 3.8.5 mm/page-writeback.c __ilog2_u64(): ret, uint + uintmax; uint -> int; int max
++static bool is_const_plus_unsigned_signed_truncation(const_tree lhs)
++{
++      tree rhs1, lhs_type, rhs_type, rhs2, not_const_rhs;
++      gimple def_stmt = get_def_stmt(lhs);
++
++      if (!def_stmt || !gimple_assign_cast_p(def_stmt))
++              return false;
++
++      rhs1 = gimple_assign_rhs1(def_stmt);
++      rhs_type = TREE_TYPE(rhs1);
++      lhs_type = TREE_TYPE(lhs);
++      if (TYPE_UNSIGNED(lhs_type) || !TYPE_UNSIGNED(rhs_type))
++              return false;
++      if (TYPE_MODE(lhs_type) != TYPE_MODE(rhs_type))
++              return false;
++
++      def_stmt = get_def_stmt(rhs1);
++      if (!def_stmt || gimple_code(def_stmt) != GIMPLE_ASSIGN || gimple_num_ops(def_stmt) != 3)
++              return false;
++
++      if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR)
++              return false;
++
++      rhs1 = gimple_assign_rhs1(def_stmt);
++      rhs2 = gimple_assign_rhs2(def_stmt);
++      if (!is_gimple_constant(rhs1) && !is_gimple_constant(rhs2))
++              return false;
++
++      if (is_gimple_constant(rhs2))
++              not_const_rhs = rhs1;
++      else
++              not_const_rhs = rhs2;
++
++      return no_uses(not_const_rhs);
++}
++
++static bool skip_lhs_cast_check(const_gimple stmt)
++{
++      const_tree rhs = gimple_assign_rhs1(stmt);
++      const_gimple def_stmt = get_def_stmt(rhs);
++
++      // 3.8.2 kernel/futex_compat.c compat_exit_robust_list(): get_user() 64 ulong -> int (compat_long_t), int max
++      if (gimple_code(def_stmt) == GIMPLE_ASM)
++              return true;
++
++      if (is_const_plus_unsigned_signed_truncation(rhs))
++              return true;
++
++      return false;
++}
++
++static tree create_cast_overflow_check(struct pointer_set_t *visited, tree new_rhs1, gimple stmt)
++{
++      bool cast_lhs, cast_rhs;
++      tree lhs = gimple_get_lhs(stmt);
++      tree rhs = gimple_assign_rhs1(stmt);
++      const_tree lhs_type = TREE_TYPE(lhs);
++      const_tree rhs_type = TREE_TYPE(rhs);
++      enum machine_mode lhs_mode = TYPE_MODE(lhs_type);
++      enum machine_mode rhs_mode = TYPE_MODE(rhs_type);
++      unsigned int lhs_size = GET_MODE_BITSIZE(lhs_mode);
++      unsigned int rhs_size = GET_MODE_BITSIZE(rhs_mode);
++
++      static bool check_lhs[3][4] = {
++              // ss    su     us     uu
++              { false, true,  true,  false }, // lhs > rhs
++              { false, false, false, false }, // lhs = rhs
++              { true,  true,  true,  true  }, // lhs < rhs
++      };
++
++      static bool check_rhs[3][4] = {
++              // ss    su     us     uu
++              { true,  false, true,  true  }, // lhs > rhs
++              { true,  false, true,  true  }, // lhs = rhs
++              { true,  false, true,  true  }, // lhs < rhs
++      };
++
++      // skip lhs check on signed SI -> HI cast or signed SI -> QI cast !!!!
++      if (rhs_mode == SImode && !TYPE_UNSIGNED(rhs_type) && (lhs_mode == HImode || lhs_mode == QImode))
++              return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++      if (lhs_size > rhs_size) {
++              cast_lhs = check_lhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++              cast_rhs = check_rhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++      } else if (lhs_size == rhs_size) {
++              cast_lhs = check_lhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++              cast_rhs = check_rhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++      } else {
++              cast_lhs = check_lhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++              cast_rhs = check_rhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++      }
++
++      if (!cast_lhs && !cast_rhs)
++              return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
++
++      if (cast_lhs && !skip_lhs_cast_check(stmt))
++              check_size_overflow(stmt, TREE_TYPE(new_rhs1), new_rhs1, lhs, BEFORE_STMT);
++
++      if (cast_rhs)
++              check_size_overflow(stmt, TREE_TYPE(new_rhs1), new_rhs1, rhs, BEFORE_STMT);
++
++      return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
++}
++
++static tree handle_unary_rhs(struct pointer_set_t *visited, gimple stmt)
++{
++      tree rhs1, new_rhs1, lhs = gimple_get_lhs(stmt);
++
++      if (gimple_plf(stmt, MY_STMT))
++              return lhs;
++
++      rhs1 = gimple_assign_rhs1(stmt);
++      if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
++              return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++      new_rhs1 = expand(visited, rhs1);
++
++      if (new_rhs1 == NULL_TREE)
++              return create_cast_assign(visited, stmt);
++
++      if (gimple_plf(stmt, NO_CAST_CHECK))
++              return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
++
++      if (gimple_assign_rhs_code(stmt) == BIT_NOT_EXPR) {
++              tree size_overflow_type = get_size_overflow_type(stmt, rhs1);
++
++              new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++              check_size_overflow(stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT);
++              return create_assign(visited, stmt, lhs, AFTER_STMT);
++      }
++
++      if (!gimple_assign_cast_p(stmt))
++              return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
++
++      return create_cast_overflow_check(visited, new_rhs1, stmt);
++}
++
++static tree handle_unary_ops(struct pointer_set_t *visited, gimple stmt)
++{
++      tree rhs1, lhs = gimple_get_lhs(stmt);
++      gimple def_stmt = get_def_stmt(lhs);
++
++      gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP);
++      rhs1 = gimple_assign_rhs1(def_stmt);
++
++      if (is_gimple_constant(rhs1))
++              return create_assign(visited, def_stmt, lhs, AFTER_STMT);
++
++      switch (TREE_CODE(rhs1)) {
++      case SSA_NAME:
++              return handle_unary_rhs(visited, def_stmt);
++      case ARRAY_REF:
++      case BIT_FIELD_REF:
++      case ADDR_EXPR:
++      case COMPONENT_REF:
++      case INDIRECT_REF:
++#if BUILDING_GCC_VERSION >= 4006
++      case MEM_REF:
++#endif
++      case TARGET_MEM_REF:
++              return create_assign(visited, def_stmt, lhs, AFTER_STMT);
++      case PARM_DECL:
++      case VAR_DECL:
++              return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++      default:
++              debug_gimple_stmt(def_stmt);
++              debug_tree(rhs1);
++              gcc_unreachable();
++      }
++}
++
++static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
++{
++      gimple cond_stmt;
++      gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
++
++      cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
++      gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
++      update_stmt(cond_stmt);
++}
++
++static tree create_string_param(tree string)
++{
++      tree i_type, a_type;
++      const int length = TREE_STRING_LENGTH(string);
++
++      gcc_assert(length > 0);
++
++      i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
++      a_type = build_array_type(char_type_node, i_type);
++
++      TREE_TYPE(string) = a_type;
++      TREE_CONSTANT(string) = 1;
++      TREE_READONLY(string) = 1;
++
++      return build1(ADDR_EXPR, ptr_type_node, string);
++}
++
++static void insert_cond_result(basic_block bb_true, const_gimple stmt, const_tree arg, bool min)
++{
++      gimple func_stmt;
++      const_gimple def_stmt;
++      const_tree loc_line;
++      tree loc_file, ssa_name, current_func;
++      expanded_location xloc;
++      char *ssa_name_buf;
++      int len;
++      gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
++
++      def_stmt = get_def_stmt(arg);
++      xloc = expand_location(gimple_location(def_stmt));
++
++      if (!gimple_has_location(def_stmt)) {
++              xloc = expand_location(gimple_location(stmt));
++              if (!gimple_has_location(stmt))
++                      xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
++      }
++
++      loc_line = build_int_cstu(unsigned_type_node, xloc.line);
++
++      loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
++      loc_file = create_string_param(loc_file);
++
++      current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl));
++      current_func = create_string_param(current_func);
++
++      gcc_assert(DECL_NAME(SSA_NAME_VAR(arg)) != NULL);
++      call_count++;
++      len = asprintf(&ssa_name_buf, "%s_%u %s, count: %u\n", NAME(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg), min ? "min" : "max", call_count);
++      gcc_assert(len > 0);
++      ssa_name = build_string(len + 1, ssa_name_buf);
++      free(ssa_name_buf);
++      ssa_name = create_string_param(ssa_name);
++
++      // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
++      func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name);
++
++      gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
++}
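
The call emitted here targets report_size_overflow_decl, whose C-level signature is the one in the
comment above; the function itself has to be provided by the instrumented program (the kernel, in
the original use). A minimal user-space sketch, assuming only the behaviour described in the file
header (log the event, then kill the offending process):

    #include <stdio.h>
    #include <stdlib.h>

    /* illustrative only; matches the four-argument signature noted above */
    void report_size_overflow(const char *file, unsigned int line,
                              const char *func, const char *ssa_name)
    {
            /* ssa_name already ends with a newline, see the asprintf() format above */
            fprintf(stderr, "size_overflow: %s:%u %s %s", file, line, func, ssa_name);
            abort();        /* the kernel implementation kills the triggering process instead */
    }
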
++
++static void __unused print_the_code_insertions(const_gimple stmt)
++{
++      location_t loc = gimple_location(stmt);
++
++      inform(loc, "Integer size_overflow check applied here.");
++}
++
++static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min)
++{
++      basic_block cond_bb, join_bb, bb_true;
++      edge e;
++      gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++
++      cond_bb = gimple_bb(stmt);
++      if (before)
++              gsi_prev(&gsi);
++      if (gsi_end_p(gsi))
++              e = split_block_after_labels(cond_bb);
++      else
++              e = split_block(cond_bb, gsi_stmt(gsi));
++      cond_bb = e->src;
++      join_bb = e->dest;
++      e->flags = EDGE_FALSE_VALUE;
++      e->probability = REG_BR_PROB_BASE;
++
++      bb_true = create_empty_bb(cond_bb);
++      make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
++      make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
++      make_edge(bb_true, join_bb, EDGE_FALLTHRU);
++
++      gcc_assert(dom_info_available_p(CDI_DOMINATORS));
++      set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
++      set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
++
++      if (current_loops != NULL) {
++              gcc_assert(cond_bb->loop_father == join_bb->loop_father);
++              add_bb_to_loop(bb_true, cond_bb->loop_father);
++      }
++
++      insert_cond(cond_bb, arg, cond_code, type_value);
++      insert_cond_result(bb_true, stmt, arg, min);
++
++//    print_the_code_insertions(stmt);
++}
++
++static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before)
++{
++      const_tree rhs_type = TREE_TYPE(rhs);
++      tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min;
++
++      gcc_assert(rhs_type != NULL_TREE);
++      if (TREE_CODE(rhs_type) == POINTER_TYPE)
++              return;
++
++      gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE);
++
++      if (is_const_plus_unsigned_signed_truncation(rhs))
++              return;
++
++      type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type));
++      // typemax (-1) < typemin (0)
++      if (TREE_OVERFLOW(type_max))
++              return;
++
++      type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
++
++      cast_rhs_type = TREE_TYPE(cast_rhs);
++      type_max_type = TREE_TYPE(type_max);
++      type_min_type = TREE_TYPE(type_min);
++      gcc_assert(types_compatible_p(cast_rhs_type, type_max_type));
++      gcc_assert(types_compatible_p(type_max_type, type_min_type));
++
++      insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max, before, false);
++      insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min, before, true);
++}
++
++static bool is_a_constant_overflow(const_gimple stmt, const_tree rhs)
++{
++      if (gimple_assign_rhs_code(stmt) == MIN_EXPR)
++              return false;
++      if (!is_gimple_constant(rhs))
++              return false;
++      return true;
++}
++
++static tree get_def_stmt_rhs(const_tree var)
++{
++      tree rhs1, def_stmt_rhs1;
++      gimple rhs1_def_stmt, def_stmt_rhs1_def_stmt, def_stmt;
++
++      def_stmt = get_def_stmt(var);
++      gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP && gimple_plf(def_stmt, MY_STMT) && gimple_assign_cast_p(def_stmt));
++
++      rhs1 = gimple_assign_rhs1(def_stmt);
++      rhs1_def_stmt = get_def_stmt(rhs1);
++      if (!gimple_assign_cast_p(rhs1_def_stmt))
++              return rhs1;
++
++      def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
++      def_stmt_rhs1_def_stmt = get_def_stmt(def_stmt_rhs1);
++
++      switch (gimple_code(def_stmt_rhs1_def_stmt)) {
++      case GIMPLE_CALL:
++      case GIMPLE_NOP:
++      case GIMPLE_ASM:
++      case GIMPLE_PHI:
++              return def_stmt_rhs1;
++      case GIMPLE_ASSIGN:
++              return rhs1;
++      default:
++              debug_gimple_stmt(def_stmt_rhs1_def_stmt);
++              gcc_unreachable();
++      }
++}
++
++static tree handle_intentional_overflow(struct pointer_set_t *visited, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2)
++{
++      tree new_rhs, orig_rhs;
++      void (*gimple_assign_set_rhs)(gimple, tree);
++      tree rhs1 = gimple_assign_rhs1(stmt);
++      tree rhs2 = gimple_assign_rhs2(stmt);
++      tree lhs = gimple_get_lhs(stmt);
++
++      if (!check_overflow)
++              return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++      if (change_rhs == NULL_TREE)
++              return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++      if (new_rhs2 == NULL_TREE) {
++              orig_rhs = rhs1;
++              gimple_assign_set_rhs = &gimple_assign_set_rhs1;
++      } else {
++              orig_rhs = rhs2;
++              gimple_assign_set_rhs = &gimple_assign_set_rhs2;
++      }
++
++      check_size_overflow(stmt, TREE_TYPE(change_rhs), change_rhs, orig_rhs, BEFORE_STMT);
++
++      new_rhs = change_assign_rhs(stmt, orig_rhs, change_rhs);
++      gimple_assign_set_rhs(stmt, new_rhs);
++      update_stmt(stmt);
++
++      return create_assign(visited, stmt, lhs, AFTER_STMT);
++}
++
++static bool is_subtraction_special(const_gimple stmt)
++{
++      gimple rhs1_def_stmt, rhs2_def_stmt;
++      const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1, rhs1_def_stmt_lhs, rhs2_def_stmt_lhs;
++      enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode, rhs1_def_stmt_lhs_mode, rhs2_def_stmt_lhs_mode;
++      const_tree rhs1 = gimple_assign_rhs1(stmt);
++      const_tree rhs2 = gimple_assign_rhs2(stmt);
++
++      if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
++              return false;
++
++      gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME);
++
++      if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
++              return false;
++
++      rhs1_def_stmt = get_def_stmt(rhs1);
++      rhs2_def_stmt = get_def_stmt(rhs2);
++      if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt))
++              return false;
++
++      rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
++      rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
++      rhs1_def_stmt_lhs = gimple_get_lhs(rhs1_def_stmt);
++      rhs2_def_stmt_lhs = gimple_get_lhs(rhs2_def_stmt);
++      rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
++      rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
++      rhs1_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_lhs));
++      rhs2_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_lhs));
++      if (GET_MODE_BITSIZE(rhs1_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs1_def_stmt_lhs_mode))
++              return false;
++      if (GET_MODE_BITSIZE(rhs2_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs2_def_stmt_lhs_mode))
++              return false;
++
++      gimple_set_plf(rhs1_def_stmt, NO_CAST_CHECK, true);
++      gimple_set_plf(rhs2_def_stmt, NO_CAST_CHECK, true);
++      return true;
++}
++
++static tree handle_integer_truncation(struct pointer_set_t *visited, const_tree lhs)
++{
++      tree new_rhs1, new_rhs2;
++      tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
++      gimple assign, stmt = get_def_stmt(lhs);
++      tree rhs1 = gimple_assign_rhs1(stmt);
++      tree rhs2 = gimple_assign_rhs2(stmt);
++
++      if (!is_subtraction_special(stmt))
++              return NULL_TREE;
++
++      new_rhs1 = expand(visited, rhs1);
++      new_rhs2 = expand(visited, rhs2);
++
++      new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs1);
++      new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs2);
++
++      if (!types_compatible_p(TREE_TYPE(new_rhs1_def_stmt_rhs1), TREE_TYPE(new_rhs2_def_stmt_rhs1))) {
++              new_rhs1_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs1_def_stmt_rhs1);
++              new_rhs2_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs2_def_stmt_rhs1);
++      }
++
++      assign = create_binary_assign(MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
++      new_lhs = gimple_get_lhs(assign);
++      check_size_overflow(assign, TREE_TYPE(new_lhs), new_lhs, rhs1, AFTER_STMT);
++
++      return dup_assign(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
++}
++
++static bool is_a_neg_overflow(const_gimple stmt, const_tree rhs)
++{
++      const_gimple def_stmt;
++
++      if (TREE_CODE(rhs) != SSA_NAME)
++              return false;
++
++      if (gimple_assign_rhs_code(stmt) != PLUS_EXPR)
++              return false;
++
++      def_stmt = get_def_stmt(rhs);
++      if (gimple_code(def_stmt) != GIMPLE_ASSIGN || gimple_assign_rhs_code(def_stmt) != BIT_NOT_EXPR)
++              return false;
++
++      return true;
++}
++
++static tree handle_binary_ops(struct pointer_set_t *visited, tree lhs)
++{
++      tree rhs1, rhs2, new_lhs;
++      gimple def_stmt = get_def_stmt(lhs);
++      tree new_rhs1 = NULL_TREE;
++      tree new_rhs2 = NULL_TREE;
++
++      rhs1 = gimple_assign_rhs1(def_stmt);
++      rhs2 = gimple_assign_rhs2(def_stmt);
++
++      /* no DImode/TImode division in the 32/64 bit kernel */
++      switch (gimple_assign_rhs_code(def_stmt)) {
++      case RDIV_EXPR:
++      case TRUNC_DIV_EXPR:
++      case CEIL_DIV_EXPR:
++      case FLOOR_DIV_EXPR:
++      case ROUND_DIV_EXPR:
++      case TRUNC_MOD_EXPR:
++      case CEIL_MOD_EXPR:
++      case FLOOR_MOD_EXPR:
++      case ROUND_MOD_EXPR:
++      case EXACT_DIV_EXPR:
++      case POINTER_PLUS_EXPR:
++      case BIT_AND_EXPR:
++              return create_assign(visited, def_stmt, lhs, AFTER_STMT);
++      default:
++              break;
++      }
++
++      new_lhs = handle_integer_truncation(visited, lhs);
++      if (new_lhs != NULL_TREE)
++              return new_lhs;
++
++      if (TREE_CODE(rhs1) == SSA_NAME)
++              new_rhs1 = expand(visited, rhs1);
++      if (TREE_CODE(rhs2) == SSA_NAME)
++              new_rhs2 = expand(visited, rhs2);
++
++      if (is_a_neg_overflow(def_stmt, rhs2))
++              return handle_intentional_overflow(visited, true, def_stmt, new_rhs1, NULL_TREE);
++      if (is_a_neg_overflow(def_stmt, rhs1))
++              return handle_intentional_overflow(visited, true, def_stmt, new_rhs2, new_rhs2);
++
++
++      if (is_a_constant_overflow(def_stmt, rhs2))
++              return handle_intentional_overflow(visited, !is_a_cast_and_const_overflow(rhs1), def_stmt, new_rhs1, NULL_TREE);
++      if (is_a_constant_overflow(def_stmt, rhs1))
++              return handle_intentional_overflow(visited, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, new_rhs2);
++
++      return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
++}
++
++#if BUILDING_GCC_VERSION >= 4007
++static tree get_new_rhs(struct pointer_set_t *visited, tree size_overflow_type, tree rhs)
++{
++      if (is_gimple_constant(rhs))
++              return cast_a_tree(size_overflow_type, rhs);
++      if (TREE_CODE(rhs) != SSA_NAME)
++              return NULL_TREE;
++      return expand(visited, rhs);
++}
++
++static tree handle_ternary_ops(struct pointer_set_t *visited, tree lhs)
++{
++      tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
++      gimple def_stmt = get_def_stmt(lhs);
++
++      size_overflow_type = get_size_overflow_type(def_stmt, lhs);
++
++      rhs1 = gimple_assign_rhs1(def_stmt);
++      rhs2 = gimple_assign_rhs2(def_stmt);
++      rhs3 = gimple_assign_rhs3(def_stmt);
++      new_rhs1 = get_new_rhs(visited, size_overflow_type, rhs1);
++      new_rhs2 = get_new_rhs(visited, size_overflow_type, rhs2);
++      new_rhs3 = get_new_rhs(visited, size_overflow_type, rhs3);
++
++      return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3);
++}
++#endif
++
++static tree get_size_overflow_type(gimple stmt, const_tree node)
++{
++      const_tree type;
++      tree new_type;
++
++      gcc_assert(node != NULL_TREE);
++
++      type = TREE_TYPE(node);
++
++      if (gimple_plf(stmt, MY_STMT))
++              return TREE_TYPE(node);
++
++      switch (TYPE_MODE(type)) {
++      case QImode:
++              new_type = intHI_type_node;
++              break;
++      case HImode:
++              new_type = intSI_type_node;
++              break;
++      case SImode:
++              new_type = intDI_type_node;
++              break;
++      case DImode:
++              if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
++                      new_type = intDI_type_node;
++              else
++                      new_type = intTI_type_node;
++              break;
++      default:
++              debug_tree((tree)node);
++              error("%s: unsupported gcc configuration.", __func__);
++              gcc_unreachable();
++      }
++
++      if (TYPE_QUALS(type) != 0)
++              return build_qualified_type(new_type, TYPE_QUALS(type));
++      return new_type;
++}
++
++static tree expand_visited(gimple def_stmt)
++{
++      const_gimple next_stmt;
++      gimple_stmt_iterator gsi;
++      enum gimple_code code = gimple_code(def_stmt);
++
++      if (code == GIMPLE_ASM)
++              return NULL_TREE;
++
++      gsi = gsi_for_stmt(def_stmt);
++      gsi_next(&gsi);
++
++      if (gimple_code(def_stmt) == GIMPLE_PHI && gsi_end_p(gsi))
++              return NULL_TREE;
++      gcc_assert(!gsi_end_p(gsi));
++      next_stmt = gsi_stmt(gsi);
++
++      if (gimple_code(def_stmt) == GIMPLE_PHI && !gimple_plf((gimple)next_stmt, MY_STMT))
++              return NULL_TREE;
++      gcc_assert(gimple_plf((gimple)next_stmt, MY_STMT));
++
++      return get_lhs(next_stmt);
++}
++
++static tree expand(struct pointer_set_t *visited, tree lhs)
++{
++      gimple def_stmt;
++
++      if (skip_types(lhs))
++              return NULL_TREE;
++
++      def_stmt = get_def_stmt(lhs);
++
++      if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
++              return NULL_TREE;
++
++      if (gimple_plf(def_stmt, MY_STMT))
++              return lhs;
++
++      if (pointer_set_contains(visited, def_stmt))
++              return expand_visited(def_stmt);
++
++      switch (gimple_code(def_stmt)) {
++      case GIMPLE_PHI:
++              return handle_phi(visited, lhs);
++      case GIMPLE_CALL:
++      case GIMPLE_ASM:
++              return create_assign(visited, def_stmt, lhs, AFTER_STMT);
++      case GIMPLE_ASSIGN:
++              switch (gimple_num_ops(def_stmt)) {
++              case 2:
++                      return handle_unary_ops(visited, def_stmt);
++              case 3:
++                      return handle_binary_ops(visited, lhs);
++#if BUILDING_GCC_VERSION >= 4007
++              case 4:
++                      return handle_ternary_ops(visited, lhs);
++#endif
++              }
++      default:
++              debug_gimple_stmt(def_stmt);
++              error("%s: unknown gimple code", __func__);
++              gcc_unreachable();
++      }
++}
++
++static tree get_new_tree(gimple stmt, const_tree orig_node, tree new_node)
++{
++      const_gimple assign;
++      tree orig_type = TREE_TYPE(orig_node);
++      gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++
++      assign = build_cast_stmt(orig_type, new_node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
++      return gimple_get_lhs(assign);
++}
++
++static void change_function_arg(gimple stmt, const_tree orig_arg, unsigned int argnum, tree new_arg)
++{
++      gimple_call_set_arg(stmt, argnum, get_new_tree(stmt, orig_arg, new_arg));
++      update_stmt(stmt);
++}
++
++static void change_function_return(gimple stmt, const_tree orig_ret, tree new_ret)
++{
++      gimple_return_set_retval(stmt, get_new_tree(stmt, orig_ret, new_ret));
++      update_stmt(stmt);
++}
++
++static bool get_function_arg(unsigned int* argnum, const_tree fndecl)
++{
++      tree arg;
++      const_tree origarg;
++
++      if (!DECL_ABSTRACT_ORIGIN(fndecl))
++              return true;
++
++      origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl));
++      while (origarg && *argnum) {
++              (*argnum)--;
++              origarg = TREE_CHAIN(origarg);
++      }
++
++      gcc_assert(*argnum == 0);
++
++      gcc_assert(origarg != NULL_TREE);
++      *argnum = 0;
++      for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg), (*argnum)++)
++              if (operand_equal_p(origarg, arg, 0) || !strcmp(NAME(origarg), NAME(arg)))
++                      return true;
++      return false;
++}
++
++static enum mark walk_phi(struct pointer_set_t *visited, bool *search_err_code, const_tree result)
++{
++      gimple phi = get_def_stmt(result);
++      unsigned int i, n;
++
++      if (!phi)
++              return MARK_NO;
++
++      n = gimple_phi_num_args(phi);
++
++      pointer_set_insert(visited, phi);
++      for (i = 0; i < n; i++) {
++              enum mark marked;
++              const_tree arg = gimple_phi_arg_def(phi, i);
++              marked = pre_expand(visited, search_err_code, arg);
++              if (marked != MARK_NO)
++                      return marked;
++      }
++      return MARK_NO;
++}
++
++static enum mark walk_unary_ops(struct pointer_set_t *visited, bool *search_err_code, const_tree lhs)
++{
++      gimple def_stmt = get_def_stmt(lhs);
++      const_tree rhs;
++
++      if (!def_stmt)
++              return MARK_NO;
++
++      rhs = gimple_assign_rhs1(def_stmt);
++
++      def_stmt = get_def_stmt(rhs);
++      if (is_gimple_constant(rhs))
++              search_err_code[FROM_CONST] = true;
++
++      return pre_expand(visited, search_err_code, rhs);
++}
++
++static enum mark walk_binary_ops(struct pointer_set_t *visited, bool *search_err_code, const_tree lhs)
++{
++      gimple def_stmt = get_def_stmt(lhs);
++      const_tree rhs1, rhs2;
++      enum mark marked;
++
++      if (!def_stmt)
++              return MARK_NO;
++
++      search_err_code[CAST_ONLY] = false;
++
++      rhs1 = gimple_assign_rhs1(def_stmt);
++      rhs2 = gimple_assign_rhs2(def_stmt);
++      marked = pre_expand(visited, search_err_code, rhs1);
++      if (marked != MARK_NO)
++              return marked;
++      return pre_expand(visited, search_err_code, rhs2);
++}
++
++static const_tree search_field_decl(const_tree comp_ref)
++{
++      const_tree field = NULL_TREE;
++      unsigned int i, len = TREE_OPERAND_LENGTH(comp_ref);
++
++      for (i = 0; i < len; i++) {
++              field = TREE_OPERAND(comp_ref, i);
++              if (TREE_CODE(field) == FIELD_DECL)
++                      break;
++      }
++      gcc_assert(TREE_CODE(field) == FIELD_DECL);
++      return field;
++}
++
++static enum mark mark_status(const_tree fndecl, unsigned int argnum)
++{
++      const_tree attr, p;
++
++      // mm/filemap.c D.35286_51 = D.35283_46 (file_10(D), mapping_11, pos_1, D.35273_50, D.35285_49, page.14_48, fsdata.15_47);
++      if (fndecl == NULL_TREE)
++              return MARK_NO;
++
++      attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(fndecl));
++      if (!attr || !TREE_VALUE(attr))
++              return MARK_NO;
++
++      p = TREE_VALUE(attr);
++      if (TREE_INT_CST_HIGH(TREE_VALUE(p)) == -1)
++              return MARK_TURN_OFF;
++      if (!TREE_INT_CST_LOW(TREE_VALUE(p)))
++              return MARK_NOT_INTENTIONAL;
++      if (argnum == 0) {
++              gcc_assert(current_function_decl == fndecl);
++              return MARK_NO;
++      }
++
++      do {
++              if (argnum == TREE_INT_CST_LOW(TREE_VALUE(p)))
++                      return MARK_YES;
++              p = TREE_CHAIN(p);
++      } while (p);
++
++      return MARK_NO;
++}
++
++static void print_missing_msg(tree func, unsigned int argnum)
++{
++      unsigned int new_hash;
++      size_t len;
++      unsigned char tree_codes[CODES_LIMIT];
++      location_t loc;
++      const char *curfunc;
++
++      func = get_original_function_decl(func);
++      loc = DECL_SOURCE_LOCATION(func);
++      curfunc = get_asm_name(func);
++
++      len = get_function_decl(func, tree_codes);
++      new_hash = get_hash_num(curfunc, (const char *) tree_codes, len, 0);
++      inform(loc, "Function %s is missing from the size_overflow hash table +%s+%u+%u+", curfunc, curfunc, argnum, new_hash);
++}
++
++static unsigned int search_missing_attribute(const_tree arg)
++{
++      unsigned int argnum;
++      const struct size_overflow_hash *hash;
++      const_tree type = TREE_TYPE(arg);
++      tree func = get_original_function_decl(current_function_decl);
++
++      gcc_assert(TREE_CODE(arg) != COMPONENT_REF);
++
++      if (TREE_CODE(type) == POINTER_TYPE)
++              return 0;
++
++      argnum = find_arg_number(arg, func);
++      if (argnum == 0)
++              return 0;
++
++      if (lookup_attribute("size_overflow", DECL_ATTRIBUTES(func)))
++              return argnum;
++
++      hash = get_function_hash(func);
++      if (!hash || !(hash->param & (1U << argnum))) {
++              print_missing_msg(func, argnum);
++              return 0;
++      }
++      return argnum;
++}
++
++static enum mark is_already_marked(const_tree lhs)
++{
++      unsigned int argnum;
++      const_tree fndecl;
++
++      argnum = search_missing_attribute(lhs);
++      fndecl = get_original_function_decl(current_function_decl);
++      if (argnum && mark_status(fndecl, argnum) == MARK_YES)
++              return MARK_YES;
++      return MARK_NO;
++}
++
++static enum mark pre_expand(struct pointer_set_t *visited, bool *search_err_code, const_tree lhs)
++{
++      const_gimple def_stmt;
++
++      if (skip_types(lhs))
++              return MARK_NO;
++
++      if (TREE_CODE(lhs) == PARM_DECL)
++              return is_already_marked(lhs);
++
++      if (TREE_CODE(lhs) == COMPONENT_REF) {
++              const_tree field, attr;
++
++              field = search_field_decl(lhs);
++              attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(field));
++              if (!attr || !TREE_VALUE(attr))
++                      return MARK_NO;
++              return MARK_YES;
++      }
++
++      def_stmt = get_def_stmt(lhs);
++
++      if (!def_stmt)
++              return MARK_NO;
++
++      if (pointer_set_contains(visited, def_stmt))
++              return MARK_NO;
++
++      switch (gimple_code(def_stmt)) {
++      case GIMPLE_NOP:
++              if (TREE_CODE(SSA_NAME_VAR(lhs)) == PARM_DECL)
++                      return is_already_marked(lhs);
++              return MARK_NO;
++      case GIMPLE_PHI:
++              return walk_phi(visited, search_err_code, lhs);
++      case GIMPLE_CALL:
++              if (mark_status((gimple_call_fndecl(def_stmt)), 0) == MARK_TURN_OFF)
++                      return MARK_TURN_OFF;
++              check_function_hash(def_stmt);
++              return MARK_NO;
++      case GIMPLE_ASM:
++              search_err_code[CAST_ONLY] = false;
++              return MARK_NO;
++      case GIMPLE_ASSIGN:
++              switch (gimple_num_ops(def_stmt)) {
++              case 2:
++                      return walk_unary_ops(visited, search_err_code, lhs);
++              case 3:
++                      return walk_binary_ops(visited, search_err_code, lhs);
++              }
++      default:
++              debug_gimple_stmt((gimple)def_stmt);
++              error("%s: unknown gimple code", __func__);
++              gcc_unreachable();
++      }
++}
++
++// e.g., 3.8.2, 64, arch/x86/ia32/ia32_signal.c copy_siginfo_from_user32(): compat_ptr() u32 max
++static bool skip_asm(const_tree arg)
++{
++      gimple def_stmt = get_def_stmt(arg);
++
++      if (!def_stmt || !gimple_assign_cast_p(def_stmt))
++              return false;
++
++      def_stmt = get_def_stmt(gimple_assign_rhs1(def_stmt));
++      return def_stmt && gimple_code(def_stmt) == GIMPLE_ASM;
++}
++
++/*
++ * intentional_overflow attribute argument semantics:
++ *  > 0 / MARK_YES: no dup, search attributes (so, int)
++ *    0 / MARK_NOT_INTENTIONAL: no dup, search attribute (int)
++ *   -1 / MARK_TURN_OFF: no dup, no search, current_function_decl -> no dup
++ */
++
++static bool search_attributes(tree fndecl, const_tree arg, unsigned int argnum, bool where)
++{
++      struct pointer_set_t *visited;
++      enum mark is_marked, is_found;
++      location_t loc;
++      bool search_err_code[2] = {true, false};
++
++      is_marked = mark_status(current_function_decl, 0);
++      if (is_marked == MARK_TURN_OFF)
++              return true;
++
++      is_marked = mark_status(fndecl, argnum + 1);
++      if (is_marked == MARK_TURN_OFF || is_marked == MARK_NOT_INTENTIONAL)
++              return true;
++
++      visited = pointer_set_create();
++      is_found = pre_expand(visited, search_err_code, arg);
++      pointer_set_destroy(visited);
++
++      if (where == FROM_RET && search_err_code[CAST_ONLY] && search_err_code[FROM_CONST])
++              return true;
++
++      if (where == FROM_ARG && skip_asm(arg))
++              return true;
++
++      if (is_found == MARK_TURN_OFF)
++              return true;
++
++      if ((is_found == MARK_YES && is_marked == MARK_YES))
++              return true;
++
++      if (is_found == MARK_YES) {
++              loc = DECL_SOURCE_LOCATION(fndecl);
++              inform(loc, "The intentional_overflow attribute is missing from +%s+%u+", get_asm_name(fndecl), argnum + 1);
++              return true;
++      }
++      return false;
++}
++
++static void handle_function_arg(gimple stmt, tree fndecl, unsigned int argnum)
++{
++      struct pointer_set_t *visited;
++      tree arg, new_arg;
++      bool match;
++
++      if (argnum == 0)
++              return;
++
++      argnum--;
++
++      match = get_function_arg(&argnum, fndecl);
++      if (!match)
++              return;
++      gcc_assert(gimple_call_num_args(stmt) > argnum);
++      arg = gimple_call_arg(stmt, argnum);
++      if (arg == NULL_TREE)
++              return;
++
++      if (skip_types(arg))
++              return;
++
++      if (search_attributes(fndecl, arg, argnum, FROM_ARG))
++              return;
++
++      visited = pointer_set_create();
++      new_arg = expand(visited, arg);
++      pointer_set_destroy(visited);
++
++      if (new_arg == NULL_TREE)
++              return;
++
++      change_function_arg(stmt, arg, argnum, new_arg);
++      check_size_overflow(stmt, TREE_TYPE(new_arg), new_arg, arg, BEFORE_STMT);
++}
++
++static void handle_function_by_attribute(gimple stmt, const_tree attr, tree fndecl)
++{
++      tree p = TREE_VALUE(attr);
++      do {
++              handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p)));
++              p = TREE_CHAIN(p);
++      } while (p);
++}
++
++static void handle_function_by_hash(gimple stmt, tree fndecl)
++{
++      unsigned int num;
++      const struct size_overflow_hash *hash;
++
++      hash = get_function_hash(fndecl);
++      if (!hash)
++              return;
++
++      for (num = 0; num <= MAX_PARAM; num++)
++              if (hash->param & (1U << num))
++                      handle_function_arg(stmt, fndecl, num);
++}
++
++static bool check_return_value(void)
++{
++      const struct size_overflow_hash *hash;
++
++      hash = get_function_hash(current_function_decl);
++      if (!hash || !(hash->param & 1U << 0))
++              return false;
++
++      return true;
++}
++
++static void handle_return_value(gimple ret_stmt)
++{
++      struct pointer_set_t *visited;
++      tree ret, new_ret;
++
++      if (gimple_code(ret_stmt) != GIMPLE_RETURN)
++              return;
++
++      ret = gimple_return_retval(ret_stmt);
++
++      if (skip_types(ret))
++              return;
++
++      if (search_attributes(current_function_decl, ret, 0, FROM_RET))
++              return;
++
++      visited = pointer_set_create();
++      new_ret = expand(visited, ret);
++      pointer_set_destroy(visited);
++
++      change_function_return(ret_stmt, ret, new_ret);
++      check_size_overflow(ret_stmt, TREE_TYPE(new_ret), new_ret, ret, BEFORE_STMT);
++}
++
++static void set_plf_false(void)
++{
++      basic_block bb;
++
++      FOR_ALL_BB(bb) {
++              gimple_stmt_iterator si;
++
++              for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
++                      gimple_set_plf(gsi_stmt(si), MY_STMT, false);
++              for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
++                      gimple_set_plf(gsi_stmt(si), MY_STMT, false);
++      }
++}
++
++static unsigned int handle_function(void)
++{
++      basic_block next, bb = ENTRY_BLOCK_PTR->next_bb;
++      bool check_ret;
++
++      set_plf_false();
++
++      check_ret = check_return_value();
++
++      do {
++              gimple_stmt_iterator gsi;
++              next = bb->next_bb;
++
++              for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++                      tree fndecl, attr;
++                      gimple stmt = gsi_stmt(gsi);
++
++                      if (check_ret)
++                              handle_return_value(stmt);
++
++                      if (!(is_gimple_call(stmt)))
++                              continue;
++                      fndecl = gimple_call_fndecl(stmt);
++                      if (fndecl == NULL_TREE)
++                              continue;
++                      if (gimple_call_num_args(stmt) == 0)
++                              continue;
++                      attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
++                      if (!attr || !TREE_VALUE(attr))
++                              handle_function_by_hash(stmt, fndecl);
++                      else
++                              handle_function_by_attribute(stmt, attr, fndecl);
++                      gsi = gsi_for_stmt(stmt);
++                      next = gimple_bb(stmt)->next_bb;
++              }
++              bb = next;
++      } while (bb);
++      return 0;
++}
++
++static struct gimple_opt_pass size_overflow_pass = {
++      .pass = {
++              .type                   = GIMPLE_PASS,
++              .name                   = "size_overflow",
++#if BUILDING_GCC_VERSION >= 4008
++              .optinfo_flags          = OPTGROUP_NONE,
++#endif
++              .gate                   = NULL,
++              .execute                = handle_function,
++              .sub                    = NULL,
++              .next                   = NULL,
++              .static_pass_number     = 0,
++              .tv_id                  = TV_NONE,
++              .properties_required    = PROP_cfg,
++              .properties_provided    = 0,
++              .properties_destroyed   = 0,
++              .todo_flags_start       = 0,
++              .todo_flags_finish      = TODO_dump_func | TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
++      }
++};
++
++static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
++{
++      tree fntype;
++
++      const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
++
++      // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var)
++      fntype = build_function_type_list(void_type_node,
++                                        const_char_ptr_type_node,
++                                        unsigned_type_node,
++                                        const_char_ptr_type_node,
++                                        const_char_ptr_type_node,
++                                        NULL_TREE);
++      report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
++
++      DECL_ASSEMBLER_NAME(report_size_overflow_decl);
++      TREE_PUBLIC(report_size_overflow_decl) = 1;
++      DECL_EXTERNAL(report_size_overflow_decl) = 1;
++      DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
++      TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++      int i;
++      const char * const plugin_name = plugin_info->base_name;
++      const int argc = plugin_info->argc;
++      const struct plugin_argument * const argv = plugin_info->argv;
++      bool enable = true;
++
++      struct register_pass_info size_overflow_pass_info = {
++              .pass                           = &size_overflow_pass.pass,
++              .reference_pass_name            = "ssa",
++              .ref_pass_instance_number       = 1,
++              .pos_op                         = PASS_POS_INSERT_AFTER
++      };
++
++      if (!plugin_default_version_check(version, &gcc_version)) {
++              error(G_("incompatible gcc/plugin versions"));
++              return 1;
++      }
++
++      for (i = 0; i < argc; ++i) {
++              if (!strcmp(argv[i].key, "no-size-overflow")) {
++                      enable = false;
++                      continue;
++              }
++              error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++      }
++
++      register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
++      if (enable) {
++              register_callback("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
++              register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
++      }
++      register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
++
++      return 0;
++}
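
Editor's note: the size_overflow pass above duplicates marked arithmetic in a wider integer type and reports a truncating result at runtime. As a rough orientation, the following plain C sketch (not plugin output; checked_mul_u32 is a made-up name) shows the shape of the check the instrumentation amounts to for a 32-bit multiplication. The report_size_overflow() prototype matches the declaration built in start_unit_callback() above.

/* Hedged sketch only: roughly what the instrumented code does for a marked
 * SImode multiplication, written as ordinary C.  The widened type follows
 * get_size_overflow_type() (SImode -> DImode). */
extern void report_size_overflow(const char *loc_file, unsigned int loc_line,
                                 const char *current_func, const char *ssa_var);

static unsigned int checked_mul_u32(unsigned int a, unsigned int b)
{
        unsigned long long wide = (unsigned long long)a * b;    /* duplicated op, DImode */

        if (wide > 0xffffffffULL)                               /* result would truncate */
                report_size_overflow(__FILE__, __LINE__, __func__, "wide");

        return (unsigned int)wide;                              /* original SImode result */
}
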
+diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
+new file mode 100644
+index 0000000..ac2901e
+--- /dev/null
++++ b/tools/gcc/stackleak_plugin.c
+@@ -0,0 +1,327 @@
++/*
++ * Copyright 2011-2013 by the PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ *       NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ *       but for the kernel it doesn't matter since it doesn't link against
++ *       any of the gcc libraries
++ *
++ * gcc plugin to help implement various PaX features
++ *
++ * - track lowest stack pointer
++ *
++ * TODO:
++ * - initialize all local variables
++ *
++ * BUGS:
++ * - none known
++ */
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++//#include "expr.h" where are you...
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "rtl.h"
++#include "emit-rtl.h"
++
++#if BUILDING_GCC_VERSION >= 4008
++#define TODO_dump_func 0
++#endif
++
++extern void print_gimple_stmt(FILE *, gimple, int, int);
++
++int plugin_is_GPL_compatible;
++
++static int track_frame_size = -1;
++static const char track_function[] = "pax_track_stack";
++static const char check_function[] = "pax_check_alloca";
++static bool init_locals;
++
++static struct plugin_info stackleak_plugin_info = {
++      .version        = "201302112000",
++      .help           = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
++//                      "initialize-locals\t\tforcibly initialize all stack frames\n"
++};
++
++static bool gate_stackleak_track_stack(void);
++static unsigned int execute_stackleak_tree_instrument(void);
++static unsigned int execute_stackleak_final(void);
++
++static struct gimple_opt_pass stackleak_tree_instrument_pass = {
++      .pass = {
++              .type                   = GIMPLE_PASS,
++              .name                   = "stackleak_tree_instrument",
++#if BUILDING_GCC_VERSION >= 4008
++              .optinfo_flags          = OPTGROUP_NONE,
++#endif
++              .gate                   = gate_stackleak_track_stack,
++              .execute                = execute_stackleak_tree_instrument,
++              .sub                    = NULL,
++              .next                   = NULL,
++              .static_pass_number     = 0,
++              .tv_id                  = TV_NONE,
++              .properties_required    = PROP_gimple_leh | PROP_cfg,
++              .properties_provided    = 0,
++              .properties_destroyed   = 0,
++              .todo_flags_start       = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
++              .todo_flags_finish      = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
++      }
++};
++
++static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
++      .pass = {
++              .type                   = RTL_PASS,
++              .name                   = "stackleak_final",
++#if BUILDING_GCC_VERSION >= 4008
++              .optinfo_flags          = OPTGROUP_NONE,
++#endif
++              .gate                   = gate_stackleak_track_stack,
++              .execute                = execute_stackleak_final,
++              .sub                    = NULL,
++              .next                   = NULL,
++              .static_pass_number     = 0,
++              .tv_id                  = TV_NONE,
++              .properties_required    = 0,
++              .properties_provided    = 0,
++              .properties_destroyed   = 0,
++              .todo_flags_start       = 0,
++              .todo_flags_finish      = TODO_dump_func
++      }
++};
++
++static bool gate_stackleak_track_stack(void)
++{
++      return track_frame_size >= 0;
++}
++
++static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
++{
++      gimple check_alloca;
++      tree fntype, fndecl, alloca_size;
++
++      fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
++      fndecl = build_fn_decl(check_function, fntype);
++      DECL_ASSEMBLER_NAME(fndecl); // for LTO
++
++      // insert call to void pax_check_alloca(unsigned long size)
++      alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
++      check_alloca = gimple_build_call(fndecl, 1, alloca_size);
++      gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
++}
++
++static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
++{
++      gimple track_stack;
++      tree fntype, fndecl;
++
++      fntype = build_function_type_list(void_type_node, NULL_TREE);
++      fndecl = build_fn_decl(track_function, fntype);
++      DECL_ASSEMBLER_NAME(fndecl); // for LTO
++
++      // insert call to void pax_track_stack(void)
++      track_stack = gimple_build_call(fndecl, 0);
++      gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
++}
++
++#if BUILDING_GCC_VERSION == 4005
++static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
++{
++      tree fndecl;
++
++      if (!is_gimple_call(stmt))
++              return false;
++      fndecl = gimple_call_fndecl(stmt);
++      if (!fndecl)
++              return false;
++      if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
++              return false;
++//    print_node(stderr, "pax", fndecl, 4);
++      return DECL_FUNCTION_CODE(fndecl) == code;
++}
++#endif
++
++static bool is_alloca(gimple stmt)
++{
++      if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
++              return true;
++
++#if BUILDING_GCC_VERSION >= 4007
++      if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
++              return true;
++#endif
++
++      return false;
++}
++
++static unsigned int execute_stackleak_tree_instrument(void)
++{
++      basic_block bb, entry_bb;
++      bool prologue_instrumented = false, is_leaf = true;
++
++      entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
++
++      // 1. loop through BBs and GIMPLE statements
++      FOR_EACH_BB(bb) {
++              gimple_stmt_iterator gsi;
++
++              for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++                      gimple stmt;
++
++                      stmt = gsi_stmt(gsi);
++
++                      if (is_gimple_call(stmt))
++                              is_leaf = false;
++
++                      // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
++                      if (!is_alloca(stmt))
++                              continue;
++
++                      // 2. insert stack overflow check before each __builtin_alloca call
++                      stackleak_check_alloca(&gsi);
++
++                      // 3. insert track call after each __builtin_alloca call
++                      stackleak_add_instrumentation(&gsi);
++                      if (bb == entry_bb)
++                              prologue_instrumented = true;
++              }
++      }
++
++      // special cases for some bad linux code: taking the address of static inline functions will materialize them
++      // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
++      // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
++      // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
++      if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
++              return 0;
++      if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
++              return 0;
++
++      // 4. insert track call at the beginning
++      if (!prologue_instrumented) {
++              gimple_stmt_iterator gsi;
++
++              bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
++              if (dom_info_available_p(CDI_DOMINATORS))
++                      set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
++              gsi = gsi_start_bb(bb);
++              stackleak_add_instrumentation(&gsi);
++      }
++
++      return 0;
++}
++
++static unsigned int execute_stackleak_final(void)
++{
++      rtx insn, next;
++
++      if (cfun->calls_alloca)
++              return 0;
++
++      // keep calls only if function frame is big enough
++      if (get_frame_size() >= track_frame_size)
++              return 0;
++
++      // 1. find pax_track_stack calls
++      for (insn = get_insns(); insn; insn = next) {
++              // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
++              rtx body;
++
++              next = NEXT_INSN(insn);
++              if (!CALL_P(insn))
++                      continue;
++              body = PATTERN(insn);
++              if (GET_CODE(body) != CALL)
++                      continue;
++              body = XEXP(body, 0);
++              if (GET_CODE(body) != MEM)
++                      continue;
++              body = XEXP(body, 0);
++              if (GET_CODE(body) != SYMBOL_REF)
++                      continue;
++              if (strcmp(XSTR(body, 0), track_function))
++                      continue;
++//            warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
++              // 2. delete call
++              delete_insn_and_edges(insn);
++#if BUILDING_GCC_VERSION >= 4007
++              if (GET_CODE(next) == NOTE && NOTE_KIND(next) == NOTE_INSN_CALL_ARG_LOCATION) {
++                      insn = next;
++                      next = NEXT_INSN(insn);
++                      delete_insn_and_edges(insn);
++              }
++#endif
++      }
++
++//    print_simple_rtl(stderr, get_insns());
++//    print_rtl(stderr, get_insns());
++//    warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
++
++      return 0;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++      const char * const plugin_name = plugin_info->base_name;
++      const int argc = plugin_info->argc;
++      const struct plugin_argument * const argv = plugin_info->argv;
++      int i;
++      struct register_pass_info stackleak_tree_instrument_pass_info = {
++              .pass                           = &stackleak_tree_instrument_pass.pass,
++//            .reference_pass_name            = "tree_profile",
++              .reference_pass_name            = "optimized",
++              .ref_pass_instance_number       = 1,
++              .pos_op                         = PASS_POS_INSERT_BEFORE
++      };
++      struct register_pass_info stackleak_final_pass_info = {
++              .pass                           = &stackleak_final_rtl_opt_pass.pass,
++              .reference_pass_name            = "final",
++              .ref_pass_instance_number       = 1,
++              .pos_op                         = PASS_POS_INSERT_BEFORE
++      };
++
++      if (!plugin_default_version_check(version, &gcc_version)) {
++              error(G_("incompatible gcc/plugin versions"));
++              return 1;
++      }
++
++      register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
++
++      for (i = 0; i < argc; ++i) {
++              if (!strcmp(argv[i].key, "track-lowest-sp")) {
++                      if (!argv[i].value) {
++                              error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++                              continue;
++                      }
++                      track_frame_size = atoi(argv[i].value);
++                      if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
++                              error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
++                      continue;
++              }
++              if (!strcmp(argv[i].key, "initialize-locals")) {
++                      if (argv[i].value) {
++                              error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
++                              continue;
++                      }
++                      init_locals = true;
++                      continue;
++              }
++              error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++      }
++
++      register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
++      register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
++
++      return 0;
++}
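
Editor's note: the stackleak pass only inserts calls to pax_track_stack() and pax_check_alloca(); their implementations live elsewhere in the PaX/grsecurity kernel patch and are architecture-specific. The following is a hedged userspace model of the contract those helpers are assumed to provide, not the real kernel code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hedged model only.  The plugin relies on two things: every sufficiently
 * large frame calls pax_track_stack() to record the lowest stack pointer
 * seen, and every __builtin_alloca() is preceded by pax_check_alloca(size). */
#define MODEL_STACK_SIZE (16UL * 1024)

static uintptr_t lowest_sp = UINTPTR_MAX;  /* lowest stack pointer seen so far */
static uintptr_t stack_top;                /* first stack pointer seen, used as the window top */

void pax_track_stack(void)
{
        uintptr_t sp = (uintptr_t)__builtin_frame_address(0);

        if (!stack_top)
                stack_top = sp;
        if (sp < lowest_sp)
                lowest_sp = sp;            /* the PaX kernel later wipes the stack down to this mark */
}

void pax_check_alloca(unsigned long size)
{
        uintptr_t sp = (uintptr_t)__builtin_frame_address(0);

        if (!stack_top)
                stack_top = sp;

        /* the stack grows down: refuse an alloca that would leave the window */
        if (size >= MODEL_STACK_SIZE || sp - size <= stack_top - MODEL_STACK_SIZE) {
                fprintf(stderr, "pax_check_alloca: rejecting %lu byte allocation\n", size);
                abort();                   /* the kernel helper would BUG() instead */
        }
}
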
+diff --git a/tools/gcc/structleak_plugin.c b/tools/gcc/structleak_plugin.c
+new file mode 100644
+index 0000000..4fae911
+--- /dev/null
++++ b/tools/gcc/structleak_plugin.c
+@@ -0,0 +1,277 @@
++/*
++ * Copyright 2013 by PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ *       NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ *       but for the kernel it doesn't matter since it doesn't link against
++ *       any of the gcc libraries
++ *
++ * gcc plugin to forcibly initialize certain local variables that could
++ * otherwise leak kernel stack to userland if they aren't properly initialized
++ * by later code
++ *
++ * Homepage: http://pax.grsecurity.net/
++ *
++ * Usage:
++ * $ # for 4.5/4.6/C based 4.7
++ * $ gcc -I`gcc -print-file-name=plugin`/include -I`gcc -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o structleak_plugin.so structleak_plugin.c
++ * $ # for C++ based 4.7/4.8+
++ * $ g++ -I`g++ -print-file-name=plugin`/include -I`g++ -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o structleak_plugin.so structleak_plugin.c
++ * $ gcc -fplugin=./structleak_plugin.so test.c -O2
++ *
++ * TODO: eliminate redundant initializers
++ *       increase type coverage
++ */
++
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "intl.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "toplev.h"
++#include "function.h"
++#include "tree-flow.h"
++#include "plugin.h"
++#include "gimple.h"
++#include "diagnostic.h"
++#include "cfgloop.h"
++#include "langhooks.h"
++
++#if BUILDING_GCC_VERSION >= 4008
++#define TODO_dump_func 0
++#endif
++
++#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
++
++// unused type flag in all versions 4.5-4.8
++#define TYPE_USERSPACE(TYPE) TYPE_LANG_FLAG_3(TYPE)
++
++int plugin_is_GPL_compatible;
++void debug_gimple_stmt(gimple gs);
++
++static struct plugin_info structleak_plugin_info = {
++      .version        = "201304082245",
++      .help           = "disable\tdo not activate plugin\n",
++};
++
++static tree handle_user_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
++{
++      *no_add_attrs = true;
++
++      // check for types? for now accept everything linux has to offer
++      if (TREE_CODE(*node) != FIELD_DECL)
++              return NULL_TREE;
++
++      *no_add_attrs = false;
++      return NULL_TREE;
++}
++
++static struct attribute_spec user_attr = {
++      .name                   = "user",
++      .min_length             = 0,
++      .max_length             = 0,
++      .decl_required          = false,
++      .type_required          = false,
++      .function_type_required = false,
++      .handler                = handle_user_attribute,
++#if BUILDING_GCC_VERSION >= 4007
++      .affects_type_identity  = true
++#endif
++};
++
++static void register_attributes(void *event_data, void *data)
++{
++      register_attribute(&user_attr);
++//    register_attribute(&force_attr);
++}
++
++static tree get_field_type(tree field)
++{
++      return strip_array_types(TREE_TYPE(field));
++}
++
++static bool is_userspace_type(tree type)
++{
++      tree field;
++
++      for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
++              tree fieldtype = get_field_type(field);
++              enum tree_code code = TREE_CODE(fieldtype);
++
++              if (code == RECORD_TYPE || code == UNION_TYPE)
++                      if (is_userspace_type(fieldtype))
++                              return true;
++
++              if (lookup_attribute("user", DECL_ATTRIBUTES(field)))
++                      return true;
++      }
++      return false;
++}
++
++static void finish_type(void *event_data, void *data)
++{
++      tree type = (tree)event_data;
++
++      if (TYPE_USERSPACE(type))
++              return;
++
++      if (is_userspace_type(type))
++              TYPE_USERSPACE(type) = 1;
++}
++
++static void initialize(tree var)
++{
++      basic_block bb;
++      gimple_stmt_iterator gsi;
++      tree initializer;
++      gimple init_stmt;
++
++      // this is the original entry bb before the forced split
++      // TODO: check further BBs in case more splits occurred before us
++      bb = ENTRY_BLOCK_PTR->next_bb->next_bb;
++
++      // first check if the variable is already initialized, warn otherwise
++      for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++              gimple stmt = gsi_stmt(gsi);
++              tree rhs1;
++
++              // we're looking for an assignment of a single rhs...
++              if (!gimple_assign_single_p(stmt))
++                      continue;
++              rhs1 = gimple_assign_rhs1(stmt);
++#if BUILDING_GCC_VERSION >= 4007
++              // ... of a non-clobbering expression...
++              if (TREE_CLOBBER_P(rhs1))
++                      continue;
++#endif
++              // ... to our variable...
++              if (gimple_get_lhs(stmt) != var)
++                      continue;
++              // if it's an initializer then we're good
++              if (TREE_CODE(rhs1) == CONSTRUCTOR)
++                      return;
++      }
++
++      // these aren't the 0days you're looking for
++//    inform(DECL_SOURCE_LOCATION(var), "userspace variable will be forcibly initialized");
++
++      // build the initializer expression
++      initializer = build_constructor(TREE_TYPE(var), NULL);
++
++      // build the initializer stmt
++      init_stmt = gimple_build_assign(var, initializer);
++      gsi = gsi_start_bb(ENTRY_BLOCK_PTR->next_bb);
++      gsi_insert_before(&gsi, init_stmt, GSI_NEW_STMT);
++      update_stmt(init_stmt);
++}
++
++static unsigned int handle_function(void)
++{
++      basic_block bb;
++      unsigned int ret = 0;
++      tree var;
++
++#if BUILDING_GCC_VERSION == 4005
++      tree vars;
++#else
++      unsigned int i;
++#endif
++
++      // split the first bb where we can put the forced initializers
++      bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
++      if (dom_info_available_p(CDI_DOMINATORS))
++              set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
++
++      // enumerate all local variables and forcibly initialize our targets
++#if BUILDING_GCC_VERSION == 4005
++      for (vars = cfun->local_decls; vars; vars = TREE_CHAIN(vars)) {
++              var = TREE_VALUE(vars);
++#else
++      FOR_EACH_LOCAL_DECL(cfun, i, var) {
++#endif
++              tree type = TREE_TYPE(var);
++
++              gcc_assert(DECL_P(var));
++              if (!auto_var_in_fn_p(var, current_function_decl))
++                      continue;
++
++              // only care about structure types
++              if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
++                      continue;
++
++              // if the type is of interest, examine the variable
++              if (TYPE_USERSPACE(type))
++                      initialize(var);
++      }
++
++      return ret;
++}
++
++static struct gimple_opt_pass structleak_pass = {
++      .pass = {
++              .type                   = GIMPLE_PASS,
++              .name                   = "structleak",
++#if BUILDING_GCC_VERSION >= 4008
++              .optinfo_flags          = OPTGROUP_NONE,
++#endif
++              .gate                   = NULL,
++              .execute                = handle_function,
++              .sub                    = NULL,
++              .next                   = NULL,
++              .static_pass_number     = 0,
++              .tv_id                  = TV_NONE,
++              .properties_required    = PROP_cfg,
++              .properties_provided    = 0,
++              .properties_destroyed   = 0,
++              .todo_flags_start       = 0,
++              .todo_flags_finish      = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa | TODO_ggc_collect | TODO_verify_flow
++      }
++};
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++      int i;
++      const char * const plugin_name = plugin_info->base_name;
++      const int argc = plugin_info->argc;
++      const struct plugin_argument * const argv = plugin_info->argv;
++      bool enable = true;
++
++      struct register_pass_info structleak_pass_info = {
++              .pass                           = &structleak_pass.pass,
++              .reference_pass_name            = "ssa",
++              .ref_pass_instance_number       = 1,
++              .pos_op                         = PASS_POS_INSERT_AFTER
++      };
++
++      if (!plugin_default_version_check(version, &gcc_version)) {
++              error(G_("incompatible gcc/plugin versions"));
++              return 1;
++      }
++
++      if (strcmp(lang_hooks.name, "GNU C")) {
++              inform(UNKNOWN_LOCATION, G_("%s supports C only"), plugin_name);
++              enable = false;
++      }
++
++      for (i = 0; i < argc; ++i) {
++              if (!strcmp(argv[i].key, "disable")) {
++                      enable = false;
++                      continue;
++              }
++              error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++      }
++
++      register_callback(plugin_name, PLUGIN_INFO, NULL, &structleak_plugin_info);
++      if (enable) {
++              register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &structleak_pass_info);
++              register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
++      }
++      register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
++
++      return 0;
++}
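
Editor's note: to make the effect of the structleak pass concrete, here is a hedged before/after illustration in plain C. The struct, field, and function names are invented; the "user" attribute is the one registered by handle_user_attribute() above (in a kernel build it rides along with the __user annotation), and initialize() effectively inserts the empty constructor shown in the second function.

/* Illustration only: the semantic effect of the pass, not its output. */
#define __user __attribute__((user))   /* attribute only exists while the plugin is loaded */

struct example_req {                    /* hypothetical structure */
        void __user *buf;               /* the "user" field makes the whole type of interest */
        unsigned long len;
};

void before_structleak(void)
{
        struct example_req r;           /* uninitialized: may leak old stack contents */
        (void)r;
}

void after_structleak(void)
{
        struct example_req r = { 0 };   /* what the inserted empty CONSTRUCTOR amounts to */
        (void)r;
}
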
+diff --git a/tools/lib/lk/Makefile b/tools/lib/lk/Makefile
+index 926cbf3..b8403e0 100644
+--- a/tools/lib/lk/Makefile
++++ b/tools/lib/lk/Makefile
+@@ -10,7 +10,7 @@ LIB_OBJS += $(OUTPUT)debugfs.o
+ LIBFILE = liblk.a
+-CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) -fPIC
++CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) -fPIC
+ EXTLIBS = -lpthread -lrt -lelf -lm
+ ALL_CFLAGS = $(CFLAGS) $(BASIC_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
+ ALL_LDFLAGS = $(LDFLAGS)
+diff --git a/tools/perf/Makefile b/tools/perf/Makefile
+index b0f164b..63c9f7d 100644
+--- a/tools/perf/Makefile
++++ b/tools/perf/Makefile
+@@ -188,7 +188,7 @@ endif
+ ifndef PERF_DEBUG
+       ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -D_FORTIFY_SOURCE=2,-D_FORTIFY_SOURCE=2),y)
+-              CFLAGS := $(CFLAGS) -D_FORTIFY_SOURCE=2
++              CFLAGS := $(CFLAGS) -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2
+       endif
+ endif
+diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
+index 6789d78..4afd019e 100644
+--- a/tools/perf/util/include/asm/alternative-asm.h
++++ b/tools/perf/util/include/asm/alternative-asm.h
+@@ -5,4 +5,7 @@
+ #define altinstruction_entry #
++      .macro pax_force_retaddr rip=0, reload=0
++      .endm
++
+ #endif
+diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h
+index 96b919d..c49bb74 100644
+--- a/tools/perf/util/include/linux/compiler.h
++++ b/tools/perf/util/include/linux/compiler.h
+@@ -18,4 +18,12 @@
+ #define __force
+ #endif
++#ifndef __size_overflow
++# define __size_overflow(...)
++#endif
++
++#ifndef __intentional_overflow
++# define __intentional_overflow(...)
++#endif
++
+ #endif
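
Editor's note: the two fallback macros above keep perf's copy of linux/compiler.h parseable when the size_overflow plugin is not in use. For context, this is roughly how the annotations appear on kernel prototypes (the prototypes below are hypothetical examples, not lines from this patch): a positive argument names the checked parameter, and -1 disables checking, matching mark_status() in the plugin.

/* Hypothetical examples of the annotations the no-op fallbacks stand in for. */
void *example_alloc(unsigned long size, unsigned int flags) __size_overflow(1);

/* arithmetic here is allowed to wrap; -1 maps to MARK_TURN_OFF in the plugin */
unsigned int example_hash(unsigned int x) __intentional_overflow(-1);
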
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 302681c..3bde377 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -75,12 +75,17 @@ LIST_HEAD(vm_list);
+ static cpumask_var_t cpus_hardware_enabled;
+ static int kvm_usage_count = 0;
+-static atomic_t hardware_enable_failed;
++static atomic_unchecked_t hardware_enable_failed;
+ struct kmem_cache *kvm_vcpu_cache;
+ EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
+-static __read_mostly struct preempt_ops kvm_preempt_ops;
++static void kvm_sched_in(struct preempt_notifier *pn, int cpu);
++static void kvm_sched_out(struct preempt_notifier *pn, struct task_struct *next);
++static struct preempt_ops kvm_preempt_ops = {
++      .sched_in = kvm_sched_in,
++      .sched_out = kvm_sched_out,
++};
+ struct dentry *kvm_debugfs_dir;
+@@ -766,7 +771,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
+       /* We can read the guest memory with __xxx_user() later on. */
+       if ((mem->slot < KVM_USER_MEM_SLOTS) &&
+           ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
+-           !access_ok(VERIFY_WRITE,
++           !__access_ok(VERIFY_WRITE,
+                       (void __user *)(unsigned long)mem->userspace_addr,
+                       mem->memory_size)))
+               goto out;
+@@ -1878,7 +1883,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
+       return 0;
+ }
+-static struct file_operations kvm_vcpu_fops = {
++static file_operations_no_const kvm_vcpu_fops __read_only = {
+       .release        = kvm_vcpu_release,
+       .unlocked_ioctl = kvm_vcpu_ioctl,
+ #ifdef CONFIG_COMPAT
+@@ -2561,7 +2566,7 @@ static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
+       return 0;
+ }
+-static struct file_operations kvm_vm_fops = {
++static file_operations_no_const kvm_vm_fops __read_only = {
+       .release        = kvm_vm_release,
+       .unlocked_ioctl = kvm_vm_ioctl,
+ #ifdef CONFIG_COMPAT
+@@ -2662,7 +2667,7 @@ out:
+       return r;
+ }
+-static struct file_operations kvm_chardev_ops = {
++static file_operations_no_const kvm_chardev_ops __read_only = {
+       .unlocked_ioctl = kvm_dev_ioctl,
+       .compat_ioctl   = kvm_dev_ioctl,
+       .llseek         = noop_llseek,
+@@ -2688,7 +2693,7 @@ static void hardware_enable_nolock(void *junk)
+       if (r) {
+               cpumask_clear_cpu(cpu, cpus_hardware_enabled);
+-              atomic_inc(&hardware_enable_failed);
++              atomic_inc_unchecked(&hardware_enable_failed);
+               printk(KERN_INFO "kvm: enabling virtualization on "
+                                "CPU%d failed\n", cpu);
+       }
+@@ -2742,10 +2747,10 @@ static int hardware_enable_all(void)
+       kvm_usage_count++;
+       if (kvm_usage_count == 1) {
+-              atomic_set(&hardware_enable_failed, 0);
++              atomic_set_unchecked(&hardware_enable_failed, 0);
+               on_each_cpu(hardware_enable_nolock, NULL, 1);
+-              if (atomic_read(&hardware_enable_failed)) {
++              if (atomic_read_unchecked(&hardware_enable_failed)) {
+                       hardware_disable_all_nolock();
+                       r = -EBUSY;
+               }
+@@ -3099,7 +3104,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
+       kvm_arch_vcpu_put(vcpu);
+ }
+-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
++int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+                 struct module *module)
+ {
+       int r;
+@@ -3146,7 +3151,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+       if (!vcpu_align)
+               vcpu_align = __alignof__(struct kvm_vcpu);
+       kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
+-                                         0, NULL);
++                                         SLAB_USERCOPY, NULL);
+       if (!kvm_vcpu_cache) {
+               r = -ENOMEM;
+               goto out_free_3;
+@@ -3156,9 +3161,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+       if (r)
+               goto out_free;
++      pax_open_kernel();
+       kvm_chardev_ops.owner = module;
+       kvm_vm_fops.owner = module;
+       kvm_vcpu_fops.owner = module;
++      pax_close_kernel();
+       r = misc_register(&kvm_dev);
+       if (r) {
+@@ -3168,9 +3175,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+       register_syscore_ops(&kvm_syscore_ops);
+-      kvm_preempt_ops.sched_in = kvm_sched_in;
+-      kvm_preempt_ops.sched_out = kvm_sched_out;
+-
+       r = kvm_init_debug();
+       if (r) {
+               printk(KERN_ERR "kvm: create debugfs files failed\n");
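
Editor's note: the atomic_unchecked_t conversions in the kvm_main.c hunk follow the PaX REFCOUNT convention: counters that may legitimately wrap are moved to an "unchecked" atomic type so the overflow-detecting atomic_t instrumentation leaves them alone. A hedged sketch of the shape of that type follows; the real definitions are per-architecture in the PaX patch and are not part of this commit.

/* Sketch only: mirrors the convention used above, not the real definition. */
typedef struct {
        int counter;
} atomic_unchecked_t;

static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
{
        v->counter = i;                         /* plain store, no overflow trap */
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
        return v->counter;
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        __sync_fetch_and_add(&v->counter, 1);   /* wraps silently by design */
}
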
diff --git a/src/patches/imq_kernel3.10.patch b/src/patches/imq_kernel3.10.patch
new file mode 100644 (file)
index 0000000..e98bdc5
--- /dev/null
@@ -0,0 +1,1568 @@
+diff -uNr linux-3.9.1/drivers/net/imq.c linux-3.9.1-imqmq/drivers/net/imq.c
+--- linux-3.9.1/drivers/net/imq.c      1970-01-01 02:00:00.000000000 +0200
++++ linux-3.9.1-imqmq/drivers/net/imq.c        2013-05-08 17:30:41.715552053 +0300
+@@ -0,0 +1,861 @@
++/*
++ *             Pseudo-driver for the intermediate queue device.
++ *
++ *             This program is free software; you can redistribute it and/or
++ *             modify it under the terms of the GNU General Public License
++ *             as published by the Free Software Foundation; either version
++ *             2 of the License, or (at your option) any later version.
++ *
++ * Authors:    Patrick McHardy, <kaber@trash.net>
++ *
++ *            The first version was written by Martin Devera, <devik@cdi.cz>
++ *
++ * Credits:    Jan Rafaj <imq2t@cedric.vabo.cz>
++ *              - Update patch to 2.4.21
++ *             Sebastian Strollo <sstrollo@nortelnetworks.com>
++ *              - Fix "Dead-loop on netdevice imq"-issue
++ *             Marcel Sebek <sebek64@post.cz>
++ *              - Update to 2.6.2-rc1
++ *
++ *           After some time of inactivity there is a group taking care
++ *           of IMQ again: http://www.linuximq.net
++ *
++ *
++ *           2004/06/30 - New version of IMQ patch to kernels <=2.6.7
++ *             including the following changes:
++ *
++ *           - Correction of ipv6 support "+"s issue (Hasso Tepper)
++ *           - Correction of imq_init_devs() issue that resulted in a
++ *           kernel OOPS when unloading IMQ as a module (Norbert Buchmuller)
++ *           - Addition of functionality to choose number of IMQ devices
++ *           during kernel config (Andre Correa)
++ *           - Addition of functionality to choose how IMQ hooks on
++ *           PRE and POSTROUTING (after or before NAT) (Andre Correa)
++ *           - Cosmetic corrections (Norbert Buchmuller) (Andre Correa)
++ *
++ *
++ *             2005/12/16 - IMQ versions between 2.6.7 and 2.6.13 were
++ *             released with almost no problems. 2.6.14-x was released
++ *             with some important changes: nfcache was removed; After
++ *             some weeks of trouble we figured out that some IMQ fields
++ *             in skb were missing in skbuff.c - skb_clone and copy_skb_header.
++ *             These functions are correctly patched by this new patch version.
++ *
++ *             Thanks for all who helped to figure out all the problems with
++ *             2.6.14.x: Patrick McHardy, Rune Kock, VeNoMouS, Max CtRiX,
++ *             Kevin Shanahan, Richard Lucassen, Valery Dachev (hopefully
++ *             I didn't forget anybody). I apologize again for my lack of time.
++ *
++ *
++ *             2008/06/17 - 2.6.25 - Changed imq.c to use qdisc_run() instead
++ *             of qdisc_restart() and moved qdisc_run() to tasklet to avoid
++ *             recursive locking. New initialization routines to fix 'rmmod' not
++ *             working anymore. Used code from ifb.c. (Jussi Kivilinna)
++ *
++ *             2008/08/06 - 2.6.26 - (JK)
++ *              - Replaced tasklet with 'netif_schedule()'.
++ *              - Cleaned up and added comments for imq_nf_queue().
++ *
++ *             2009/04/12
++ *              - Add skb_save_cb/skb_restore_cb helper functions for backing up
++ *                the control buffer. This is needed because the qdisc layer on
++ *                kernels 2.6.27 and newer overwrites the control buffer. (Jussi Kivilinna)
++ *              - Add better locking for IMQ device. Hopefully this will solve
++ *                SMP issues. (Jussi Kivilinna)
++ *              - Port to 2.6.27
++ *              - Port to 2.6.28
++ *              - Port to 2.6.29 + fix rmmod not working
++ *
++ *             2009/04/20 - (Jussi Kivilinna)
++ *              - Use netdevice feature flags to avoid extra packet handling
++ *                by core networking layer and possibly increase performance.
++ *
++ *             2009/09/26 - (Jussi Kivilinna)
++ *              - Add imq_nf_reinject_lockless to fix deadlock with
++ *                imq_nf_queue/imq_nf_reinject.
++ *
++ *             2009/12/08 - (Jussi Kivilinna)
++ *              - Port to 2.6.32
++ *              - Add check for skb->nf_queue_entry==NULL in imq_dev_xmit()
++ *              - Also add better error checking for skb->nf_queue_entry usage
++ *
++ *             2010/02/25 - (Jussi Kivilinna)
++ *              - Port to 2.6.33
++ *
++ *             2010/08/15 - (Jussi Kivilinna)
++ *              - Port to 2.6.35
++ *              - Simplify hook registration by using nf_register_hooks.
++ *              - nf_reinject doesn't need spinlock around it, therefore remove
++ *                imq_nf_reinject function. Other nf_reinject users protect
++ *                their own data with a spinlock. With IMQ, however, all data
++ *                that is needed is stored per skbuff, so no locking is needed.
++ *              - Changed IMQ to use 'separate' NF_IMQ_QUEUE instead of
++ *                NF_QUEUE; this allows working coexistence of IMQ and other
++ *                NF_QUEUE users.
++ *              - Make IMQ multi-queue. Number of IMQ device queues can be
++ *                increased with the 'numqueues' module parameter. The default number
++ *                of queues is 1, in other words by default IMQ works as
++ *                single-queue device. Multi-queue selection is based on
++ *                IFB multi-queue patch by Changli Gao <xiaosuo@gmail.com>.
++ *
++ *             2011/03/18 - (Jussi Kivilinna)
++ *              - Port to 2.6.38
++ *
++ *             2011/07/12 - (syoder89@gmail.com)
++ *              - Crash fix that happens when the receiving interface has more
++ *                than one queue (add missing skb_set_queue_mapping in
++ *                imq_select_queue).
++ *
++ *             2011/07/26 - (Jussi Kivilinna)
++ *              - Add queue mapping checks for packets exiting IMQ.
++ *              - Port to 3.0
++ *
++ *             2011/08/16 - (Jussi Kivilinna)
++ *              - Clear IFF_TX_SKB_SHARING flag that was added for linux 3.0.2
++ *
++ *             2011/11/03 - Germano Michel <germanomichel@gmail.com>
++ *              - Fix IMQ for net namespaces
++ *
++ *             2011/11/04 - Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
++ *              - Port to 3.1
++ *              - Clean-up, move 'get imq device pointer by imqX name' to
++ *                separate function from imq_nf_queue().
++ *
++ *             2012/01/05 - Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
++ *              - Port to 3.2
++ *
++ *             2012/03/19 - Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
++ *              - Port to 3.3
++ *
++ *             2012/12/12 - Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
++ *              - Port to 3.7
++ *              - Fix checkpatch.pl warnings
++ *
++ *           Also, many thanks to Pablo Sebastian Greco for making the initial
++ *           patch and to those who helped the testing.
++ *
++ *             More info at: http://www.linuximq.net/ (Andre Correa)
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/moduleparam.h>
++#include <linux/list.h>
++#include <linux/skbuff.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/rtnetlink.h>
++#include <linux/if_arp.h>
++#include <linux/netfilter.h>
++#include <linux/netfilter_ipv4.h>
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++      #include <linux/netfilter_ipv6.h>
++#endif
++#include <linux/imq.h>
++#include <net/pkt_sched.h>
++#include <net/netfilter/nf_queue.h>
++#include <net/sock.h>
++#include <linux/ip.h>
++#include <linux/ipv6.h>
++#include <linux/if_vlan.h>
++#include <linux/if_pppox.h>
++#include <net/ip.h>
++#include <net/ipv6.h>
++
++static int imq_nf_queue(struct nf_queue_entry *entry, unsigned int queue_num);
++
++static nf_hookfn imq_nf_hook;
++
++static struct nf_hook_ops imq_ops[] = {
++      {
++      /* imq_ingress_ipv4 */
++              .hook           = imq_nf_hook,
++              .owner          = THIS_MODULE,
++              .pf             = PF_INET,
++              .hooknum        = NF_INET_PRE_ROUTING,
++#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
++              .priority       = NF_IP_PRI_MANGLE + 1,
++#else
++              .priority       = NF_IP_PRI_NAT_DST + 1,
++#endif
++      },
++      {
++      /* imq_egress_ipv4 */
++              .hook           = imq_nf_hook,
++              .owner          = THIS_MODULE,
++              .pf             = PF_INET,
++              .hooknum        = NF_INET_POST_ROUTING,
++#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
++              .priority       = NF_IP_PRI_LAST,
++#else
++              .priority       = NF_IP_PRI_NAT_SRC - 1,
++#endif
++      },
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++      {
++      /* imq_ingress_ipv6 */
++              .hook           = imq_nf_hook,
++              .owner          = THIS_MODULE,
++              .pf             = PF_INET6,
++              .hooknum        = NF_INET_PRE_ROUTING,
++#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
++              .priority       = NF_IP6_PRI_MANGLE + 1,
++#else
++              .priority       = NF_IP6_PRI_NAT_DST + 1,
++#endif
++      },
++      {
++      /* imq_egress_ipv6 */
++              .hook           = imq_nf_hook,
++              .owner          = THIS_MODULE,
++              .pf             = PF_INET6,
++              .hooknum        = NF_INET_POST_ROUTING,
++#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
++              .priority       = NF_IP6_PRI_LAST,
++#else
++              .priority       = NF_IP6_PRI_NAT_SRC - 1,
++#endif
++      },
++#endif
++};
++
++#if defined(CONFIG_IMQ_NUM_DEVS)
++static int numdevs = CONFIG_IMQ_NUM_DEVS;
++#else
++static int numdevs = IMQ_MAX_DEVS;
++#endif
++
++static struct net_device *imq_devs_cache[IMQ_MAX_DEVS];
++
++#define IMQ_MAX_QUEUES 32
++static int numqueues = 1;
++static u32 imq_hashrnd;
++
++static inline __be16 pppoe_proto(const struct sk_buff *skb)
++{
++      return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
++                      sizeof(struct pppoe_hdr)));
++}
++
++static u16 imq_hash(struct net_device *dev, struct sk_buff *skb)
++{
++      unsigned int pull_len;
++      u16 protocol = skb->protocol;
++      u32 addr1, addr2;
++      u32 hash, ihl = 0;
++      union {
++              u16 in16[2];
++              u32 in32;
++      } ports;
++      u8 ip_proto;
++
++      pull_len = 0;
++
++recheck:
++      switch (protocol) {
++      case htons(ETH_P_8021Q): {
++              if (unlikely(skb_pull(skb, VLAN_HLEN) == NULL))
++                      goto other;
++
++              pull_len += VLAN_HLEN;
++              skb->network_header += VLAN_HLEN;
++
++              protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
++              goto recheck;
++      }
++
++      case htons(ETH_P_PPP_SES): {
++              if (unlikely(skb_pull(skb, PPPOE_SES_HLEN) == NULL))
++                      goto other;
++
++              pull_len += PPPOE_SES_HLEN;
++              skb->network_header += PPPOE_SES_HLEN;
++
++              protocol = pppoe_proto(skb);
++              goto recheck;
++      }
++
++      case htons(ETH_P_IP): {
++              const struct iphdr *iph = ip_hdr(skb);
++
++              if (unlikely(!pskb_may_pull(skb, sizeof(struct iphdr))))
++                      goto other;
++
++              addr1 = iph->daddr;
++              addr2 = iph->saddr;
++
++              ip_proto = !(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ?
++                               iph->protocol : 0;
++              ihl = ip_hdrlen(skb);
++
++              break;
++      }
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++      case htons(ETH_P_IPV6): {
++              const struct ipv6hdr *iph = ipv6_hdr(skb);
++              __be16 fo = 0;
++
++              if (unlikely(!pskb_may_pull(skb, sizeof(struct ipv6hdr))))
++                      goto other;
++
++              addr1 = iph->daddr.s6_addr32[3];
++              addr2 = iph->saddr.s6_addr32[3];
++              ihl = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &ip_proto,
++                                     &fo);
++              if (unlikely(ihl < 0))
++                      goto other;
++
++              break;
++      }
++#endif
++      default:
++other:
++              if (pull_len != 0) {
++                      skb_push(skb, pull_len);
++                      skb->network_header -= pull_len;
++              }
++
++              return (u16)(ntohs(protocol) % dev->real_num_tx_queues);
++      }
++
++      if (addr1 > addr2)
++              swap(addr1, addr2);
++
++      switch (ip_proto) {
++      case IPPROTO_TCP:
++      case IPPROTO_UDP:
++      case IPPROTO_DCCP:
++      case IPPROTO_ESP:
++      case IPPROTO_AH:
++      case IPPROTO_SCTP:
++      case IPPROTO_UDPLITE: {
++              if (likely(skb_copy_bits(skb, ihl, &ports.in32, 4) >= 0)) {
++                      if (ports.in16[0] > ports.in16[1])
++                              swap(ports.in16[0], ports.in16[1]);
++                      break;
++              }
++              /* fall-through */
++      }
++      default:
++              ports.in32 = 0;
++              break;
++      }
++
++      if (pull_len != 0) {
++              skb_push(skb, pull_len);
++              skb->network_header -= pull_len;
++      }
++
++      hash = jhash_3words(addr1, addr2, ports.in32, imq_hashrnd ^ ip_proto);
++
++      return (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
++}
++
++static inline bool sk_tx_queue_recorded(struct sock *sk)
++{
++      return (sk_tx_queue_get(sk) >= 0);
++}
++
++static struct netdev_queue *imq_select_queue(struct net_device *dev,
++                                              struct sk_buff *skb)
++{
++      u16 queue_index = 0;
++      u32 hash;
++
++      if (likely(dev->real_num_tx_queues == 1))
++              goto out;
++
++	/* IMQ can be receiving ingress or egress packets. */
++
++	/* Check first if rx_queue is set */
++      if (skb_rx_queue_recorded(skb)) {
++              queue_index = skb_get_rx_queue(skb);
++              goto out;
++      }
++
++      /* Check if socket has tx_queue set */
++      if (sk_tx_queue_recorded(skb->sk)) {
++              queue_index = sk_tx_queue_get(skb->sk);
++              goto out;
++      }
++
++	/* Try to use the socket hash */
++      if (skb->sk && skb->sk->sk_hash) {
++              hash = skb->sk->sk_hash;
++              queue_index =
++                      (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
++              goto out;
++      }
++
++      /* Generate hash from packet data */
++      queue_index = imq_hash(dev, skb);
++
++out:
++      if (unlikely(queue_index >= dev->real_num_tx_queues))
++              queue_index = (u16)((u32)queue_index % dev->real_num_tx_queues);
++
++      skb_set_queue_mapping(skb, queue_index);
++      return netdev_get_tx_queue(dev, queue_index);
++}
++
++static struct net_device_stats *imq_get_stats(struct net_device *dev)
++{
++      return &dev->stats;
++}
++
++/* called for packets kfree'd in qdiscs at places other than enqueue */
++static void imq_skb_destructor(struct sk_buff *skb)
++{
++      struct nf_queue_entry *entry = skb->nf_queue_entry;
++
++      skb->nf_queue_entry = NULL;
++
++      if (entry) {
++              nf_queue_entry_release_refs(entry);
++              kfree(entry);
++      }
++
++      skb_restore_cb(skb); /* kfree backup */
++}
++
++static void imq_done_check_queue_mapping(struct sk_buff *skb,
++                                       struct net_device *dev)
++{
++      unsigned int queue_index;
++
++      /* Don't let queue_mapping be left too large after exiting IMQ */
++      if (likely(skb->dev != dev && skb->dev != NULL)) {
++              queue_index = skb_get_queue_mapping(skb);
++              if (unlikely(queue_index >= skb->dev->real_num_tx_queues)) {
++                      queue_index = (u16)((u32)queue_index %
++                                              skb->dev->real_num_tx_queues);
++                      skb_set_queue_mapping(skb, queue_index);
++              }
++      } else {
++              /* skb->dev was IMQ device itself or NULL, be on safe side and
++               * just clear queue mapping.
++               */
++              skb_set_queue_mapping(skb, 0);
++      }
++}
++
++static netdev_tx_t imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++      struct nf_queue_entry *entry = skb->nf_queue_entry;
++
++      skb->nf_queue_entry = NULL;
++      dev->trans_start = jiffies;
++
++      dev->stats.tx_bytes += skb->len;
++      dev->stats.tx_packets++;
++
++      if (unlikely(entry == NULL)) {
++		/* We don't know what is going on here: the packet is queued for
++		 * an imq device, but (probably) not by us.
++               *
++		 * If this packet was not sent here by imq_nf_queue(), then
++		 * skb_save_cb() was not used and kfree_skb() should not show:
++               *   WARNING: IMQ: kfree_skb: skb->cb_next:..
++               * and/or
++               *   WARNING: IMQ: kfree_skb: skb->nf_queue_entry...
++               *
++               * However if this message is shown, then IMQ is somehow broken
++               * and you should report this to linuximq.net.
++               */
++
++		/* imq_dev_xmit is a black hole that eats all packets; report
++		 * that we ate this packet happily and increase the dropped counter.
++               */
++
++              dev->stats.tx_dropped++;
++              dev_kfree_skb(skb);
++
++              return NETDEV_TX_OK;
++      }
++
++      skb_restore_cb(skb); /* restore skb->cb */
++
++      skb->imq_flags = 0;
++      skb->destructor = NULL;
++
++      imq_done_check_queue_mapping(skb, dev);
++
++      nf_reinject(entry, NF_ACCEPT);
++
++      return NETDEV_TX_OK;
++}
++
++static struct net_device *get_imq_device_by_index(int index)
++{
++      struct net_device *dev = NULL;
++      struct net *net;
++      char buf[8];
++
++      /* get device by name and cache result */
++      snprintf(buf, sizeof(buf), "imq%d", index);
++
++      /* Search device from all namespaces. */
++      for_each_net(net) {
++              dev = dev_get_by_name(net, buf);
++              if (dev)
++                      break;
++      }
++
++      if (WARN_ON_ONCE(dev == NULL)) {
++              /* IMQ device not found. Exotic config? */
++              return ERR_PTR(-ENODEV);
++      }
++
++      imq_devs_cache[index] = dev;
++      dev_put(dev);
++
++      return dev;
++}
++
++static int imq_nf_queue(struct nf_queue_entry *entry, unsigned int queue_num)
++{
++      struct net_device *dev;
++      struct sk_buff *skb_orig, *skb, *skb_shared;
++      struct Qdisc *q;
++      struct netdev_queue *txq;
++      spinlock_t *root_lock;
++      int users, index;
++      int retval = -EINVAL;
++      unsigned int orig_queue_index;
++
++      index = entry->skb->imq_flags & IMQ_F_IFMASK;
++      if (unlikely(index > numdevs - 1)) {
++              if (net_ratelimit())
++                      pr_warn("IMQ: invalid device specified, highest is %u\n",
++                              numdevs - 1);
++              retval = -EINVAL;
++              goto out;
++      }
++
++      /* check for imq device by index from cache */
++      dev = imq_devs_cache[index];
++      if (unlikely(!dev)) {
++              dev = get_imq_device_by_index(index);
++              if (IS_ERR(dev)) {
++                      retval = PTR_ERR(dev);
++                      goto out;
++              }
++      }
++
++      if (unlikely(!(dev->flags & IFF_UP))) {
++              entry->skb->imq_flags = 0;
++              nf_reinject(entry, NF_ACCEPT);
++              retval = 0;
++              goto out;
++      }
++      dev->last_rx = jiffies;
++
++      skb = entry->skb;
++      skb_orig = NULL;
++
++      /* skb has owner? => make clone */
++      if (unlikely(skb->destructor)) {
++              skb_orig = skb;
++              skb = skb_clone(skb, GFP_ATOMIC);
++              if (unlikely(!skb)) {
++                      retval = -ENOMEM;
++                      goto out;
++              }
++              entry->skb = skb;
++      }
++
++      skb->nf_queue_entry = entry;
++
++      dev->stats.rx_bytes += skb->len;
++      dev->stats.rx_packets++;
++
++      if (!skb->dev) {
++		/* skb->dev == NULL causes problems, try to find the cause. */
++              if (net_ratelimit()) {
++                      dev_warn(&dev->dev,
++                               "received packet with skb->dev == NULL\n");
++                      dump_stack();
++              }
++
++              skb->dev = dev;
++      }
++
++      /* Disables softirqs for lock below */
++      rcu_read_lock_bh();
++
++      /* Multi-queue selection */
++      orig_queue_index = skb_get_queue_mapping(skb);
++      txq = imq_select_queue(dev, skb);
++
++      q = rcu_dereference(txq->qdisc);
++      if (unlikely(!q->enqueue))
++              goto packet_not_eaten_by_imq_dev;
++
++      root_lock = qdisc_lock(q);
++      spin_lock(root_lock);
++
++      users = atomic_read(&skb->users);
++
++      skb_shared = skb_get(skb); /* increase reference count by one */
++
++      /* backup skb->cb, as qdisc layer will overwrite it */
++      skb_save_cb(skb_shared);
++      qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */
++
++      if (likely(atomic_read(&skb_shared->users) == users + 1)) {
++              kfree_skb(skb_shared); /* decrease reference count by one */
++
++              skb->destructor = &imq_skb_destructor;
++
++              /* cloned? */
++              if (unlikely(skb_orig))
++                      kfree_skb(skb_orig); /* free original */
++
++              spin_unlock(root_lock);
++              rcu_read_unlock_bh();
++
++              /* schedule qdisc dequeue */
++              __netif_schedule(q);
++
++              retval = 0;
++              goto out;
++      } else {
++              skb_restore_cb(skb_shared); /* restore skb->cb */
++              skb->nf_queue_entry = NULL;
++              /*
++		 * The qdisc dropped the packet and decreased its reference
++		 * count, so we must not try to free it again here, as that
++		 * would actually destroy the skb.
++               */
++              spin_unlock(root_lock);
++              goto packet_not_eaten_by_imq_dev;
++      }
++
++packet_not_eaten_by_imq_dev:
++      skb_set_queue_mapping(skb, orig_queue_index);
++      rcu_read_unlock_bh();
++
++      /* cloned? restore original */
++      if (unlikely(skb_orig)) {
++              kfree_skb(skb);
++              entry->skb = skb_orig;
++      }
++      retval = -1;
++out:
++      return retval;
++}
++
++static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb,
++                              const struct net_device *indev,
++                              const struct net_device *outdev,
++                              int (*okfn)(struct sk_buff *))
++{
++      return (pskb->imq_flags & IMQ_F_ENQUEUE) ? NF_IMQ_QUEUE : NF_ACCEPT;
++}
++
++static int imq_close(struct net_device *dev)
++{
++      netif_stop_queue(dev);
++      return 0;
++}
++
++static int imq_open(struct net_device *dev)
++{
++      netif_start_queue(dev);
++      return 0;
++}
++
++static const struct net_device_ops imq_netdev_ops = {
++      .ndo_open               = imq_open,
++      .ndo_stop               = imq_close,
++      .ndo_start_xmit         = imq_dev_xmit,
++      .ndo_get_stats          = imq_get_stats,
++};
++
++static void imq_setup(struct net_device *dev)
++{
++      dev->netdev_ops         = &imq_netdev_ops;
++      dev->type               = ARPHRD_VOID;
++      dev->mtu                = 16000; /* too small? */
++      dev->tx_queue_len       = 11000; /* too big? */
++      dev->flags              = IFF_NOARP;
++      dev->features           = NETIF_F_SG | NETIF_F_FRAGLIST |
++                                NETIF_F_GSO | NETIF_F_HW_CSUM |
++                                NETIF_F_HIGHDMA;
++      dev->priv_flags         &= ~(IFF_XMIT_DST_RELEASE |
++                                   IFF_TX_SKB_SHARING);
++}
++
++static int imq_validate(struct nlattr *tb[], struct nlattr *data[])
++{
++      int ret = 0;
++
++      if (tb[IFLA_ADDRESS]) {
++              if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
++                      ret = -EINVAL;
++                      goto end;
++              }
++              if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
++                      ret = -EADDRNOTAVAIL;
++                      goto end;
++              }
++      }
++      return 0;
++end:
++      pr_warn("IMQ: imq_validate failed (%d)\n", ret);
++      return ret;
++}
++
++static struct rtnl_link_ops imq_link_ops __read_mostly = {
++      .kind           = "imq",
++      .priv_size      = 0,
++      .setup          = imq_setup,
++      .validate       = imq_validate,
++};
++
++static const struct nf_queue_handler imq_nfqh = {
++      .outfn = imq_nf_queue,
++};
++
++static int __init imq_init_hooks(void)
++{
++      int ret;
++
++      nf_register_queue_imq_handler(&imq_nfqh);
++
++      ret = nf_register_hooks(imq_ops, ARRAY_SIZE(imq_ops));
++      if (ret < 0)
++              nf_unregister_queue_imq_handler();
++
++      return ret;
++}
++
++static int __init imq_init_one(int index)
++{
++      struct net_device *dev;
++      int ret;
++
++      dev = alloc_netdev_mq(0, "imq%d", imq_setup, numqueues);
++      if (!dev)
++              return -ENOMEM;
++
++      ret = dev_alloc_name(dev, dev->name);
++      if (ret < 0)
++              goto fail;
++
++      dev->rtnl_link_ops = &imq_link_ops;
++      ret = register_netdevice(dev);
++      if (ret < 0)
++              goto fail;
++
++      return 0;
++fail:
++      free_netdev(dev);
++      return ret;
++}
++
++static int __init imq_init_devs(void)
++{
++      int err, i;
++
++      if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) {
++		pr_err("IMQ: numdevs has to be between 1 and %u\n",
++                     IMQ_MAX_DEVS);
++              return -EINVAL;
++      }
++
++      if (numqueues < 1 || numqueues > IMQ_MAX_QUEUES) {
++		pr_err("IMQ: numqueues has to be between 1 and %u\n",
++                     IMQ_MAX_QUEUES);
++              return -EINVAL;
++      }
++
++      get_random_bytes(&imq_hashrnd, sizeof(imq_hashrnd));
++
++      rtnl_lock();
++      err = __rtnl_link_register(&imq_link_ops);
++
++      for (i = 0; i < numdevs && !err; i++)
++              err = imq_init_one(i);
++
++      if (err) {
++              __rtnl_link_unregister(&imq_link_ops);
++              memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
++      }
++      rtnl_unlock();
++
++      return err;
++}
++
++static int __init imq_init_module(void)
++{
++      int err;
++
++#if defined(CONFIG_IMQ_NUM_DEVS)
++      BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16);
++      BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2);
++      BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK);
++#endif
++
++      err = imq_init_devs();
++      if (err) {
++              pr_err("IMQ: Error trying imq_init_devs(net)\n");
++              return err;
++      }
++
++      err = imq_init_hooks();
++      if (err) {
++		pr_err("IMQ: Error trying imq_init_hooks()\n");
++              rtnl_link_unregister(&imq_link_ops);
++              memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
++              return err;
++      }
++
++      pr_info("IMQ driver loaded successfully. (numdevs = %d, numqueues = %d)\n",
++              numdevs, numqueues);
++
++#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
++      pr_info("\tHooking IMQ before NAT on PREROUTING.\n");
++#else
++      pr_info("\tHooking IMQ after NAT on PREROUTING.\n");
++#endif
++#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
++      pr_info("\tHooking IMQ before NAT on POSTROUTING.\n");
++#else
++      pr_info("\tHooking IMQ after NAT on POSTROUTING.\n");
++#endif
++
++      return 0;
++}
++
++static void __exit imq_unhook(void)
++{
++      nf_unregister_hooks(imq_ops, ARRAY_SIZE(imq_ops));
++      nf_unregister_queue_imq_handler();
++}
++
++static void __exit imq_cleanup_devs(void)
++{
++      rtnl_link_unregister(&imq_link_ops);
++      memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
++}
++
++static void __exit imq_exit_module(void)
++{
++      imq_unhook();
++      imq_cleanup_devs();
++      pr_info("IMQ driver unloaded successfully.\n");
++}
++
++module_init(imq_init_module);
++module_exit(imq_exit_module);
++
++module_param(numdevs, int, 0);
++module_param(numqueues, int, 0);
++MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will be created)");
++MODULE_PARM_DESC(numqueues, "number of queues per IMQ device");
++MODULE_AUTHOR("http://www.linuximq.net");
++MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS_RTNL_LINK("imq");
++
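imq_hash() above mixes the two addresses and the port pair with jhash_3words() and then folds the 32-bit hash into the device's transmit-queue range with a multiply-shift rather than a modulo; imq_select_queue() reuses the same folding for the socket hash. The stand-alone sketch below illustrates only that folding idea in user space; mix32() is a made-up stand-in for jhash_3words(), not the kernel function, and the sample addresses are arbitrary.

/* Stand-alone illustration of the hash -> tx-queue folding used in imq_hash().
 * mix32() is a placeholder mixer, NOT the kernel's jhash_3words().
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t mix32(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
{
	/* simple xorshift/multiply mixing; purely illustrative */
	uint32_t h = a ^ b ^ c ^ seed;
	h ^= h >> 16;
	h *= 0x7feb352dU;
	h ^= h >> 15;
	h *= 0x846ca68bU;
	h ^= h >> 16;
	return h;
}

/* Same folding as "((u64)hash * dev->real_num_tx_queues) >> 32" above */
static uint16_t fold_to_queue(uint32_t hash, unsigned int num_queues)
{
	return (uint16_t)(((uint64_t)hash * num_queues) >> 32);
}

int main(void)
{
	uint32_t saddr = 0xc0a80001U, daddr = 0xc0a80164U, ports = 0x1f90d431U;
	unsigned int num_queues = 4;
	uint32_t h = mix32(saddr, daddr, ports, 0x12345678U);

	printf("hash=0x%08x -> queue %u of %u\n",
	       h, fold_to_queue(h, num_queues), num_queues);
	return 0;
}

The multiply-shift maps the hash roughly uniformly onto [0, num_queues) without a division in the per-packet path.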
+diff -uNr linux-3.9.1/drivers/net/Kconfig linux-3.9.1-imqmq/drivers/net/Kconfig
+--- linux-3.9.1/drivers/net/Kconfig    2013-05-08 06:58:03.000000000 +0300
++++ linux-3.9.1-imqmq/drivers/net/Kconfig      2013-05-08 17:30:29.011952562 +0300
+@@ -206,6 +206,125 @@
+       depends on RIONET
+       default "128"
++config IMQ
++      tristate "IMQ (intermediate queueing device) support"
++      depends on NETDEVICES && NETFILTER
++      ---help---
++        The IMQ device(s) is used as a placeholder for QoS queueing
++        disciplines. Every packet entering/leaving the IP stack can be
++        directed through the IMQ device, where it is enqueued to and dequeued
++        from the attached qdisc. This allows you to treat network devices as classes
++        and distribute bandwidth among them. Iptables is used to specify
++        through which IMQ device, if any, packets travel.
++
++        More information at: http://www.linuximq.net/
++
++        To compile this driver as a module, choose M here: the module
++        will be called imq.  If unsure, say N.
++
++choice
++      prompt "IMQ behavior (PRE/POSTROUTING)"
++      depends on IMQ
++      default IMQ_BEHAVIOR_AB
++      help
++        This setting defines how IMQ behaves with respect to its
++        hooking in PREROUTING and POSTROUTING.
++
++        IMQ can work in any of the following ways:
++
++            PREROUTING   |      POSTROUTING
++        -----------------|-------------------
++        #1  After NAT    |      After NAT
++        #2  After NAT    |      Before NAT
++        #3  Before NAT   |      After NAT
++        #4  Before NAT   |      Before NAT
++
++        The default behavior is to hook before NAT on PREROUTING
++        and after NAT on POSTROUTING (#3).
++
++        These settings are especially useful when trying to use IMQ
++        to shape NATed clients.
++
++        More information can be found at: www.linuximq.net
++
++        If not sure leave the default settings alone.
++
++config IMQ_BEHAVIOR_AA
++      bool "IMQ AA"
++      help
++        This setting defines how IMQ behaves with respect to its
++        hooking in PREROUTING and POSTROUTING.
++
++        Choosing this option will make IMQ hook like this:
++
++        PREROUTING:   After NAT
++        POSTROUTING:  After NAT
++
++        More information can be found at: www.linuximq.net
++
++        If not sure leave the default settings alone.
++
++config IMQ_BEHAVIOR_AB
++      bool "IMQ AB"
++      help
++        This setting defines how IMQ behaves with respect to its
++        hooking in PREROUTING and POSTROUTING.
++
++        Choosing this option will make IMQ hook like this:
++
++        PREROUTING:   After NAT
++        POSTROUTING:  Before NAT
++
++        More information can be found at: www.linuximq.net
++
++        If not sure leave the default settings alone.
++
++config IMQ_BEHAVIOR_BA
++      bool "IMQ BA"
++      help
++        This setting defines how IMQ behaves with respect to its
++        hooking in PREROUTING and POSTROUTING.
++
++        Choosing this option will make IMQ hook like this:
++
++        PREROUTING:   Before NAT
++        POSTROUTING:  After NAT
++
++        More information can be found at: www.linuximq.net
++
++        If not sure leave the default settings alone.
++
++config IMQ_BEHAVIOR_BB
++      bool "IMQ BB"
++      help
++        This setting defines how IMQ behaves with respect to its
++        hooking in PREROUTING and POSTROUTING.
++
++        Choosing this option will make IMQ hook like this:
++
++        PREROUTING:   Before NAT
++        POSTROUTING:  Before NAT
++
++        More information can be found at: www.linuximq.net
++
++        If not sure leave the default settings alone.
++
++endchoice
++
++config IMQ_NUM_DEVS
++      int "Number of IMQ devices"
++      range 2 16
++      depends on IMQ
++      default "16"
++      help
++        This setting defines how many IMQ devices will be created.
++
++        The default value is 16.
++
++        More information can be found at: www.linuximq.net
++
++        If not sure leave the default settings alone.
++
+ config TUN
+       tristate "Universal TUN/TAP device driver support"
+       select CRC32
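As a compact restatement of the Kconfig choice above, the small table program below maps each CONFIG_IMQ_BEHAVIOR_* symbol to where the IMQ hooks land relative to NAT, exactly as the per-option help texts describe; it contains no kernel code and is purely illustrative.

/* Reprint of the PRE/POSTROUTING behavior table from the IMQ Kconfig help. */
#include <stdio.h>

struct imq_behavior {
	const char *symbol;       /* Kconfig choice */
	const char *prerouting;   /* hook placement relative to NAT */
	const char *postrouting;
};

int main(void)
{
	static const struct imq_behavior tbl[] = {
		{ "CONFIG_IMQ_BEHAVIOR_AA", "after NAT",  "after NAT"  },
		{ "CONFIG_IMQ_BEHAVIOR_AB", "after NAT",  "before NAT" },
		{ "CONFIG_IMQ_BEHAVIOR_BA", "before NAT", "after NAT"  },
		{ "CONFIG_IMQ_BEHAVIOR_BB", "before NAT", "before NAT" },
	};
	for (unsigned int i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		printf("%-26s PREROUTING: %-10s POSTROUTING: %s\n",
		       tbl[i].symbol, tbl[i].prerouting, tbl[i].postrouting);
	return 0;
}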
+diff -uNr linux-3.9.1/drivers/net/Makefile linux-3.9.1-imqmq/drivers/net/Makefile
+--- linux-3.9.1/drivers/net/Makefile   2013-05-08 06:58:03.000000000 +0300
++++ linux-3.9.1-imqmq/drivers/net/Makefile     2013-05-08 17:30:29.011952562 +0300
+@@ -9,6 +9,7 @@
+ obj-$(CONFIG_DUMMY) += dummy.o
+ obj-$(CONFIG_EQUALIZER) += eql.o
+ obj-$(CONFIG_IFB) += ifb.o
++obj-$(CONFIG_IMQ) += imq.o
+ obj-$(CONFIG_MACVLAN) += macvlan.o
+ obj-$(CONFIG_MACVTAP) += macvtap.o
+ obj-$(CONFIG_MII) += mii.o
+diff -uNr linux-3.9.1/include/linux/imq.h linux-3.9.1-imqmq/include/linux/imq.h
+--- linux-3.9.1/include/linux/imq.h    1970-01-01 02:00:00.000000000 +0200
++++ linux-3.9.1-imqmq/include/linux/imq.h      2013-05-08 17:30:29.011952562 +0300
+@@ -0,0 +1,13 @@
++#ifndef _IMQ_H
++#define _IMQ_H
++
++/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */
++#define IMQ_F_BITS    5
++
++#define IMQ_F_IFMASK  0x0f
++#define IMQ_F_ENQUEUE 0x10
++
++#define IMQ_MAX_DEVS  (IMQ_F_IFMASK + 1)
++
++#endif /* _IMQ_H */
++
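The header above reserves four bits of skb->imq_flags for the target device index and one bit for the enqueue flag; xt_IMQ.c later in this patch builds the value as (todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE. Below is a minimal user-space sketch of that packing, reusing the same macro values (the device number 3 is just an example):

/* Stand-alone sketch of the imq_flags bit layout from linux/imq.h above. */
#include <stdio.h>

#define IMQ_F_IFMASK	0x0f
#define IMQ_F_ENQUEUE	0x10
#define IMQ_MAX_DEVS	(IMQ_F_IFMASK + 1)

int main(void)
{
	unsigned int todev = 3;	/* hypothetical target device: imq3 */
	unsigned char imq_flags = (todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;

	printf("imq_flags = 0x%02x (device index %u, enqueue=%s, %u devices max)\n",
	       imq_flags, imq_flags & IMQ_F_IFMASK,
	       (imq_flags & IMQ_F_ENQUEUE) ? "yes" : "no", IMQ_MAX_DEVS);
	return 0;
}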
+diff -uNr linux-3.9.1/include/linux/netfilter/xt_IMQ.h linux-3.9.1-imqmq/include/linux/netfilter/xt_IMQ.h
+--- linux-3.9.1/include/linux/netfilter/xt_IMQ.h       1970-01-01 02:00:00.000000000 +0200
++++ linux-3.9.1-imqmq/include/linux/netfilter/xt_IMQ.h 2013-05-08 17:30:29.011952562 +0300
+@@ -0,0 +1,9 @@
++#ifndef _XT_IMQ_H
++#define _XT_IMQ_H
++
++struct xt_imq_info {
++      unsigned int todev;     /* target imq device */
++};
++
++#endif /* _XT_IMQ_H */
++
+diff -uNr linux-3.9.1/include/linux/netfilter_ipv4/ipt_IMQ.h linux-3.9.1-imqmq/include/linux/netfilter_ipv4/ipt_IMQ.h
+--- linux-3.9.1/include/linux/netfilter_ipv4/ipt_IMQ.h 1970-01-01 02:00:00.000000000 +0200
++++ linux-3.9.1-imqmq/include/linux/netfilter_ipv4/ipt_IMQ.h   2013-05-08 17:30:29.011952562 +0300
+@@ -0,0 +1,10 @@
++#ifndef _IPT_IMQ_H
++#define _IPT_IMQ_H
++
++/* Backwards compatibility for old userspace */
++#include <linux/netfilter/xt_IMQ.h>
++
++#define ipt_imq_info xt_imq_info
++
++#endif /* _IPT_IMQ_H */
++
+diff -uNr linux-3.9.1/include/linux/netfilter_ipv6/ip6t_IMQ.h linux-3.9.1-imqmq/include/linux/netfilter_ipv6/ip6t_IMQ.h
+--- linux-3.9.1/include/linux/netfilter_ipv6/ip6t_IMQ.h        1970-01-01 02:00:00.000000000 +0200
++++ linux-3.9.1-imqmq/include/linux/netfilter_ipv6/ip6t_IMQ.h  2013-05-08 17:30:29.011952562 +0300
+@@ -0,0 +1,10 @@
++#ifndef _IP6T_IMQ_H
++#define _IP6T_IMQ_H
++
++/* Backwards compatibility for old userspace */
++#include <linux/netfilter/xt_IMQ.h>
++
++#define ip6t_imq_info xt_imq_info
++
++#endif /* _IP6T_IMQ_H */
++
+diff -uNr linux-3.9.1/include/linux/skbuff.h linux-3.9.1-imqmq/include/linux/skbuff.h
+--- linux-3.9.1/include/linux/skbuff.h 2013-05-08 06:58:03.000000000 +0300
++++ linux-3.9.1-imqmq/include/linux/skbuff.h   2013-05-08 17:30:29.015285965 +0300
+@@ -32,6 +32,9 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/netdev_features.h>
+ #include <net/flow_keys.h>
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++#include <linux/imq.h>
++#endif
+ /* Don't change this without changing skb_csum_unnecessary! */
+ #define CHECKSUM_NONE 0
+@@ -415,6 +418,9 @@
+        * first. This is owned by whoever has the skb queued ATM.
+        */
+       char                    cb[48] __aligned(8);
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++      void                    *cb_next;
++#endif
+       unsigned long           _skb_refdst;
+ #ifdef CONFIG_XFRM
+@@ -453,6 +459,9 @@
+ #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
+       struct sk_buff          *nfct_reasm;
+ #endif
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++      struct nf_queue_entry   *nf_queue_entry;
++#endif
+ #ifdef CONFIG_BRIDGE_NETFILTER
+       struct nf_bridge_info   *nf_bridge;
+ #endif
+@@ -491,6 +500,10 @@
+       /* 7/9 bit hole (depending on ndisc_nodetype presence) */
+       kmemcheck_bitfield_end(flags2);
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++      __u8                    imq_flags:IMQ_F_BITS;
++#endif
++
+ #ifdef CONFIG_NET_DMA
+       dma_cookie_t            dma_cookie;
+ #endif
+@@ -586,6 +599,12 @@
+       return (struct rtable *)skb_dst(skb);
+ }
++
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++extern int skb_save_cb(struct sk_buff *skb);
++extern int skb_restore_cb(struct sk_buff *skb);
++#endif
++
+ extern void kfree_skb(struct sk_buff *skb);
+ extern void skb_tx_error(struct sk_buff *skb);
+ extern void consume_skb(struct sk_buff *skb);
+@@ -2662,6 +2681,10 @@
+       dst->nfct_reasm = src->nfct_reasm;
+       nf_conntrack_get_reasm(src->nfct_reasm);
+ #endif
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++      dst->imq_flags = src->imq_flags;
++      dst->nf_queue_entry = src->nf_queue_entry;
++#endif
+ #ifdef CONFIG_BRIDGE_NETFILTER
+       dst->nf_bridge  = src->nf_bridge;
+       nf_bridge_get(src->nf_bridge);
+diff -uNr linux-3.9.1/include/net/netfilter/nf_queue.h linux-3.9.1-imqmq/include/net/netfilter/nf_queue.h
+--- linux-3.9.1/include/net/netfilter/nf_queue.h       2013-05-08 06:58:03.000000000 +0300
++++ linux-3.9.1-imqmq/include/net/netfilter/nf_queue.h 2013-05-08 17:30:29.015285965 +0300
+@@ -26,5 +26,11 @@
+ void nf_register_queue_handler(const struct nf_queue_handler *qh);
+ void nf_unregister_queue_handler(void);
+ extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
++extern void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
++
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++extern void nf_register_queue_imq_handler(const struct nf_queue_handler *qh);
++extern void nf_unregister_queue_imq_handler(void);
++#endif
+ #endif /* _NF_QUEUE_H */
+diff -uNr linux-3.9.1/include/uapi/linux/netfilter.h linux-3.9.1-imqmq/include/uapi/linux/netfilter.h
+--- linux-3.9.1/include/uapi/linux/netfilter.h 2013-05-08 06:58:03.000000000 +0300
++++ linux-3.9.1-imqmq/include/uapi/linux/netfilter.h   2013-05-08 17:30:29.015285965 +0300
+@@ -13,7 +13,8 @@
+ #define NF_QUEUE 3
+ #define NF_REPEAT 4
+ #define NF_STOP 5
+-#define NF_MAX_VERDICT NF_STOP
++#define NF_IMQ_QUEUE 6
++#define NF_MAX_VERDICT NF_IMQ_QUEUE
+ /* we overload the higher bits for encoding auxiliary data such as the queue
+  * number or errno values. Not nice, but better than additional function
+diff -uNr linux-3.9.1/net/core/dev.c linux-3.9.1-imqmq/net/core/dev.c
+--- linux-3.9.1/net/core/dev.c 2013-05-08 06:58:03.000000000 +0300
++++ linux-3.9.1-imqmq/net/core/dev.c   2013-05-08 17:30:29.018619368 +0300
+@@ -129,6 +129,9 @@
+ #include <linux/inetdevice.h>
+ #include <linux/cpu_rmap.h>
+ #include <linux/static_key.h>
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++#include <linux/imq.h>
++#endif
+ #include "net-sysfs.h"
+@@ -2529,7 +2532,12 @@
+                       }
+               }
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++              if (!list_empty(&ptype_all) &&
++                                      !(skb->imq_flags & IMQ_F_ENQUEUE))
++#else
+               if (!list_empty(&ptype_all))
++#endif
+                       dev_queue_xmit_nit(skb, dev);
+               skb_len = skb->len;
+diff -uNr linux-3.9.1/net/core/skbuff.c linux-3.9.1-imqmq/net/core/skbuff.c
+--- linux-3.9.1/net/core/skbuff.c      2013-05-08 06:58:03.000000000 +0300
++++ linux-3.9.1-imqmq/net/core/skbuff.c        2013-05-08 17:30:29.021952772 +0300
+@@ -73,6 +73,9 @@
+ struct kmem_cache *skbuff_head_cache __read_mostly;
+ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++static struct kmem_cache *skbuff_cb_store_cache __read_mostly;
++#endif
+ static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
+                                 struct pipe_buffer *buf)
+@@ -92,6 +95,82 @@
+       return 1;
+ }
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++/* Control buffer save/restore for IMQ devices */
++struct skb_cb_table {
++      char                    cb[48] __aligned(8);
++      void                    *cb_next;
++      atomic_t                refcnt;
++};
++
++static DEFINE_SPINLOCK(skb_cb_store_lock);
++
++int skb_save_cb(struct sk_buff *skb)
++{
++      struct skb_cb_table *next;
++
++      next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC);
++      if (!next)
++              return -ENOMEM;
++
++      BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
++
++      memcpy(next->cb, skb->cb, sizeof(skb->cb));
++      next->cb_next = skb->cb_next;
++
++      atomic_set(&next->refcnt, 1);
++
++      skb->cb_next = next;
++      return 0;
++}
++EXPORT_SYMBOL(skb_save_cb);
++
++int skb_restore_cb(struct sk_buff *skb)
++{
++      struct skb_cb_table *next;
++
++      if (!skb->cb_next)
++              return 0;
++
++      next = skb->cb_next;
++
++      BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
++
++      memcpy(skb->cb, next->cb, sizeof(skb->cb));
++      skb->cb_next = next->cb_next;
++
++      spin_lock(&skb_cb_store_lock);
++
++      if (atomic_dec_and_test(&next->refcnt))
++              kmem_cache_free(skbuff_cb_store_cache, next);
++
++      spin_unlock(&skb_cb_store_lock);
++
++      return 0;
++}
++EXPORT_SYMBOL(skb_restore_cb);
++
++static void skb_copy_stored_cb(struct sk_buff *new, const struct sk_buff *__old)
++{
++      struct skb_cb_table *next;
++      struct sk_buff *old;
++
++      if (!__old->cb_next) {
++              new->cb_next = NULL;
++              return;
++      }
++
++      spin_lock(&skb_cb_store_lock);
++
++      old = (struct sk_buff *)__old;
++
++      next = old->cb_next;
++      atomic_inc(&next->refcnt);
++      new->cb_next = next;
++
++      spin_unlock(&skb_cb_store_lock);
++}
++#endif
+ /* Pipe buffer operations for a socket. */
+ static const struct pipe_buf_operations sock_pipe_buf_ops = {
+@@ -562,6 +641,28 @@
+               WARN_ON(in_irq());
+               skb->destructor(skb);
+       }
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++      /*
++	 * This should not happen. When it does, avoid a memory leak by restoring
++       * the chain of cb-backups.
++       */
++      while (skb->cb_next != NULL) {
++              if (net_ratelimit())
++                      pr_warn("IMQ: kfree_skb: skb->cb_next: %08x\n",
++                              (unsigned int)skb->cb_next);
++
++              skb_restore_cb(skb);
++      }
++      /*
++	 * This should not happen either: nf_queue_entry is nullified in
++	 * imq_dev_xmit(). If we have a non-NULL nf_queue_entry here, then we
++	 * are leaking entry pointers and possibly memory. We don't know whether
++	 * this points to already-freed memory or whether it should be freed here.
++	 * If this happens, refcounting etc. needs to be added for nf_queue_entry.
++       */
++      if (skb->nf_queue_entry && net_ratelimit())
++              pr_warn("%s\n", "IMQ: kfree_skb: skb->nf_queue_entry != NULL");
++#endif
+ #if IS_ENABLED(CONFIG_NF_CONNTRACK)
+       nf_conntrack_put(skb->nfct);
+ #endif
+@@ -683,6 +784,9 @@
+       new->sp                 = secpath_get(old->sp);
+ #endif
+       memcpy(new->cb, old->cb, sizeof(old->cb));
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++      skb_copy_stored_cb(new, old);
++#endif
+       new->csum               = old->csum;
+       new->local_df           = old->local_df;
+       new->pkt_type           = old->pkt_type;
+@@ -3053,6 +3157,13 @@
+                                               0,
+                                               SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+                                               NULL);
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++      skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache",
++                                                sizeof(struct skb_cb_table),
++                                                0,
++                                                SLAB_HWCACHE_ALIGN|SLAB_PANIC,
++                                                NULL);
++#endif
+ }
+ /**
+diff -uNr linux-3.9.1/net/ipv6/ip6_output.c linux-3.9.1-imqmq/net/ipv6/ip6_output.c
+--- linux-3.9.1/net/ipv6/ip6_output.c  2013-05-08 06:58:03.000000000 +0300
++++ linux-3.9.1-imqmq/net/ipv6/ip6_output.c    2013-05-08 17:30:29.021952772 +0300
+@@ -89,9 +89,6 @@
+       struct in6_addr *nexthop;
+       int ret;
+-      skb->protocol = htons(ETH_P_IPV6);
+-      skb->dev = dev;
+-
+       if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
+               struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+@@ -167,6 +164,13 @@
+               return 0;
+       }
++      /*
++       * IMQ-patch: moved setting skb->dev and skb->protocol from
++	 * ip6_finish_output2() to fix a crash in netif_skb_features().
++       */
++      skb->protocol = htons(ETH_P_IPV6);
++      skb->dev = dev;
++
+       return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
+                           ip6_finish_output,
+                           !(IP6CB(skb)->flags & IP6SKB_REROUTED));
+diff -uNr linux-3.9.1/net/netfilter/core.c linux-3.9.1-imqmq/net/netfilter/core.c
+--- linux-3.9.1/net/netfilter/core.c   2013-05-08 06:58:03.000000000 +0300
++++ linux-3.9.1-imqmq/net/netfilter/core.c     2013-05-08 17:30:29.025286174 +0300
+@@ -188,9 +188,11 @@
+               ret = NF_DROP_GETERR(verdict);
+               if (ret == 0)
+                       ret = -EPERM;
+-      } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
++      } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE ||
++                 (verdict & NF_VERDICT_MASK) == NF_IMQ_QUEUE) {
+               int err = nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
+-                                              verdict >> NF_VERDICT_QBITS);
++                                              verdict >> NF_VERDICT_QBITS,
++                                              verdict & NF_VERDICT_MASK);
+               if (err < 0) {
+                       if (err == -ECANCELED)
+                               goto next_hook;
+diff -uNr linux-3.9.1/net/netfilter/Kconfig linux-3.9.1-imqmq/net/netfilter/Kconfig
+--- linux-3.9.1/net/netfilter/Kconfig  2013-05-08 06:58:03.000000000 +0300
++++ linux-3.9.1-imqmq/net/netfilter/Kconfig    2013-05-08 17:30:29.025286174 +0300
+@@ -641,6 +641,18 @@
+         To compile it as a module, choose M here.  If unsure, say N.
++config NETFILTER_XT_TARGET_IMQ
++        tristate '"IMQ" target support'
++      depends on NETFILTER_XTABLES
++      depends on IP_NF_MANGLE || IP6_NF_MANGLE
++      select IMQ
++      default m if NETFILTER_ADVANCED=n
++        help
++          This option adds an `IMQ' target which is used to specify if and
++          to which imq device packets should get enqueued/dequeued.
++
++          To compile it as a module, choose M here.  If unsure, say N.
++
+ config NETFILTER_XT_TARGET_MARK
+       tristate '"MARK" target support'
+       depends on NETFILTER_ADVANCED
+diff -uNr linux-3.9.1/net/netfilter/Makefile linux-3.9.1-imqmq/net/netfilter/Makefile
+--- linux-3.9.1/net/netfilter/Makefile 2013-05-08 06:58:03.000000000 +0300
++++ linux-3.9.1-imqmq/net/netfilter/Makefile   2013-05-08 17:30:29.025286174 +0300
+@@ -82,6 +82,7 @@
+ obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
++obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_NETMAP) += xt_NETMAP.o
+diff -uNr linux-3.9.1/net/netfilter/nf_internals.h linux-3.9.1-imqmq/net/netfilter/nf_internals.h
+--- linux-3.9.1/net/netfilter/nf_internals.h   2013-05-08 06:58:03.000000000 +0300
++++ linux-3.9.1-imqmq/net/netfilter/nf_internals.h     2013-05-08 17:30:29.025286174 +0300
+@@ -29,7 +29,7 @@
+                   struct net_device *indev,
+                   struct net_device *outdev,
+                   int (*okfn)(struct sk_buff *),
+-                  unsigned int queuenum);
++                  unsigned int queuenum, unsigned int queuetype);
+ extern int __init netfilter_queue_init(void);
+ /* nf_log.c */
+diff -uNr linux-3.9.1/net/netfilter/nf_queue.c linux-3.9.1-imqmq/net/netfilter/nf_queue.c
+--- linux-3.9.1/net/netfilter/nf_queue.c       2013-05-08 06:58:03.000000000 +0300
++++ linux-3.9.1-imqmq/net/netfilter/nf_queue.c 2013-05-08 17:30:29.025286174 +0300
+@@ -22,6 +22,23 @@
+  */
+ static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++static const struct nf_queue_handler __rcu *queue_imq_handler __read_mostly;
++
++void nf_register_queue_imq_handler(const struct nf_queue_handler *qh)
++{
++      rcu_assign_pointer(queue_imq_handler, qh);
++}
++EXPORT_SYMBOL_GPL(nf_register_queue_imq_handler);
++
++void nf_unregister_queue_imq_handler(void)
++{
++      RCU_INIT_POINTER(queue_imq_handler, NULL);
++      synchronize_rcu();
++}
++EXPORT_SYMBOL_GPL(nf_unregister_queue_imq_handler);
++#endif
++
+ /* return EBUSY when somebody else is registered, return EEXIST if the
+  * same handler is registered, return 0 in case of success. */
+ void nf_register_queue_handler(const struct nf_queue_handler *qh)
+@@ -71,7 +89,8 @@
+                     struct net_device *indev,
+                     struct net_device *outdev,
+                     int (*okfn)(struct sk_buff *),
+-                    unsigned int queuenum)
++                    unsigned int queuenum,
++                    unsigned int queuetype)
+ {
+       int status = -ENOENT;
+       struct nf_queue_entry *entry = NULL;
+@@ -85,7 +104,17 @@
+       /* QUEUE == DROP if no one is waiting, to be safe. */
+       rcu_read_lock();
+-      qh = rcu_dereference(queue_handler);
++      if (queuetype == NF_IMQ_QUEUE) {
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++              qh = rcu_dereference(queue_imq_handler);
++#else
++              BUG();
++              goto err_unlock;
++#endif
++      } else {
++              qh = rcu_dereference(queue_handler);
++      }
++
+       if (!qh) {
+               status = -ESRCH;
+               goto err_unlock;
+@@ -233,9 +261,11 @@
+               local_bh_enable();
+               break;
+       case NF_QUEUE:
++      case NF_IMQ_QUEUE:
+               err = nf_queue(skb, elem, entry->pf, entry->hook,
+                               entry->indev, entry->outdev, entry->okfn,
+-                              verdict >> NF_VERDICT_QBITS);
++                              verdict >> NF_VERDICT_QBITS,
++                              verdict & NF_VERDICT_MASK);
+               if (err < 0) {
+                       if (err == -ECANCELED)
+                               goto next_hook;
+diff -uNr linux-3.9.1/net/netfilter/xt_IMQ.c linux-3.9.1-imqmq/net/netfilter/xt_IMQ.c
+--- linux-3.9.1/net/netfilter/xt_IMQ.c 1970-01-01 02:00:00.000000000 +0200
++++ linux-3.9.1-imqmq/net/netfilter/xt_IMQ.c   2013-05-08 17:30:29.025286174 +0300
+@@ -0,0 +1,72 @@
++/*
++ * This target marks packets to be enqueued to an imq device
++ */
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter/x_tables.h>
++#include <linux/netfilter/xt_IMQ.h>
++#include <linux/imq.h>
++
++static unsigned int imq_target(struct sk_buff *pskb,
++                              const struct xt_action_param *par)
++{
++      const struct xt_imq_info *mr = par->targinfo;
++
++      pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
++
++      return XT_CONTINUE;
++}
++
++static int imq_checkentry(const struct xt_tgchk_param *par)
++{
++      struct xt_imq_info *mr = par->targinfo;
++
++      if (mr->todev > IMQ_MAX_DEVS - 1) {
++              pr_warn("IMQ: invalid device specified, highest is %u\n",
++                      IMQ_MAX_DEVS - 1);
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++static struct xt_target xt_imq_reg[] __read_mostly = {
++      {
++              .name           = "IMQ",
++              .family         = AF_INET,
++              .checkentry     = imq_checkentry,
++              .target         = imq_target,
++              .targetsize     = sizeof(struct xt_imq_info),
++              .table          = "mangle",
++              .me             = THIS_MODULE
++      },
++      {
++              .name           = "IMQ",
++              .family         = AF_INET6,
++              .checkentry     = imq_checkentry,
++              .target         = imq_target,
++              .targetsize     = sizeof(struct xt_imq_info),
++              .table          = "mangle",
++              .me             = THIS_MODULE
++      },
++};
++
++static int __init imq_init(void)
++{
++      return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
++}
++
++static void __exit imq_fini(void)
++{
++      xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
++}
++
++module_init(imq_init);
++module_exit(imq_fini);
++
++MODULE_AUTHOR("http://www.linuximq.net");
++MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("ipt_IMQ");
++MODULE_ALIAS("ip6t_IMQ");
++
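The net/core/skbuff.c part of this patch (skb_save_cb/skb_restore_cb above) saves skb->cb into a small reference-counted node before the qdisc layer overwrites it, and restores it when the packet leaves the IMQ device. The sketch below reduces that mechanism to plain user-space C: malloc/free stand in for the kmem_cache, and the refcount and spinlock are left out because this illustration is single-threaded.

/* User-space sketch of the skb->cb backup chain (skb_save_cb/skb_restore_cb).
 * malloc/free stand in for the kmem_cache; locking and refcounting are
 * reduced to the single-threaded case for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cb_backup {
	char cb[48];
	struct cb_backup *cb_next;
};

struct fake_skb {
	char cb[48];
	struct cb_backup *cb_next;	/* head of the backup chain */
};

static int skb_save_cb(struct fake_skb *skb)
{
	struct cb_backup *next = malloc(sizeof(*next));

	if (!next)
		return -1;
	memcpy(next->cb, skb->cb, sizeof(skb->cb));
	next->cb_next = skb->cb_next;	/* push onto the chain */
	skb->cb_next = next;
	return 0;
}

static int skb_restore_cb(struct fake_skb *skb)
{
	struct cb_backup *next = skb->cb_next;

	if (!next)
		return 0;
	memcpy(skb->cb, next->cb, sizeof(skb->cb));
	skb->cb_next = next->cb_next;	/* pop from the chain */
	free(next);
	return 0;
}

int main(void)
{
	struct fake_skb skb = { .cb = "original cb", .cb_next = NULL };

	skb_save_cb(&skb);			/* qdisc is about to scribble on cb */
	strcpy(skb.cb, "overwritten by qdisc");
	skb_restore_cb(&skb);			/* packet leaves the IMQ device */
	printf("cb after restore: %s\n", skb.cb);
	return 0;
}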
diff --git a/src/patches/linux-2.6-silence-acpi-blacklist.patch b/src/patches/linux-2.6-silence-acpi-blacklist.patch
new file mode 100644 (file)
index 0000000..c5997bb
--- /dev/null
@@ -0,0 +1,25 @@
+diff -up linux-2.6.26.noarch/drivers/acpi/blacklist.c.jx linux-2.6.26.noarch/drivers/acpi/blacklist.c
+--- linux-2.6.26.noarch/drivers/acpi/blacklist.c.jx    2008-07-13 17:51:29.000000000 -0400
++++ linux-2.6.26.noarch/drivers/acpi/blacklist.c       2008-08-12 14:21:39.000000000 -0400
+@@ -81,18 +81,18 @@ static int __init blacklist_by_year(void
+       /* Doesn't exist? Likely an old system */
+       if (!dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL)) {
+-              printk(KERN_ERR PREFIX "no DMI BIOS year, "
++              printk(KERN_INFO PREFIX "no DMI BIOS year, "
+                       "acpi=force is required to enable ACPI\n" );
+               return 1;
+       }
+       /* 0? Likely a buggy new BIOS */
+       if (year == 0) {
+-              printk(KERN_ERR PREFIX "DMI BIOS year==0, "
++              printk(KERN_INFO PREFIX "DMI BIOS year==0, "
+                       "assuming ACPI-capable machine\n" );
+               return 0;
+       }
+       if (year < CONFIG_ACPI_BLACKLIST_YEAR) {
+-              printk(KERN_ERR PREFIX "BIOS age (%d) fails cutoff (%d), "
++              printk(KERN_INFO PREFIX "BIOS age (%d) fails cutoff (%d), "
+                      "acpi=force is required to enable ACPI\n",
+                      year, CONFIG_ACPI_BLACKLIST_YEAR);
+               return 1;
diff --git a/src/patches/linux-2.6.30-no-pcspkr-modalias.patch b/src/patches/linux-2.6.30-no-pcspkr-modalias.patch
new file mode 100644 (file)
index 0000000..439269c
--- /dev/null
@@ -0,0 +1,12 @@
+diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
+index 34f4d2e..3e40c70 100644
+--- a/drivers/input/misc/pcspkr.c
++++ b/drivers/input/misc/pcspkr.c
+@@ -24,7 +24,6 @@
+ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
+ MODULE_DESCRIPTION("PC Speaker beeper driver");
+ MODULE_LICENSE("GPL");
+-MODULE_ALIAS("platform:pcspkr");
+ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
+ {
diff --git a/src/patches/linux-3.10-ipp2p-0.8.2-ipfire.patch b/src/patches/linux-3.10-ipp2p-0.8.2-ipfire.patch
new file mode 100644 (file)
index 0000000..7393760
--- /dev/null
@@ -0,0 +1,1039 @@
+diff -Naur linux-3.10.9.org/include/linux/netfilter_ipv4/ipt_ipp2p.h linux-3.10.9/include/linux/netfilter_ipv4/ipt_ipp2p.h
+--- linux-3.10.9.org/include/linux/netfilter_ipv4/ipt_ipp2p.h  1970-01-01 01:00:00.000000000 +0100
++++ linux-3.10.9/include/linux/netfilter_ipv4/ipt_ipp2p.h      2013-08-25 16:00:53.398088168 +0200
+@@ -0,0 +1,31 @@
++#ifndef __IPT_IPP2P_H
++#define __IPT_IPP2P_H
++#define IPP2P_VERSION "0.8.2-ipfire"
++
++struct ipt_p2p_info {
++    int cmd;
++    int debug;
++};
++
++#endif //__IPT_IPP2P_H
++
++#define SHORT_HAND_IPP2P      1 /* --ipp2p switch*/
++//#define SHORT_HAND_DATA             4 /* --ipp2p-data switch*/
++#define SHORT_HAND_NONE               5 /* no short hand*/
++
++#define IPP2P_EDK             (1 << 1)
++#define IPP2P_DATA_KAZAA      (1 << 2)
++#define IPP2P_DATA_EDK                (1 << 3)
++#define IPP2P_DATA_DC         (1 << 4)
++#define IPP2P_DC              (1 << 5)
++#define IPP2P_DATA_GNU                (1 << 6)
++#define IPP2P_GNU             (1 << 7)
++#define IPP2P_KAZAA           (1 << 8)
++#define IPP2P_BIT             (1 << 9)
++#define IPP2P_APPLE           (1 << 10)
++#define IPP2P_SOUL            (1 << 11)
++#define IPP2P_WINMX           (1 << 12)
++#define IPP2P_ARES            (1 << 13)
++#define IPP2P_MUTE            (1 << 14)
++#define IPP2P_WASTE           (1 << 15)
++#define IPP2P_XDCC            (1 << 16)
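The flag values above give every detected protocol its own bit, and the matching helpers in ipt_ipp2p.c below return (FLAG * 100) plus a signature number, e.g. ((IPP2P_EDK * 100) + 50) for the eDonkey client status request, so the caller can tell which signature fired. Below is a small hedged sketch of decoding such a value; the protocol names follow the usual ipp2p option names, and the sample value is taken from udp_search_edk() further down.

/* Sketch of how the ipp2p match results encode what matched:
 * each helper returns (PROTOCOL_FLAG * 100) + signature number.
 */
#include <stdio.h>

#define IPP2P_EDK	(1 << 1)
#define IPP2P_DC	(1 << 5)
#define IPP2P_GNU	(1 << 7)
#define IPP2P_KAZAA	(1 << 8)
#define IPP2P_BIT	(1 << 9)

static const char *proto_name(int flag)
{
	switch (flag) {
	case IPP2P_EDK:   return "eDonkey/eMule";
	case IPP2P_DC:    return "Direct Connect";
	case IPP2P_GNU:   return "Gnutella";
	case IPP2P_KAZAA: return "KaZaA";
	case IPP2P_BIT:   return "BitTorrent";
	default:          return "unknown";
	}
}

int main(void)
{
	int ret = (IPP2P_EDK * 100) + 50;	/* udp_search_edk(): client status request */

	printf("matched %s (flag 0x%x), signature #%d\n",
	       proto_name(ret / 100), ret / 100, ret % 100);
	return 0;
}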
+diff -Naur linux-3.10.9.org/net/ipv4/netfilter/ipt_ipp2p.c linux-3.10.9/net/ipv4/netfilter/ipt_ipp2p.c
+--- linux-3.10.9.org/net/ipv4/netfilter/ipt_ipp2p.c    1970-01-01 01:00:00.000000000 +0100
++++ linux-3.10.9/net/ipv4/netfilter/ipt_ipp2p.c        2013-08-25 16:00:53.398088168 +0200
+@@ -0,0 +1,970 @@
++#if defined(MODVERSIONS)
++#include <linux/modversions.h>
++#endif
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_ipp2p.h>
++#include <net/tcp.h>
++#include <net/udp.h>
++
++#define get_u8(X,O)  (*(__u8 *)(X + O))
++#define get_u16(X,O)  (*(__u16 *)(X + O))
++#define get_u32(X,O)  (*(__u32 *)(X + O))
++
++MODULE_AUTHOR("Eicke Friedrich/Klaus Degner <ipp2p@ipp2p.org>");
++MODULE_DESCRIPTION("An extension to iptables to identify P2P traffic.");
++MODULE_LICENSE("GPL");
++
++
++/*Search for UDP eDonkey/eMule/Kad commands*/
++int
++udp_search_edk (unsigned char *haystack, int packet_len)
++{
++    unsigned char *t = haystack;
++    t += 8;
++
++      switch (t[0]) {
++              case 0xe3: 
++              {       /*edonkey*/
++                      switch (t[1]) 
++                      {
++                              /* client -> server status request */
++                              case 0x96: 
++                                      if (packet_len == 14) return ((IPP2P_EDK * 100) + 50);
++                                      break;
++                              /* server -> client status request */
++                              case 0x97: if (packet_len == 42) return ((IPP2P_EDK * 100) + 51);
++                                      break;
++                                              /* server description request */
++                                              /* e3 2a ff f0 .. | size == 6 */
++                              case 0xa2: if ( (packet_len == 14) && ( get_u16(t,2) == __constant_htons(0xfff0) ) ) return ((IPP2P_EDK * 100) + 52);
++                                      break;
++                                              /* server description response */
++                                              /* e3 a3 ff f0 ..  | size > 40 && size < 200 */
++                              //case 0xa3: return ((IPP2P_EDK * 100) + 53);
++                              //      break;
++                              case 0x9a: if (packet_len==26) return ((IPP2P_EDK * 100) + 54);
++                                      break;
++
++                              case 0x92: if (packet_len==18) return ((IPP2P_EDK * 100) + 55);
++                                      break;
++                      }
++                      break;
++              }
++              case 0xe4: 
++              {
++                      switch (t[1]) 
++                      {
++                                              /* e4 20 .. | size == 43 */
++                              case 0x20: if ((packet_len == 43) && (t[2] != 0x00) && (t[34] != 0x00)) return ((IPP2P_EDK * 100) + 60);
++                                      break;
++                                              /* e4 00 .. 00 | size == 35 ? */
++                              case 0x00: if ((packet_len == 35) && (t[26] == 0x00)) return ((IPP2P_EDK * 100) + 61);
++                                      break;
++                                              /* e4 10 .. 00 | size == 35 ? */
++                              case 0x10: if ((packet_len == 35) && (t[26] == 0x00)) return ((IPP2P_EDK * 100) + 62);
++                                      break;
++                                              /* e4 18 .. 00 | size == 35 ? */
++                              case 0x18: if ((packet_len == 35) && (t[26] == 0x00)) return ((IPP2P_EDK * 100) + 63);
++                                      break;
++                                              /* e4 52 .. | size = 44 */
++                              case 0x52: if (packet_len == 44 ) return ((IPP2P_EDK * 100) + 64);
++                                      break;
++                                              /* e4 58 .. | size == 6 */
++                              case 0x58: if (packet_len == 14 ) return ((IPP2P_EDK * 100) + 65);
++                                      break;
++                                              /* e4 59 .. | size == 2 */
++                              case 0x59: if (packet_len == 10 )return ((IPP2P_EDK * 100) + 66);
++                                      break;
++                                      /* e4 28 .. | packet_len == 52,77,102,127... */
++                              case 0x28: if (((packet_len-52) % 25) == 0) return ((IPP2P_EDK * 100) + 67);
++                                      break;
++                                      /* e4 50 xx xx | size == 4 */
++                              case 0x50: if (packet_len == 12) return ((IPP2P_EDK * 100) + 68);
++                                      break;
++                                      /* e4 40 xx xx | size == 48 */
++                              case 0x40: if (packet_len == 56) return ((IPP2P_EDK * 100) + 69);
++                                      break;
++                      }
++                      break;
++              }
++      } /* end of switch (t[0]) */
++    return 0;
++}/*udp_search_edk*/
++
++
++/*Search for UDP Gnutella commands*/
++int
++udp_search_gnu (unsigned char *haystack, int packet_len)
++{
++    unsigned char *t = haystack;
++    t += 8;
++    
++    if (memcmp(t, "GND", 3) == 0) return ((IPP2P_GNU * 100) + 51);
++    if (memcmp(t, "GNUTELLA ", 9) == 0) return ((IPP2P_GNU * 100) + 52);
++    return 0;
++}/*udp_search_gnu*/
++
++
++/*Search for UDP KaZaA commands*/
++int
++udp_search_kazaa (unsigned char *haystack, int packet_len)
++{
++    unsigned char *t = haystack;
++    
++    if (t[packet_len-1] == 0x00){
++      t += (packet_len - 6);
++      if (memcmp(t, "KaZaA", 5) == 0) return (IPP2P_KAZAA * 100 +50);
++    }
++    
++    return 0;
++}/*udp_search_kazaa*/
++
++/*Search for UDP DirectConnect commands*/
++int
++udp_search_directconnect (unsigned char *haystack, int packet_len)
++{
++    unsigned char *t = haystack;
++    if ((*(t + 8) == 0x24) && (*(t + packet_len - 1) == 0x7c)) {
++      t+=8;
++      if (memcmp(t, "SR ", 3) == 0)                   return ((IPP2P_DC * 100) + 60);
++      if (memcmp(t, "Ping ", 5) == 0)                 return ((IPP2P_DC * 100) + 61);
++    }
++    return 0;
++}/*udp_search_directconnect*/
++
++
++
++/*Search for UDP BitTorrent commands*/
++int
++udp_search_bit (unsigned char *haystack, int packet_len)
++{
++      switch(packet_len)
++      {
++              case 24:
++                      /* ^ 00 00 04 17 27 10 19 80 */
++                      if ((ntohl(get_u32(haystack, 8)) == 0x00000417) && (ntohl(get_u32(haystack, 12)) == 0x27101980)) 
++                              return (IPP2P_BIT * 100 + 50);
++                      break;
++              case 44:
++                      if (get_u32(haystack, 16) == __constant_htonl(0x00000400) && get_u32(haystack, 36) == __constant_htonl(0x00000104)) 
++                              return (IPP2P_BIT * 100 + 51);
++                      if (get_u32(haystack, 16) == __constant_htonl(0x00000400))
++                              return (IPP2P_BIT * 100 + 61);
++                      break;
++              case 65:
++                      if (get_u32(haystack, 16) == __constant_htonl(0x00000404) && get_u32(haystack, 36) == __constant_htonl(0x00000104)) 
++                              return (IPP2P_BIT * 100 + 52);
++                      if (get_u32(haystack, 16) == __constant_htonl(0x00000404))
++                              return (IPP2P_BIT * 100 + 62);
++                      break;
++              case 67:
++                      if (get_u32(haystack, 16) == __constant_htonl(0x00000406) && get_u32(haystack, 36) == __constant_htonl(0x00000104)) 
++                              return (IPP2P_BIT * 100 + 53);
++                      if (get_u32(haystack, 16) == __constant_htonl(0x00000406))
++                              return (IPP2P_BIT * 100 + 63);
++                      break;
++              case 211:
++                      if (get_u32(haystack, 8) == __constant_htonl(0x00000405)) 
++                              return (IPP2P_BIT * 100 + 54);
++                      break;
++              case 29:
++                      if ((get_u32(haystack, 8) == __constant_htonl(0x00000401))) 
++                              return (IPP2P_BIT * 100 + 55);
++                      break;
++              case 52:
++                      if (get_u32(haystack,8)  == __constant_htonl(0x00000827) &&
++                      get_u32(haystack,12) == __constant_htonl(0x37502950))
++                              return (IPP2P_BIT * 100 + 80);
++                      break;
++              default:
++                      /* this packet does not have a constant size */
++                      if (packet_len >= 40 && get_u32(haystack, 16) == __constant_htonl(0x00000402) && get_u32(haystack, 36) == __constant_htonl(0x00000104)) 
++                              return (IPP2P_BIT * 100 + 56);
++                      break;
++      }
++    
++      /* some extra-bitcomet rules:
++      * "d1:" [a|r] "d2:id20:"
++      */
++      if (packet_len > 30 && get_u8(haystack, 8) == 'd' && get_u8(haystack, 9) == '1' && get_u8(haystack, 10) == ':' )
++      {
++              if (get_u8(haystack, 11) == 'a' || get_u8(haystack, 11) == 'r')
++              {
++                      if (memcmp(haystack+12,"d2:id20:",8)==0)
++                              return (IPP2P_BIT * 100 + 57);
++              }
++      }
++    
++#if 0
++      /* bitlord rules */
++      /* packetlen must be bigger than 40 */
++      /* first 4 bytes are zero */
++      if (packet_len > 40 && get_u32(haystack, 8) == 0x00000000)
++      {
++              /* first rule: 00 00 00 00 01 00 00 xx xx xx xx 00 00 00 00*/
++              if (get_u32(haystack, 12) == 0x00000000 && 
++                  get_u32(haystack, 16) == 0x00010000 &&
++                  get_u32(haystack, 24) == 0x00000000 )
++                      return (IPP2P_BIT * 100 + 71);
++                      
++              /* 00 01 00 00 0d 00 00 xx xx xx xx 00 00 00 00*/
++              if (get_u32(haystack, 12) == 0x00000001 && 
++                  get_u32(haystack, 16) == 0x000d0000 &&
++                  get_u32(haystack, 24) == 0x00000000 )
++                      return (IPP2P_BIT * 100 + 71);
++              
++                  
++      }
++#endif
++
++    return 0;
++}/*udp_search_bit*/
++
++
++
++/*Search for Ares commands*/
++//#define IPP2P_DEBUG_ARES
++int
++search_ares (const unsigned char *payload, const u16 plen)
++//int search_ares (unsigned char *haystack, int packet_len, int head_len)
++{
++//    const unsigned char *t = haystack + head_len;
++      
++      /* all ares packets start with  */
++      if (payload[1] == 0 && (plen - payload[0]) == 3)
++      {
++              switch (payload[2])
++              {
++                      case 0x5a:
++                              /* ares connect */
++                              if ( plen == 6 && payload[5] == 0x05 ) return ((IPP2P_ARES * 100) + 1);
++                              break;
++                      case 0x09:
++                              /* ares search, min 3 chars --> 14 bytes
++                               * lets define a search can be up to 30 chars --> max 34 bytes
++                               * let's say a search can be up to 30 chars --> max 34 bytes
++                              if ( plen >= 14 && plen <= 34 ) return ((IPP2P_ARES * 100) + 1);
++                              break;
++#ifdef IPP2P_DEBUG_ARES
++                      default:
++                      printk(KERN_DEBUG "Unknown Ares command %x recognized, len: %u \n", (unsigned int) payload[2],plen);
++#endif /* IPP2P_DEBUG_ARES */
++              }
++      }
++
++#if 0         
++      /* found connect packet: 03 00 5a 04 03 05 */
++      /* new version ares 1.8: 03 00 5a xx xx 05 */
++    if ((plen) == 6){ /* possible connect command*/
++      if ((payload[0] == 0x03) && (payload[1] == 0x00) && (payload[2] == 0x5a) && (payload[5] == 0x05))
++          return ((IPP2P_ARES * 100) + 1);
++    }
++    if ((plen) == 60){        /* possible download command*/
++      if ((payload[59] == 0x0a) && (payload[58] == 0x0a)){
++          if (memcmp(t, "PUSH SHA1:", 10) == 0) /* found download command */
++              return ((IPP2P_ARES * 100) + 2);
++      }
++    }
++#endif
++
++    return 0;
++} /*search_ares*/
++
++/*Search for SoulSeek commands*/
++int
++search_soul (const unsigned char *payload, const u16 plen)
++{
++//#define IPP2P_DEBUG_SOUL
++    /* match: xx xx xx xx | xx = sizeof(payload) - 4 */
++    if (get_u32(payload, 0) == (plen - 4)){
++      const __u32 m=get_u32(payload, 4);
++      /* match 00 yy yy 00, yy can be everything */
++        if ( get_u8(payload, 4) == 0x00 && get_u8(payload, 7) == 0x00 )
++      {
++#ifdef IPP2P_DEBUG_SOUL
++      printk(KERN_DEBUG "0: Soulseek command 0x%x recognized\n",get_u32(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++              return ((IPP2P_SOUL * 100) + 1);
++      }
++      
++        /* next match: 01 yy 00 00 | yy can be everything */
++        if ( get_u8(payload, 4) == 0x01 && get_u16(payload, 6) == 0x0000 )
++      {
++#ifdef IPP2P_DEBUG_SOUL
++      printk(KERN_DEBUG "1: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++              return ((IPP2P_SOUL * 100) + 2);
++      }
++      
++      /* other soulseek commands are: 1-5,7,9,13-18,22,23,26,28,35-37,40-46,50,51,60,62-69,91,92,1001 */
++      /* try to do this in an intelligent way */
++      /* get all small commands */
++      switch(m)
++      {
++              case 7:
++              case 9:
++              case 22:
++              case 23:
++              case 26:
++              case 28:
++              case 50:
++              case 51:
++              case 60:
++              case 91:
++              case 92:
++              case 1001:
++#ifdef IPP2P_DEBUG_SOUL
++              printk(KERN_DEBUG "2: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++              return ((IPP2P_SOUL * 100) + 3);
++      }
++      
++      if (m > 0 && m < 6 ) 
++      {
++#ifdef IPP2P_DEBUG_SOUL
++              printk(KERN_DEBUG "3: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++              return ((IPP2P_SOUL * 100) + 4);
++      }
++      if (m > 12 && m < 19 )
++      {
++#ifdef IPP2P_DEBUG_SOUL
++              printk(KERN_DEBUG "4: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++              return ((IPP2P_SOUL * 100) + 5);
++      }
++
++      if (m > 34 && m < 38 )
++      {
++#ifdef IPP2P_DEBUG_SOUL
++              printk(KERN_DEBUG "5: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++              return ((IPP2P_SOUL * 100) + 6);
++      }
++
++      if (m > 39 && m < 47 )
++      {
++#ifdef IPP2P_DEBUG_SOUL
++              printk(KERN_DEBUG "6: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++              return ((IPP2P_SOUL * 100) + 7);
++      }
++
++      if (m > 61 && m < 70 ) 
++      {
++#ifdef IPP2P_DEBUG_SOUL
++              printk(KERN_DEBUG "7: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++              return ((IPP2P_SOUL * 100) + 8);
++      }
++
++#ifdef IPP2P_DEBUG_SOUL
++      printk(KERN_DEBUG "unknown SOULSEEK command: 0x%x, first 16 bit: 0x%x, first 8 bit: 0x%x ,soulseek ???\n",get_u32(payload, 4),get_u16(payload, 4) >> 16,get_u8(payload, 4) >> 24);
++#endif /* IPP2P_DEBUG_SOUL */
++    }
++      
++      /* match 14 00 00 00 01 yy 00 00 00 STRING(YY) 01 00 00 00 00 46|50 00 00 00 00 */
++      /* without size at the beginning !!! */
++      if ( get_u32(payload, 0) == 0x14 && get_u8(payload, 4) == 0x01 )
++      {
++              __u32 y=get_u32(payload, 5);
++              /* we need 19 chars + string */
++              if ( (y + 19) <= (plen) )
++              {
++                      const unsigned char *w=payload+9+y;
++                      if (get_u32(w, 0) == 0x01 && ( get_u16(w, 4) == 0x4600 || get_u16(w, 4) == 0x5000) && get_u32(w, 6) == 0x00)
++#ifdef IPP2P_DEBUG_SOUL
++                      printk(KERN_DEBUG "Soulseek special client command recognized\n");
++#endif /* IPP2P_DEBUG_SOUL */
++                      return ((IPP2P_SOUL * 100) + 9);
++              }
++      }
++    return 0;
++}
++
++
++/*Search for WinMX commands*/
++int
++search_winmx (const unsigned char *payload, const u16 plen)
++{
++//#define IPP2P_DEBUG_WINMX
++    if (((plen) == 4) && (memcmp(payload, "SEND", 4) == 0))  return ((IPP2P_WINMX * 100) + 1);
++    if (((plen) == 3) && (memcmp(payload, "GET", 3) == 0))  return ((IPP2P_WINMX * 100) + 2);
++    //if (packet_len < (head_len + 10)) return 0;
++    if (plen < 10) return 0;
++    
++    if ((memcmp(payload, "SEND", 4) == 0) || (memcmp(payload, "GET", 3) == 0)){
++        u16 c=4;
++        const u16 end=plen-2;
++        u8 count=0;
++        while (c < end)
++        {
++              if (payload[c]== 0x20 && payload[c+1] == 0x22)
++              {
++                      c++;
++                      count++;
++                      if (count>=2) return ((IPP2P_WINMX * 100) + 3);
++              }
++              c++;
++        }
++    }
++    
++    if ( plen == 149 && payload[0] == '8' )
++    {
++#ifdef IPP2P_DEBUG_WINMX
++      printk(KERN_INFO "maybe WinMX\n");
++#endif
++      if (get_u32(payload,17) == 0 && get_u32(payload,21) == 0 && get_u32(payload,25) == 0 &&
++//                get_u32(payload,33) == __constant_htonl(0x71182b1a) && get_u32(payload,37) == __constant_htonl(0x05050000) &&
++//                get_u32(payload,133) == __constant_htonl(0x31097edf) && get_u32(payload,145) == __constant_htonl(0xdcb8f792))
++          get_u16(payload,39) == 0 && get_u16(payload,135) == __constant_htons(0x7edf) && get_u16(payload,147) == __constant_htons(0xf792))
++          
++      {
++#ifdef IPP2P_DEBUG_WINMX
++              printk(KERN_INFO "got WinMX\n");
++#endif
++              return ((IPP2P_WINMX * 100) + 4);
++      }
++    }
++    return 0;
++} /*search_winmx*/
++
++
++/*Search for appleJuice commands*/
++int
++search_apple (const unsigned char *payload, const u16 plen)
++{
++    if ( (plen > 7) && (payload[6] == 0x0d) && (payload[7] == 0x0a) && (memcmp(payload, "ajprot", 6) == 0))  return (IPP2P_APPLE * 100);
++    
++    return 0;
++}
++
++
++/*Search for BitTorrent commands*/
++int
++search_bittorrent (const unsigned char *payload, const u16 plen)
++{
++    if (plen > 20)
++    {
++      /* test for match 0x13+"BitTorrent protocol" */
++      if (payload[0] == 0x13) 
++      {
++              if (memcmp(payload+1, "BitTorrent protocol", 19) == 0) return (IPP2P_BIT * 100);
++      }
++      
++      /* get tracker commandos, all starts with GET /
++      * then it can follow: scrape| announce
++      * and then ?hash_info=
++      */
++      if (memcmp(payload,"GET /",5) == 0)
++      {
++              /* message scrape */
++              if ( memcmp(payload+5,"scrape?info_hash=",17)==0 ) return (IPP2P_BIT * 100 + 1);
++              /* message announce */
++              if ( memcmp(payload+5,"announce?info_hash=",19)==0 ) return (IPP2P_BIT * 100 + 2);
++      }
++    } 
++    else 
++    {
++      /* bitcomet encrypts the first packet, so we have to detect another
++       * one later in the flow */
++       /* first try failed, too many misdetections */
++      //if ( size == 5 && get_u32(t,0) == __constant_htonl(1) && t[4] < 3) return (IPP2P_BIT * 100 + 3);
++      
++      /* second try: block request packets */
++      if ( plen == 17 && get_u32(payload,0) == __constant_htonl(0x0d) && payload[4] == 0x06 && get_u32(payload,13) == __constant_htonl(0x4000) ) return (IPP2P_BIT * 100 + 3);
++    }
++
++    return 0;
++}
++
++
++
++/*check for Kazaa get command*/
++int
++search_kazaa (const unsigned char *payload, const u16 plen)
++
++{
++    if ((payload[plen-2] == 0x0d) && (payload[plen-1] == 0x0a) && memcmp(payload, "GET /.hash=", 11) == 0)
++      return (IPP2P_DATA_KAZAA * 100);
++
++    return 0;
++}
++
++
++/*check for gnutella get command*/
++int
++search_gnu (const unsigned char *payload, const u16 plen)
++{
++    if ((payload[plen-2] == 0x0d) && (payload[plen-1] == 0x0a))
++    {
++      if (memcmp(payload, "GET /get/", 9) == 0)       return ((IPP2P_DATA_GNU * 100) + 1);
++      if (memcmp(payload, "GET /uri-res/", 13) == 0) return ((IPP2P_DATA_GNU * 100) + 2); 
++    }
++    return 0;
++}
++
++
++/*check for gnutella get commands and other typical data*/
++int
++search_all_gnu (const unsigned char *payload, const u16 plen)
++{
++    
++    if ((payload[plen-2] == 0x0d) && (payload[plen-1] == 0x0a))
++    {
++      
++      if (memcmp(payload, "GNUTELLA CONNECT/", 17) == 0) return ((IPP2P_GNU * 100) + 1);
++      if (memcmp(payload, "GNUTELLA/", 9) == 0) return ((IPP2P_GNU * 100) + 2);    
++    
++    
++      if ((memcmp(payload, "GET /get/", 9) == 0) || (memcmp(payload, "GET /uri-res/", 13) == 0))
++      {        
++              u16 c=8;
++              const u16 end=plen-22;
++              while (c < end) {
++                      if ( payload[c] == 0x0a && payload[c+1] == 0x0d && ((memcmp(&payload[c+2], "X-Gnutella-", 11) == 0) || (memcmp(&payload[c+2], "X-Queue:", 8) == 0))) 
++                              return ((IPP2P_GNU * 100) + 3);
++                      c++;
++              }
++      }
++    }
++    return 0;
++}
++
++
++/*check for KaZaA download commands and other typical data*/
++int
++search_all_kazaa (const unsigned char *payload, const u16 plen)
++{
++    if ((payload[plen-2] == 0x0d) && (payload[plen-1] == 0x0a))
++    {
++
++      if (memcmp(payload, "GIVE ", 5) == 0) return ((IPP2P_KAZAA * 100) + 1);
++    
++      if (memcmp(payload, "GET /", 5) == 0) {
++              u16 c = 8;
++              const u16 end=plen-22;
++              while (c < end) {
++                      if ( payload[c] == 0x0a && payload[c+1] == 0x0d && ((memcmp(&payload[c+2], "X-Kazaa-Username: ", 18) == 0) || (memcmp(&payload[c+2], "User-Agent: PeerEnabler/", 24) == 0)))
++                              return ((IPP2P_KAZAA * 100) + 2);
++                      c++;
++              }
++      }
++    }
++    return 0;
++}
++
++/*fast check for edonkey file segment transfer command*/
++int
++search_edk (const unsigned char *payload, const u16 plen)
++{
++    if (payload[0] != 0xe3) 
++      return 0;
++    else {
++      if (payload[5] == 0x47) 
++          return (IPP2P_DATA_EDK * 100);
++      else    
++          return 0;
++    }
++}
++
++
++
++/*intensive but slower search for some edonkey packets including size-check*/
++int
++search_all_edk (const unsigned char *payload, const u16 plen)
++{
++    if (payload[0] != 0xe3) 
++      return 0;
++    else {
++      //t += head_len;        
++      const u16 cmd = get_u16(payload, 1);
++      if (cmd == (plen - 5)) {
++          switch (payload[5]) {
++              case 0x01: return ((IPP2P_EDK * 100) + 1);      /*Client: hello or Server:hello*/
++              case 0x4c: return ((IPP2P_EDK * 100) + 9);      /*Client: Hello-Answer*/
++          }
++      }
++      return 0;
++     }
++}
++
++
++/*fast check for Direct Connect send command*/
++int
++search_dc (const unsigned char *payload, const u16 plen)
++{
++
++    if (payload[0] != 0x24 ) 
++      return 0;
++    else {
++      if (memcmp(&payload[1], "Send|", 5) == 0)
++          return (IPP2P_DATA_DC * 100);
++      else
++          return 0;
++    } 
++
++}
++
++
++/*intensive but slower check for all direct connect packets*/
++int
++search_all_dc (const unsigned char *payload, const u16 plen)
++{
++//    unsigned char *t = haystack;
++
++    if (payload[0] == 0x24 && payload[plen-1] == 0x7c) 
++    {
++      const unsigned char *t=&payload[1];
++              /* Client-Hub-Protocol */
++      if (memcmp(t, "Lock ", 5) == 0)                 return ((IPP2P_DC * 100) + 1);
++      /* Client-Client-Protocol, some are already recognized by client-hub (like lock) */
++      if (memcmp(t, "MyNick ", 7) == 0)               return ((IPP2P_DC * 100) + 38); 
++    }
++    return 0;
++}
++
++/*check for mute*/
++int
++search_mute (const unsigned char *payload, const u16 plen)
++{
++      if ( plen == 209 || plen == 345 || plen == 473 || plen == 609 || plen == 1121 )
++      {
++              //printk(KERN_DEBUG "size hit: %u",size);
++              if (memcmp(payload,"PublicKey: ",11) == 0 )
++              { 
++                      return ((IPP2P_MUTE * 100) + 0);
++                      
++/*                    if (memcmp(t+size-14,"\x0aEndPublicKey\x0a",14) == 0)
++                      {
++                              printk(KERN_DEBUG "end public key hit: %u",size);
++                              
++                      }*/
++              }
++      }
++      return 0;
++}
++
++
++/* check for xdcc */
++int
++search_xdcc (const unsigned char *payload, const u16 plen)
++{
++      /* search in small packets only */
++      if (plen > 20 && plen < 200 && payload[plen-1] == 0x0a && payload[plen-2] == 0x0d && memcmp(payload,"PRIVMSG ",8) == 0)
++      {
++              
++              u16 x=10;
++              const u16 end=plen - 13;
++              
++              /* it seems to be an IRC private message, check for xdcc command */
++              while (x < end)
++              {
++                      if (payload[x] == ':')
++                      {
++                              if ( memcmp(&payload[x+1],"xdcc send #",11) == 0 )
++                                      return ((IPP2P_XDCC * 100) + 0);
++                      }
++                      x++;
++              }
++      }
++      return 0;
++}
++
++/* search for waste */
++int search_waste(const unsigned char *payload, const u16 plen)
++{
++      if ( plen >= 9 && memcmp(payload,"GET.sha1:",9) == 0)
++              return ((IPP2P_WASTE * 100) + 0);
++
++      return 0;
++}
++
++
++static struct {
++    int command;
++    __u8 short_hand;                  /*for functions included in short hands*/
++    int packet_len;
++    int (*function_name) (const unsigned char *, const u16);
++} matchlist[] = {
++    {IPP2P_EDK,SHORT_HAND_IPP2P,20, &search_all_edk},
++//    {IPP2P_DATA_KAZAA,SHORT_HAND_DATA,200, &search_kazaa},
++//    {IPP2P_DATA_EDK,SHORT_HAND_DATA,60, &search_edk},
++//    {IPP2P_DATA_DC,SHORT_HAND_DATA,26, &search_dc},
++    {IPP2P_DC,SHORT_HAND_IPP2P,5, search_all_dc},
++//    {IPP2P_DATA_GNU,SHORT_HAND_DATA,40, &search_gnu},
++    {IPP2P_GNU,SHORT_HAND_IPP2P,5, &search_all_gnu},
++    {IPP2P_KAZAA,SHORT_HAND_IPP2P,5, &search_all_kazaa},
++    {IPP2P_BIT,SHORT_HAND_IPP2P,20, &search_bittorrent},
++    {IPP2P_APPLE,SHORT_HAND_IPP2P,5, &search_apple},
++    {IPP2P_SOUL,SHORT_HAND_IPP2P,5, &search_soul},
++    {IPP2P_WINMX,SHORT_HAND_IPP2P,2, &search_winmx},
++    {IPP2P_ARES,SHORT_HAND_IPP2P,5, &search_ares},
++    {IPP2P_MUTE,SHORT_HAND_NONE,200, &search_mute},
++    {IPP2P_WASTE,SHORT_HAND_NONE,5, &search_waste},
++    {IPP2P_XDCC,SHORT_HAND_NONE,5, &search_xdcc},
++    {0,0,0,NULL}
++};
++
++
++static struct {
++    int command;
++    __u8 short_hand;                  /*for functions included in short hands*/
++    int packet_len;
++    int (*function_name) (unsigned char *, int);
++} udp_list[] = {
++    { IPP2P_KAZAA, SHORT_HAND_IPP2P, 14, &udp_search_kazaa},
++    { IPP2P_BIT,   SHORT_HAND_IPP2P, 23, &udp_search_bit},
++    { IPP2P_GNU,   SHORT_HAND_IPP2P, 11, &udp_search_gnu},
++    { IPP2P_EDK,   SHORT_HAND_IPP2P,  9, &udp_search_edk},
++    { IPP2P_DC,    SHORT_HAND_IPP2P, 12, &udp_search_directconnect},    
++    { 0, 0, 0, NULL }
++};
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++static int
++match(const struct sk_buff *skb,
++      const struct net_device *in,
++      const struct net_device *out,
++      const void *matchinfo,
++      int offset,
++      const void *hdr,
++      u_int16_t datalen,
++      int *hotdrop) 
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
++static int
++match(const struct sk_buff *skb,
++      const struct net_device *in,
++      const struct net_device *out,
++      const void *matchinfo,
++      int offset,
++      int *hotdrop) 
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static int
++match(const struct sk_buff *skb,
++      const struct net_device *in,
++      const struct net_device *out,
++      const void *matchinfo,
++      int offset,
++      unsigned int protoff,
++      int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++static int
++match(const struct sk_buff *skb,
++      const struct net_device *in,
++      const struct net_device *out,
++      const struct xt_match *match,
++      const void *matchinfo,
++      int offset,
++      unsigned int protoff,
++      int *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static bool
++match(const struct sk_buff *skb,
++      const struct net_device *in,
++      const struct net_device *out,
++      const struct xt_match *match,
++      const void *matchinfo,
++      int offset, 
++      unsigned int protoff, 
++      bool *hotdrop)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
++static bool
++match(const struct sk_buff *skb,
++      const struct xt_match_param *par)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) */
++static int
++match(const struct sk_buff *skb,
++      struct xt_action_param *par)
++#endif
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++      const struct ipt_p2p_info *info = matchinfo;
++#else
++      const struct ipt_p2p_info *info = par->matchinfo;
++      const int offset = par->fragoff;
++#endif
++    unsigned char  *haystack;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++    struct iphdr *ip = ip_hdr(skb);
++#else
++    struct iphdr *ip = skb->nh.iph;
++#endif
++    int p2p_result = 0, i = 0;
++//    int head_len;
++    int hlen = ntohs(ip->tot_len)-(ip->ihl*4);        /*hlen = packet-data length*/
++
++    /*must not be a fragment*/
++    if (offset) {
++      if (info->debug) printk("IPP2P.match: offset found %i \n",offset);
++      return 0;
++    }
++    
++    /*make sure that skb is linear*/
++    if(skb_is_nonlinear(skb)){
++      if (info->debug) printk("IPP2P.match: nonlinear skb found\n");
++      return 0;
++    }
++
++
++    haystack=(char *)ip+(ip->ihl*4);          /*haystack = packet data*/
++
++    switch (ip->protocol){
++      case IPPROTO_TCP:               /*what to do with a TCP packet*/
++      {
++          struct tcphdr *tcph = (void *) ip + ip->ihl * 4;
++          
++          if (tcph->fin) return 0;  /*if FIN bit is set bail out*/
++          if (tcph->syn) return 0;  /*if SYN bit is set bail out*/
++          if (tcph->rst) return 0;  /*if RST bit is set bail out*/
++          
++          haystack += tcph->doff * 4; /*get TCP-Header-Size*/
++          hlen -= tcph->doff * 4;
++          while (matchlist[i].command) {
++              if ((((info->cmd & matchlist[i].command) == matchlist[i].command) ||
++                  ((info->cmd & matchlist[i].short_hand) == matchlist[i].short_hand)) &&
++                  (hlen > matchlist[i].packet_len)) {
++                          p2p_result = matchlist[i].function_name(haystack, hlen);
++                          if (p2p_result) 
++                          {
++                              if (info->debug) printk("IPP2P.debug:TCP-match: %i from: %pI4:%i to: %pI4:%i Length: %i\n",
++                                  p2p_result, &ip->saddr,ntohs(tcph->source), &ip->daddr,ntohs(tcph->dest),hlen);
++                              return p2p_result;
++                          }
++              }
++          i++;
++          }
++          return p2p_result;
++      }
++      
++      case IPPROTO_UDP:               /*what to do with an UDP packet*/
++      {
++          struct udphdr *udph = (void *) ip + ip->ihl * 4;
++          
++          while (udp_list[i].command){
++              if ((((info->cmd & udp_list[i].command) == udp_list[i].command) ||
++                  ((info->cmd & udp_list[i].short_hand) == udp_list[i].short_hand)) &&
++                  (hlen > udp_list[i].packet_len)) {
++                          p2p_result = udp_list[i].function_name(haystack, hlen);
++                          if (p2p_result){
++                              if (info->debug) printk("IPP2P.debug:UDP-match: %i from: %pI4:%i to: %pI4:%i Length: %i\n",
++                                  p2p_result, &ip->saddr,ntohs(udph->source), &ip->daddr,ntohs(udph->dest),hlen);
++                              return p2p_result;
++                          }
++              }
++          i++;
++          }                   
++          return p2p_result;
++      }
++    
++      default: return 0;
++    }
++}
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
++static int
++checkentry(const char *tablename,
++         const struct ipt_ip *ip,
++         void *matchinfo,
++         unsigned int matchsize,
++         unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++static int
++checkentry(const char *tablename,
++         const void *inf,
++         void *matchinfo,
++         unsigned int matchsize,
++         unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++static int
++checkentry(const char *tablename,
++         const void *inf,
++         const struct xt_match *match,
++         void *matchinfo,
++         unsigned int matchsize,
++         unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++static int
++checkentry(const char *tablename,
++         const void *inf,
++         const struct xt_match *match,
++         void *matchinfo,
++         unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
++static bool
++checkentry(const char *tablename,
++         const void *inf,
++         const struct xt_match *match,
++         void *matchinfo,
++         unsigned int hook_mask)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
++static bool
++checkentry(const struct xt_mtchk_param *par)
++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) */
++static int
++checkentry(const struct xt_mtchk_param *par)
++#endif
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
++    return 1;
++#else
++    return 0;
++#endif
++}
++
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++static struct xt_match ipp2p_match = {
++#else
++static struct ipt_match ipp2p_match = { 
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++      { NULL, NULL }, 
++      "ipp2p", 
++      &ipp2p_match, 
++      &ipp2p_checkentry, 
++      NULL, 
++      THIS_MODULE
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++      .name           = "ipp2p",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++      .family         = AF_INET,
++#endif
++      .match          = &match,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++      .matchsize      = sizeof(struct ipt_p2p_info),
++#endif
++      .checkentry     = &checkentry,
++      .me             = THIS_MODULE,
++#endif
++};
++
++
++static int __init init(void)
++{
++    printk(KERN_INFO "IPP2P v%s loading\n", IPP2P_VERSION);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++    return xt_register_match(&ipp2p_match);
++#else
++    return ipt_register_match(&ipp2p_match);
++#endif
++}
++      
++static void __exit fini(void)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++    xt_unregister_match(&ipp2p_match);
++#else
++    ipt_unregister_match(&ipp2p_match);
++#endif
++    printk(KERN_INFO "IPP2P v%s unloaded\n", IPP2P_VERSION);    
++}
++      
++module_init(init);
++module_exit(fini);
++
++
+diff -Naur linux-3.10.9.org/net/ipv4/netfilter/Kconfig linux-3.10.9/net/ipv4/netfilter/Kconfig
+--- linux-3.10.9.org/net/ipv4/netfilter/Kconfig        2013-08-21 00:40:47.000000000 +0200
++++ linux-3.10.9/net/ipv4/netfilter/Kconfig    2013-08-25 16:00:53.398088168 +0200
+@@ -320,5 +320,15 @@
+ endif # IP_NF_ARPTABLES
++config IP_NF_MATCH_IPP2P
++      tristate  'IPP2P match support'
++      depends on IP_NF_IPTABLES
++      help
++        This option makes it possible to match some P2P packets
++        and therefore helps to control such traffic.
++      
++        If you want to compile it as a module, say M here and read
++        <file:Documentation/modules.txt>.  If unsure, say `N'.
++
+ endmenu
+diff -Naur linux-3.10.9.org/net/ipv4/netfilter/Makefile linux-3.10.9/net/ipv4/netfilter/Makefile
+--- linux-3.10.9.org/net/ipv4/netfilter/Makefile       2013-08-21 00:40:47.000000000 +0200
++++ linux-3.10.9/net/ipv4/netfilter/Makefile   2013-08-25 16:03:21.634750053 +0200
+@@ -40,6 +40,7 @@
+ # matches
+ obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
+ obj-$(CONFIG_IP_NF_MATCH_RPFILTER) += ipt_rpfilter.o
++obj-$(CONFIG_IP_NF_MATCH_IPP2P) += ipt_ipp2p.o
+ # targets
+ obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
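A note on the detector convention used throughout ipt_ipp2p.c above: every search_* and udp_search_* helper returns 0 for no match, or a value of the form (IPP2P_XXX * 100) + rule, so a non-zero result identifies both the protocol flag and the specific heuristic that fired. With CONFIG_IP_NF_MATCH_IPP2P enabled (typically as a module), the match is selected from iptables through the separate userspace extension, usually with the "--ipp2p" switch mentioned in the header — that userspace part is not included in this patch. A small self-contained sketch of decoding a result value for debugging, assuming only the constants from ipt_ipp2p.h:

/* Illustrative decode of the (protocol * 100 + rule) return convention. */
#include <stdio.h>

#define IPP2P_EDK (1 << 1)
#define IPP2P_BIT (1 << 9)

static void decode(int p2p_result)
{
	if (p2p_result == 0) {
		printf("no match\n");
		return;
	}
	printf("protocol flag: %d, heuristic: %d\n",
	       p2p_result / 100, p2p_result % 100);
}

int main(void)
{
	decode(IPP2P_BIT * 100 + 57);	/* the bitcomet "d1:...d2:id20:" rule */
	decode(IPP2P_EDK * 100 + 50);	/* eDonkey UDP status request */
	decode(0);
	return 0;
}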
diff --git a/src/patches/linux-3.7-disable-compat_vdso.patch b/src/patches/linux-3.7-disable-compat_vdso.patch
new file mode 100644 (file)
index 0000000..c06bd8b
--- /dev/null
@@ -0,0 +1,46 @@
+No need to wrap vdso calls as Gentoo does not use any version of
+glibc <= 2.3.3
+---
+From: Gordon Malm <gengor@gentoo.org>
+From: Kerin Millar <kerframil@gmail.com>
+From: Jory A. Pratt    <anarchy@gentoo.org>
+
+COMPAT_VDSO is inappropriate for any modern Hardened Gentoo system. It
+conflicts with various parts of PaX, crashing the system if enabled
+while PaX's NOEXEC or UDEREF features are active. Moreover, it prevents
+a number of important PaX options from appearing in the configuration
+menu, including all PaX NOEXEC implementations. Unfortunately, the
+reason for the disappearance of these PaX configuration options is
+often far from obvious to inexperienced users.
+
+Therefore, we disable the COMPAT_VDSO menu entry entirely. However,
+COMPAT_VDSO operation can still be enabled via bootparam and sysctl
+interfaces. Consequently, we must also disable the ability to select
+COMPAT_VDSO operation at boot or runtime. Here we patch the kernel so
+that selecting COMPAT_VDSO operation at boot/runtime has no effect if
+conflicting PaX options are enabled, leaving VDSO_ENABLED operation
+intact.
+
+Closes bug: http://bugs.gentoo.org/show_bug.cgi?id=210138
+
+diff -urp a/arch/x86/Kconfig b/arch/x86/Kconfig
+--- a/arch/x86/Kconfig 2009-07-31 01:36:57.323857684 +0100
++++ b/arch/x86/Kconfig 2009-07-31 01:51:39.395749681 +0100
+@@ -1651,17 +1651,8 @@
+ config COMPAT_VDSO
+       def_bool n
+-      prompt "Compat VDSO support"
+       depends on X86_32 || IA32_EMULATION
+       depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+-      ---help---
+-        Map the 32-bit VDSO to the predictable old-style address too.
+-
+-        Say N here if you are running a sufficiently recent glibc
+-        version (2.3.3 or later), to remove the high-mapped
+-        VDSO mapping and to exclusively use the randomized VDSO.
+-
+-        If unsure, say Y.
+ config CMDLINE_BOOL
+       bool "Built-in kernel command line"
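The hunk above only removes the COMPAT_VDSO prompt and help text and adds the PaX dependencies; the boot-parameter and sysctl handling mentioned in the description is changed in parts of the patch not shown here. Purely as a rough illustration of the idea described above — the function and variable names below are made up and do not appear in the real patch — the runtime guard amounts to refusing the compat mapping whenever a conflicting PaX feature is built in:

/* Hypothetical sketch only: ignore requests for the fixed-address
 * (compat) VDSO mapping when conflicting hardening options are enabled.
 * The CONFIG_PAX_* symbols are the ones named in the Kconfig hunk above;
 * everything else here is illustrative. */
static int vdso32_mode = 1;	/* 0 = off, 1 = randomized VDSO, 2 = compat VDSO */

static void request_vdso32_mode(int requested)
{
#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || \
    defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
	if (requested == 2)
		requested = 1;	/* compat mapping would conflict with PaX */
#endif
	vdso32_mode = requested;
}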
diff --git a/src/patches/net-tools-1.60-kernel_headers-3.patch b/src/patches/net-tools-1.60-kernel_headers-3.patch
new file mode 100644 (file)
index 0000000..89988f0
--- /dev/null
@@ -0,0 +1,55 @@
+diff -Naur net-tools-1.60.org/hostname.c net-tools-1.60/hostname.c
+--- net-tools-1.60.org/hostname.c      2001-04-08 19:04:23.000000000 +0200
++++ net-tools-1.60/hostname.c  2013-08-26 11:56:50.131844273 +0200
+@@ -42,10 +42,16 @@
+ #include "config.h"
+ #include "version.h"
+ #include "../intl.h"
++#include <linux/version.h>
+ #if HAVE_AFDECnet
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
+ #include <netdnet/dn.h>
+ #endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
++#include <linux/dn.h>
++#endif
++#endif
+ char *Release = RELEASE, *Version = "hostname 1.100 (2001-04-14)";
+diff -Naur net-tools-1.60.org/lib/tr.c net-tools-1.60/lib/tr.c
+--- net-tools-1.60.org/lib/tr.c        2000-02-20 22:46:45.000000000 +0100
++++ net-tools-1.60/lib/tr.c    2013-08-26 11:57:33.675175033 +0200
+@@ -20,7 +20,7 @@
+ #include <sys/types.h>
+ #include <sys/socket.h>
+ #include <net/if_arp.h>
+-#include <linux/if_tr.h>
++#include <netinet/if_tr.h>
+ #include <stdlib.h>
+ #include <stdio.h>
+ #include <errno.h>
+diff -Naur net-tools-1.60.org/lib/x25_sr.c net-tools-1.60/lib/x25_sr.c
+--- net-tools-1.60.org/lib/x25_sr.c    2000-05-20 15:38:10.000000000 +0200
++++ net-tools-1.60/lib/x25_sr.c        2013-08-26 11:56:50.131844273 +0200
+@@ -22,6 +22,7 @@
+ #include <sys/socket.h>
+ #include <sys/ioctl.h>
+ #include <linux/x25.h>
++#include <linux/version.h>
+ #include <ctype.h>
+ #include <errno.h>
+ #include <netdb.h>
+@@ -77,7 +78,11 @@
+   rt.sigdigits=sigdigits;
+   /* x25_route_struct.address isn't type struct sockaddr_x25, Why? */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+   memcpy(&rt.address, &sx25.sx25_addr, sizeof(x25_address));
++#else
++  memcpy(&rt.address, &sx25.sx25_addr, sizeof(struct x25_address));
++#endif
+   while (*args) {
+       if (!strcmp(*args,"device") || !strcmp(*args,"dev")) {
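All three hunks in this patch rely on the same idiom: <linux/version.h> defines LINUX_VERSION_CODE for the installed kernel headers and the KERNEL_VERSION(a, b, c) macro to build a comparable constant, so declarations can be switched at compile time. A minimal standalone example of the idiom, assuming the Linux kernel headers are installed on the build host:

/* Same conditional-compilation pattern as the hunks above. */
#include <linux/version.h>
#include <stdio.h>

int main(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
	printf("building against kernel headers >= 2.6.0 (code 0x%06x)\n",
	       (unsigned int)LINUX_VERSION_CODE);
#else
	printf("building against pre-2.6 kernel headers\n");
#endif
	return 0;
}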
diff --git a/src/patches/netfilter_layer7_2.22_kernel3.10-no_proc_interface.patch b/src/patches/netfilter_layer7_2.22_kernel3.10-no_proc_interface.patch
new file mode 100644 (file)
index 0000000..5ffdd49
--- /dev/null
@@ -0,0 +1,2166 @@
+diff -Naur linux-3.10.5.org/include/linux/netfilter/xt_layer7.h linux-3.10.5/include/linux/netfilter/xt_layer7.h
+--- linux-3.10.5.org/include/linux/netfilter/xt_layer7.h       1970-01-01 01:00:00.000000000 +0100
++++ linux-3.10.5/include/linux/netfilter/xt_layer7.h   2013-08-07 06:16:14.260806739 +0200
+@@ -0,0 +1,13 @@
++#ifndef _XT_LAYER7_H
++#define _XT_LAYER7_H
++
++#define MAX_PATTERN_LEN 8192
++#define MAX_PROTOCOL_LEN 256
++
++struct xt_layer7_info {
++    char protocol[MAX_PROTOCOL_LEN];
++    char pattern[MAX_PATTERN_LEN];
++    u_int8_t invert;
++};
++
++#endif /* _XT_LAYER7_H */
+diff -Naur linux-3.10.5.org/include/net/netfilter/nf_conntrack.h linux-3.10.5/include/net/netfilter/nf_conntrack.h
+--- linux-3.10.5.org/include/net/netfilter/nf_conntrack.h      2013-08-04 10:51:49.000000000 +0200
++++ linux-3.10.5/include/net/netfilter/nf_conntrack.h  2013-08-07 06:16:14.280806062 +0200
+@@ -105,6 +105,22 @@
+       struct net *ct_net;
+ #endif
++#if defined(CONFIG_NETFILTER_XT_MATCH_LAYER7) || \
++ defined(CONFIG_NETFILTER_XT_MATCH_LAYER7_MODULE)
++      struct {
++              /*
++               * e.g. "http". NULL before decision. "unknown" after decision
++               * if no match.
++               */
++              char *app_proto;
++              /*
++               * application layer data so far. NULL after match decision.
++               */
++              char *app_data;
++              unsigned int app_data_len;
++      } layer7;
++#endif
++
+       /* Storage reserved for other modules, must be the last member */
+       union nf_conntrack_proto proto;
+ };
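The comments in the new layer7 block spell out its life cycle: app_proto stays NULL until the classifier has made a decision, then holds either a protocol name such as "http" or the string "unknown", while app_data buffers application payload until the decision is made (and is freed in the nf_conntrack_core.c hunk below). A rough sketch of a consumer that follows those rules — the helper name is invented for illustration and is not part of the patch:

/* Hypothetical reader of the per-connection layer7 state added above. */
#include <net/netfilter/nf_conntrack.h>

static const char *layer7_proto_or_default(const struct nf_conn *ct)
{
#if defined(CONFIG_NETFILTER_XT_MATCH_LAYER7) || \
    defined(CONFIG_NETFILTER_XT_MATCH_LAYER7_MODULE)
	if (ct->layer7.app_proto)
		return ct->layer7.app_proto;	/* "http", "unknown", ... */
#endif
	return "undecided";			/* no classification yet */
}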
+diff -Naur linux-3.10.5.org/net/netfilter/Kconfig linux-3.10.5/net/netfilter/Kconfig
+--- linux-3.10.5.org/net/netfilter/Kconfig     2013-08-04 10:51:49.000000000 +0200
++++ linux-3.10.5/net/netfilter/Kconfig 2013-08-07 06:16:14.310805048 +0200
+@@ -1205,6 +1205,26 @@
+         To compile it as a module, choose M here.  If unsure, say N.
++config NETFILTER_XT_MATCH_LAYER7
++      tristate '"layer7" match support'
++      depends on NETFILTER_XTABLES
++      depends on IP_NF_CONNTRACK || NF_CONNTRACK
++      help
++        Say Y if you want to be able to classify connections (and their
++        packets) based on regular expression matching of their application
++        layer data.   This is one way to classify applications such as
++        peer-to-peer filesharing systems that do not always use the same
++        port.
++
++        To compile it as a module, choose M here.  If unsure, say N.
++
++config NETFILTER_XT_MATCH_LAYER7_DEBUG
++        bool 'Layer 7 debugging output'
++        depends on NETFILTER_XT_MATCH_LAYER7
++        help
++          Say Y to get lots of debugging output.
++
++
+ config NETFILTER_XT_MATCH_STATISTIC
+       tristate '"statistic" match support'
+       depends on NETFILTER_ADVANCED
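The new match option is driven from userspace through struct xt_layer7_info from the header hunk at the top of this patch: the iptables layer7 extension (not included here) copies a protocol name, its regular expression and an invert flag into the kernel, typically from a rule along the lines of "iptables -m layer7 --l7proto http". A minimal sketch of populating that structure, restating the layout from xt_layer7.h (the real header uses u_int8_t for invert):

/* Illustrative only: filling the match data the kernel module expects. */
#include <stdio.h>
#include <string.h>

#define MAX_PATTERN_LEN 8192
#define MAX_PROTOCOL_LEN 256

struct xt_layer7_info {
	char protocol[MAX_PROTOCOL_LEN];
	char pattern[MAX_PATTERN_LEN];
	unsigned char invert;		/* u_int8_t in the real header */
};

int main(void)
{
	struct xt_layer7_info info;

	memset(&info, 0, sizeof(info));
	snprintf(info.protocol, sizeof(info.protocol), "http");
	/* example pattern only, not the actual l7-filter http definition */
	snprintf(info.pattern, sizeof(info.pattern), "^(get|post|head) ");
	info.invert = 0;

	printf("match \"%s\" using pattern \"%s\"\n", info.protocol, info.pattern);
	return 0;
}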
+diff -Naur linux-3.10.5.org/net/netfilter/Makefile linux-3.10.5/net/netfilter/Makefile
+--- linux-3.10.5.org/net/netfilter/Makefile    2013-08-04 10:51:49.000000000 +0200
++++ linux-3.10.5/net/netfilter/Makefile        2013-08-07 06:16:14.320804710 +0200
+@@ -134,6 +134,7 @@
+ obj-$(CONFIG_NETFILTER_XT_MATCH_SCTP) += xt_sctp.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_SOCKET) += xt_socket.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_STATE) += xt_state.o
++obj-$(CONFIG_NETFILTER_XT_MATCH_LAYER7) += xt_layer7.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_STATISTIC) += xt_statistic.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_STRING) += xt_string.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o
+diff -Naur linux-3.10.5.org/net/netfilter/nf_conntrack_core.c linux-3.10.5/net/netfilter/nf_conntrack_core.c
+--- linux-3.10.5.org/net/netfilter/nf_conntrack_core.c 2013-08-04 10:51:49.000000000 +0200
++++ linux-3.10.5/net/netfilter/nf_conntrack_core.c     2013-08-07 06:20:34.941991510 +0200
+@@ -1,3 +1,6 @@
++
++
++
+ /* Connection state tracking for netfilter.  This is separated from,
+    but required by, the NAT layer; it can also be used by an iptables
+    extension. */
+@@ -224,6 +227,13 @@
+        * too. */
+       nf_ct_remove_expectations(ct);
++      #if defined(CONFIG_NETFILTER_XT_MATCH_LAYER7) || defined(CONFIG_NETFILTER_XT_MATCH_LAYER7_MODULE)
++      if(ct->layer7.app_proto)
++              kfree(ct->layer7.app_proto);
++      if(ct->layer7.app_data)
++              kfree(ct->layer7.app_data);
++      #endif
++
+       /* We overload first tuple to link into unconfirmed or dying list.*/
+       BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
+       hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+diff -Naur linux-3.10.5.org/net/netfilter/nf_conntrack_standalone.c linux-3.10.5/net/netfilter/nf_conntrack_standalone.c
+--- linux-3.10.5.org/net/netfilter/nf_conntrack_standalone.c   2013-08-04 10:51:49.000000000 +0200
++++ linux-3.10.5/net/netfilter/nf_conntrack_standalone.c       2013-08-07 06:16:14.380802681 +0200
+@@ -240,6 +240,12 @@
+       if (ct_show_delta_time(s, ct))
+               goto release;
++#if defined(CONFIG_NETFILTER_XT_MATCH_LAYER7) || defined(CONFIG_NETFILTER_XT_MATCH_LAYER7_MODULE)
++      if(ct->layer7.app_proto &&
++           seq_printf(s, "l7proto=%s ", ct->layer7.app_proto))
++              return -ENOSPC;
++#endif
++
+       if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
+               goto release;
+diff -Naur linux-3.10.5.org/net/netfilter/regexp/regexp.c linux-3.10.5/net/netfilter/regexp/regexp.c
+--- linux-3.10.5.org/net/netfilter/regexp/regexp.c     1970-01-01 01:00:00.000000000 +0100
++++ linux-3.10.5/net/netfilter/regexp/regexp.c 2013-08-07 06:16:14.410801666 +0200
+@@ -0,0 +1,1197 @@
++/*
++ * regcomp and regexec -- regsub and regerror are elsewhere
++ * @(#)regexp.c       1.3 of 18 April 87
++ *
++ *    Copyright (c) 1986 by University of Toronto.
++ *    Written by Henry Spencer.  Not derived from licensed software.
++ *
++ *    Permission is granted to anyone to use this software for any
++ *    purpose on any computer system, and to redistribute it freely,
++ *    subject to the following restrictions:
++ *
++ *    1. The author is not responsible for the consequences of use of
++ *            this software, no matter how awful, even if they arise
++ *            from defects in it.
++ *
++ *    2. The origin of this software must not be misrepresented, either
++ *            by explicit claim or by omission.
++ *
++ *    3. Altered versions must be plainly marked as such, and must not
++ *            be misrepresented as being the original software.
++ *
++ * Beware that some of this code is subtly aware of the way operator
++ * precedence is structured in regular expressions.  Serious changes in
++ * regular-expression syntax might require a total rethink.
++ *
++ * This code was modified by Ethan Sommer to work within the kernel
++ * (it now uses kmalloc etc..)
++ *
++ * Modified slightly by Matthew Strait to use more modern C.
++ */
++
++#include "regexp.h"
++#include "regmagic.h"
++
++/* added by ethan and matt.  Lets it work in both kernel and user space.
++(So iptables can use it, for instance.)  Yea, it goes both ways... */
++#if __KERNEL__
++  #define malloc(foo) kmalloc(foo,GFP_ATOMIC)
++#else
++  #define printk(format,args...) printf(format,##args)
++#endif
++
++void regerror(char * s)
++{
++        printk("<3>Regexp: %s\n", s);
++        /* NOTREACHED */
++}
++
++/*
++ * The "internal use only" fields in regexp.h are present to pass info from
++ * compile to execute that permits the execute phase to run lots faster on
++ * simple cases.  They are:
++ *
++ * regstart   char that must begin a match; '\0' if none obvious
++ * reganch    is the match anchored (at beginning-of-line only)?
++ * regmust    string (pointer into program) that match must include, or NULL
++ * regmlen    length of regmust string
++ *
++ * Regstart and reganch permit very fast decisions on suitable starting points
++ * for a match, cutting down the work a lot.  Regmust permits fast rejection
++ * of lines that cannot possibly match.  The regmust tests are costly enough
++ * that regcomp() supplies a regmust only if the r.e. contains something
++ * potentially expensive (at present, the only such thing detected is * or +
++ * at the start of the r.e., which can involve a lot of backup).  Regmlen is
++ * supplied because the test in regexec() needs it and regcomp() is computing
++ * it anyway.
++ */
++
++/*
++ * Structure for regexp "program".  This is essentially a linear encoding
++ * of a nondeterministic finite-state machine (aka syntax charts or
++ * "railroad normal form" in parsing technology).  Each node is an opcode
++ * plus a "next" pointer, possibly plus an operand.  "Next" pointers of
++ * all nodes except BRANCH implement concatenation; a "next" pointer with
++ * a BRANCH on both ends of it is connecting two alternatives.  (Here we
++ * have one of the subtle syntax dependencies:  an individual BRANCH (as
++ * opposed to a collection of them) is never concatenated with anything
++ * because of operator precedence.)  The operand of some types of node is
++ * a literal string; for others, it is a node leading into a sub-FSM.  In
++ * particular, the operand of a BRANCH node is the first node of the branch.
++ * (NB this is *not* a tree structure:  the tail of the branch connects
++ * to the thing following the set of BRANCHes.)  The opcodes are:
++ */
++
++/* definition number  opnd?   meaning */
++#define       END     0       /* no   End of program. */
++#define       BOL     1       /* no   Match "" at beginning of line. */
++#define       EOL     2       /* no   Match "" at end of line. */
++#define       ANY     3       /* no   Match any one character. */
++#define       ANYOF   4       /* str  Match any character in this string. */
++#define       ANYBUT  5       /* str  Match any character not in this string. */
++#define       BRANCH  6       /* node Match this alternative, or the next... */
++#define       BACK    7       /* no   Match "", "next" ptr points backward. */
++#define       EXACTLY 8       /* str  Match this string. */
++#define       NOTHING 9       /* no   Match empty string. */
++#define       STAR    10      /* node Match this (simple) thing 0 or more times. */
++#define       PLUS    11      /* node Match this (simple) thing 1 or more times. */
++#define       OPEN    20      /* no   Mark this point in input as start of #n. */
++                      /*      OPEN+1 is number 1, etc. */
++#define       CLOSE   30      /* no   Analogous to OPEN. */
++
++/*
++ * Opcode notes:
++ *
++ * BRANCH     The set of branches constituting a single choice are hooked
++ *            together with their "next" pointers, since precedence prevents
++ *            anything being concatenated to any individual branch.  The
++ *            "next" pointer of the last BRANCH in a choice points to the
++ *            thing following the whole choice.  This is also where the
++ *            final "next" pointer of each individual branch points; each
++ *            branch starts with the operand node of a BRANCH node.
++ *
++ * BACK               Normal "next" pointers all implicitly point forward; BACK
++ *            exists to make loop structures possible.
++ *
++ * STAR,PLUS  '?', and complex '*' and '+', are implemented as circular
++ *            BRANCH structures using BACK.  Simple cases (one character
++ *            per match) are implemented with STAR and PLUS for speed
++ *            and to minimize recursive plunges.
++ *
++ * OPEN,CLOSE ...are numbered at compile time.
++ */
++
++/*
++ * A node is one char of opcode followed by two chars of "next" pointer.
++ * "Next" pointers are stored as two 8-bit pieces, high order first.  The
++ * value is a positive offset from the opcode of the node containing it.
++ * An operand, if any, simply follows the node.  (Note that much of the
++ * code generation knows about this implicit relationship.)
++ *
++ * Using two bytes for the "next" pointer is vast overkill for most things,
++ * but allows patterns to get big without disasters.
++ */
++#define       OP(p)   (*(p))
++#define       NEXT(p) (((*((p)+1)&0377)<<8) + (*((p)+2)&0377))
++#define       OPERAND(p)      ((p) + 3)
++
++/*
++ * See regmagic.h for one further detail of program structure.
++ */
++
++
++/*
++ * Utility definitions.
++ */
++#ifndef CHARBITS
++#define       UCHARAT(p)      ((int)*(unsigned char *)(p))
++#else
++#define       UCHARAT(p)      ((int)*(p)&CHARBITS)
++#endif
++
++#define       FAIL(m) { regerror(m); return(NULL); }
++#define       ISMULT(c)       ((c) == '*' || (c) == '+' || (c) == '?')
++#define       META    "^$.[()|?+*\\"
++
++/*
++ * Flags to be passed up and down.
++ */
++#define       HASWIDTH        01      /* Known never to match null string. */
++#define       SIMPLE          02      /* Simple enough to be STAR/PLUS operand. */
++#define       SPSTART         04      /* Starts with * or +. */
++#define       WORST           0       /* Worst case. */
++
++/*
++ * Global work variables for regcomp().
++ */
++struct match_globals {
++char *reginput;               /* String-input pointer. */
++char *regbol;         /* Beginning of input, for ^ check. */
++char **regstartp;     /* Pointer to startp array. */
++char **regendp;               /* Ditto for endp. */
++char *regparse;               /* Input-scan pointer. */
++int regnpar;          /* () count. */
++char regdummy;
++char *regcode;                /* Code-emit pointer; &regdummy = don't. */
++long regsize;         /* Code size. */
++};
++
++/*
++ * Forward declarations for regcomp()'s friends.
++ */
++#ifndef STATIC
++#define       STATIC  static
++#endif
++STATIC char *reg(struct match_globals *g, int paren,int *flagp);
++STATIC char *regbranch(struct match_globals *g, int *flagp);
++STATIC char *regpiece(struct match_globals *g, int *flagp);
++STATIC char *regatom(struct match_globals *g, int *flagp);
++STATIC char *regnode(struct match_globals *g, char op);
++STATIC char *regnext(struct match_globals *g, char *p);
++STATIC void regc(struct match_globals *g, char b);
++STATIC void reginsert(struct match_globals *g, char op, char *opnd);
++STATIC void regtail(struct match_globals *g, char *p, char *val);
++STATIC void regoptail(struct match_globals *g, char *p, char *val);
++
++
++__kernel_size_t my_strcspn(const char *s1,const char *s2)
++{
++        char *scan1;
++        char *scan2;
++        int count;
++
++        count = 0;
++        for (scan1 = (char *)s1; *scan1 != '\0'; scan1++) {
++                for (scan2 = (char *)s2; *scan2 != '\0';)       /* ++ moved down. */
++                        if (*scan1 == *scan2++)
++                                return(count);
++                count++;
++        }
++        return(count);
++}
++
++/*
++ - regcomp - compile a regular expression into internal code
++ *
++ * We can't allocate space until we know how big the compiled form will be,
++ * but we can't compile it (and thus know how big it is) until we've got a
++ * place to put the code.  So we cheat:  we compile it twice, once with code
++ * generation turned off and size counting turned on, and once "for real".
++ * This also means that we don't allocate space until we are sure that the
++ * thing really will compile successfully, and we never have to move the
++ * code and thus invalidate pointers into it.  (Note that it has to be in
++ * one piece because free() must be able to free it all.)
++ *
++ * Beware that the optimization-preparation code in here knows about some
++ * of the structure of the compiled regexp.
++ */
++regexp *
++regcomp(char *exp,int *patternsize)
++{
++      register regexp *r;
++      register char *scan;
++      register char *longest;
++      register int len;
++      int flags;
++      struct match_globals g;
++      
++      /* commented out by ethan
++         extern char *malloc();
++      */
++
++      if (exp == NULL)
++              FAIL("NULL argument");
++
++      /* First pass: determine size, legality. */
++      g.regparse = exp;
++      g.regnpar = 1;
++      g.regsize = 0L;
++      g.regcode = &g.regdummy;
++      regc(&g, MAGIC);
++      if (reg(&g, 0, &flags) == NULL)
++              return(NULL);
++
++      /* Small enough for pointer-storage convention? */
++      if (g.regsize >= 32767L)                /* Probably could be 65535L. */
++              FAIL("regexp too big");
++
++      /* Allocate space. */
++      *patternsize=sizeof(regexp) + (unsigned)g.regsize;
++      r = (regexp *)malloc(sizeof(regexp) + (unsigned)g.regsize);
++      if (r == NULL)
++              FAIL("out of space");
++
++      /* Second pass: emit code. */
++      g.regparse = exp;
++      g.regnpar = 1;
++      g.regcode = r->program;
++      regc(&g, MAGIC);
++      if (reg(&g, 0, &flags) == NULL)
++              return(NULL);
++
++      /* Dig out information for optimizations. */
++      r->regstart = '\0';     /* Worst-case defaults. */
++      r->reganch = 0;
++      r->regmust = NULL;
++      r->regmlen = 0;
++      scan = r->program+1;                    /* First BRANCH. */
++      if (OP(regnext(&g, scan)) == END) {             /* Only one top-level choice. */
++              scan = OPERAND(scan);
++
++              /* Starting-point info. */
++              if (OP(scan) == EXACTLY)
++                      r->regstart = *OPERAND(scan);
++              else if (OP(scan) == BOL)
++                      r->reganch++;
++
++              /*
++               * If there's something expensive in the r.e., find the
++               * longest literal string that must appear and make it the
++               * regmust.  Resolve ties in favor of later strings, since
++               * the regstart check works with the beginning of the r.e.
++               * and avoiding duplication strengthens checking.  Not a
++               * strong reason, but sufficient in the absence of others.
++               */
++              if (flags&SPSTART) {
++                      longest = NULL;
++                      len = 0;
++                      for (; scan != NULL; scan = regnext(&g, scan))
++                              if (OP(scan) == EXACTLY && strlen(OPERAND(scan)) >= len) {
++                                      longest = OPERAND(scan);
++                                      len = strlen(OPERAND(scan));
++                              }
++                      r->regmust = longest;
++                      r->regmlen = len;
++              }
++      }
++
++      return(r);
++}
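++
++/*
++ * Illustrative sketch of the "compile it twice" idea above, shown in
++ * isolation.  The names (dummy, emit_byte, compile_twice) are made up
++ * here; emit_byte() stands in for regc()/regnode(), only counting
++ * while the sink still points at the dummy byte and writing for real
++ * on the second pass, once the exact size is known and a buffer
++ * has been allocated.
++ */
++#if 0
++static char dummy;
++
++static void emit_byte(char **sink, long *size, char b)
++{
++	if (*sink == &dummy)
++		(*size)++;		/* first pass: measure only */
++	else
++		*(*sink)++ = b;		/* second pass: emit for real */
++}
++
++static char *compile_twice(const char *src, long *len)
++{
++	char *sink = &dummy, *buf;
++	long size = 0;
++	const char *p;
++
++	for (p = src; *p != '\0'; p++)	/* first pass: size only */
++		emit_byte(&sink, &size, *p);
++	buf = malloc(size + 1);
++	if (buf == NULL)
++		return NULL;
++	sink = buf;
++	for (p = src; *p != '\0'; p++)	/* second pass: fill the buffer */
++		emit_byte(&sink, &size, *p);
++	*sink = '\0';
++	*len = size;
++	return buf;
++}
++#endif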
++
++/*
++ - reg - regular expression, i.e. main body or parenthesized thing
++ *
++ * Caller must absorb opening parenthesis.
++ *
++ * Combining parenthesis handling with the base level of regular expression
++ * is a trifle forced, but the need to tie the tails of the branches to what
++ * follows makes it hard to avoid.
++ */
++static char *
++reg(struct match_globals *g, int paren, int *flagp /* Parenthesized? */ )
++{
++      register char *ret;
++      register char *br;
++      register char *ender;
++      register int parno = 0; /* 0 makes gcc happy */
++      int flags;
++
++      *flagp = HASWIDTH;      /* Tentatively. */
++
++      /* Make an OPEN node, if parenthesized. */
++      if (paren) {
++              if (g->regnpar >= NSUBEXP)
++                      FAIL("too many ()");
++              parno = g->regnpar;
++              g->regnpar++;
++              ret = regnode(g, OPEN+parno);
++      } else
++              ret = NULL;
++
++      /* Pick up the branches, linking them together. */
++      br = regbranch(g, &flags);
++      if (br == NULL)
++              return(NULL);
++      if (ret != NULL)
++              regtail(g, ret, br);    /* OPEN -> first. */
++      else
++              ret = br;
++      if (!(flags&HASWIDTH))
++              *flagp &= ~HASWIDTH;
++      *flagp |= flags&SPSTART;
++      while (*g->regparse == '|') {
++              g->regparse++;
++              br = regbranch(g, &flags);
++              if (br == NULL)
++                      return(NULL);
++              regtail(g, ret, br);    /* BRANCH -> BRANCH. */
++              if (!(flags&HASWIDTH))
++                      *flagp &= ~HASWIDTH;
++              *flagp |= flags&SPSTART;
++      }
++
++      /* Make a closing node, and hook it on the end. */
++      ender = regnode(g, (paren) ? CLOSE+parno : END);        
++      regtail(g, ret, ender);
++
++      /* Hook the tails of the branches to the closing node. */
++      for (br = ret; br != NULL; br = regnext(g, br))
++              regoptail(g, br, ender);
++
++      /* Check for proper termination. */
++      if (paren && *g->regparse++ != ')') {
++              FAIL("unmatched ()");
++      } else if (!paren && *g->regparse != '\0') {
++              if (*g->regparse == ')') {
++                      FAIL("unmatched ()");
++              } else
++                      FAIL("junk on end");    /* "Can't happen". */
++              /* NOTREACHED */
++      }
++
++      return(ret);
++}
++
++/*
++ - regbranch - one alternative of an | operator
++ *
++ * Implements the concatenation operator.
++ */
++static char *
++regbranch(struct match_globals *g, int *flagp)
++{
++      register char *ret;
++      register char *chain;
++      register char *latest;
++      int flags;
++
++      *flagp = WORST;         /* Tentatively. */
++
++      ret = regnode(g, BRANCH);
++      chain = NULL;
++      while (*g->regparse != '\0' && *g->regparse != '|' && *g->regparse != ')') {
++              latest = regpiece(g, &flags);
++              if (latest == NULL)
++                      return(NULL);
++              *flagp |= flags&HASWIDTH;
++              if (chain == NULL)      /* First piece. */
++                      *flagp |= flags&SPSTART;
++              else
++                      regtail(g, chain, latest);
++              chain = latest;
++      }
++      if (chain == NULL)      /* Loop ran zero times. */
++              (void) regnode(g, NOTHING);
++
++      return(ret);
++}
++
++/*
++ - regpiece - something followed by possible [*+?]
++ *
++ * Note that the branching code sequences used for ? and the general cases
++ * of * and + are somewhat optimized:  they use the same NOTHING node as
++ * both the endmarker for their branch list and the body of the last branch.
++ * It might seem that this node could be dispensed with entirely, but the
++ * endmarker role is not redundant.
++ */
++static char *
++regpiece(struct match_globals *g, int *flagp)
++{
++      register char *ret;
++      register char op;
++      register char *next;
++      int flags;
++
++      ret = regatom(g, &flags);
++      if (ret == NULL)
++              return(NULL);
++
++      op = *g->regparse;
++      if (!ISMULT(op)) {
++              *flagp = flags;
++              return(ret);
++      }
++
++      if (!(flags&HASWIDTH) && op != '?')
++              FAIL("*+ operand could be empty");
++      *flagp = (op != '+') ? (WORST|SPSTART) : (WORST|HASWIDTH);
++
++      if (op == '*' && (flags&SIMPLE))
++              reginsert(g, STAR, ret);
++      else if (op == '*') {
++              /* Emit x* as (x&|), where & means "self". */
++              reginsert(g, BRANCH, ret);                      /* Either x */
++              regoptail(g, ret, regnode(g, BACK));            /* and loop */
++              regoptail(g, ret, ret);                 /* back */
++              regtail(g, ret, regnode(g, BRANCH));            /* or */
++              regtail(g, ret, regnode(g, NOTHING));           /* null. */
++      } else if (op == '+' && (flags&SIMPLE))
++              reginsert(g, PLUS, ret);
++      else if (op == '+') {
++              /* Emit x+ as x(&|), where & means "self". */
++              next = regnode(g, BRANCH);                      /* Either */
++              regtail(g, ret, next);
++              regtail(g, regnode(g, BACK), ret);              /* loop back */
++              regtail(g, next, regnode(g, BRANCH));           /* or */
++              regtail(g, ret, regnode(g, NOTHING));           /* null. */
++      } else if (op == '?') {
++              /* Emit x? as (x|) */
++              reginsert(g, BRANCH, ret);                      /* Either x */
++              regtail(g, ret, regnode(g, BRANCH));            /* or */
++              next = regnode(g, NOTHING);             /* null. */
++              regtail(g, ret, next);
++              regoptail(g, ret, next);
++      }
++      g->regparse++;
++      if (ISMULT(*g->regparse))
++              FAIL("nested *?+");
++
++      return(ret);
++}
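++
++/*
++ * Worked example: for a non-SIMPLE operand such as "(ab)*", the
++ * general '*' case above emits
++ *
++ *	BRANCH1  <operand>  BACK  BRANCH2  NOTHING
++ *
++ * where the operand's tail runs into BACK, BACK's next pointer loops
++ * back to BRANCH1, and BRANCH1's next pointer falls through to
++ * BRANCH2, whose operand is the NOTHING endmarker: "x and loop back,
++ * or null".  A SIMPLE operand such as "a*" instead gets the single
++ * STAR node from reginsert(), and regrepeat() does the looping at
++ * match time.
++ */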
++
++/*
++ - regatom - the lowest level
++ *
++ * Optimization:  gobbles an entire sequence of ordinary characters so that
++ * it can turn them into a single node, which is smaller to store and
++ * faster to run.  Backslashed characters are exceptions, each becoming a
++ * separate node; the code is simpler that way and it's not worth fixing.
++ */
++static char *
++regatom(struct match_globals *g, int *flagp)
++{
++      register char *ret;
++      int flags;
++
++      *flagp = WORST;         /* Tentatively. */
++
++      switch (*g->regparse++) {
++      case '^':
++              ret = regnode(g, BOL);
++              break;
++      case '$':
++              ret = regnode(g, EOL);
++              break;
++      case '.':
++              ret = regnode(g, ANY);
++              *flagp |= HASWIDTH|SIMPLE;
++              break;
++      case '[': {
++                      register int class;
++                      register int classend;
++
++                      if (*g->regparse == '^') {      /* Complement of range. */
++                              ret = regnode(g, ANYBUT);
++                              g->regparse++;
++                      } else
++                              ret = regnode(g, ANYOF);
++                      if (*g->regparse == ']' || *g->regparse == '-')
++                              regc(g, *g->regparse++);
++                      while (*g->regparse != '\0' && *g->regparse != ']') {
++                              if (*g->regparse == '-') {
++                                      g->regparse++;
++                                      if (*g->regparse == ']' || *g->regparse == '\0')
++                                              regc(g, '-');
++                                      else {
++                                              class = UCHARAT(g->regparse-2)+1;
++                                              classend = UCHARAT(g->regparse);
++                                              if (class > classend+1)
++                                                      FAIL("invalid [] range");
++                                              for (; class <= classend; class++)
++                                                      regc(g, class);
++                                              g->regparse++;
++                                      }
++                              } else
++                                      regc(g, *g->regparse++);
++                      }
++                      regc(g, '\0');
++                      if (*g->regparse != ']')
++                              FAIL("unmatched []");
++                      g->regparse++;
++                      *flagp |= HASWIDTH|SIMPLE;
++              }
++              break;
++      case '(':
++              ret = reg(g, 1, &flags);
++              if (ret == NULL)
++                      return(NULL);
++              *flagp |= flags&(HASWIDTH|SPSTART);
++              break;
++      case '\0':
++      case '|':
++      case ')':
++              FAIL("internal urp");   /* Supposed to be caught earlier. */
++              break;
++      case '?':
++      case '+':
++      case '*':
++              FAIL("?+* follows nothing");
++              break;
++      case '\\':
++              if (*g->regparse == '\0')
++                      FAIL("trailing \\");
++              ret = regnode(g, EXACTLY);
++              regc(g, *g->regparse++);
++              regc(g, '\0');
++              *flagp |= HASWIDTH|SIMPLE;
++              break;
++      default: {
++                      register int len;
++                      register char ender;
++
++                      g->regparse--;
++                      len = my_strcspn((const char *)g->regparse, (const char *)META);
++                      if (len <= 0)
++                              FAIL("internal disaster");
++                      ender = *(g->regparse+len);
++                      if (len > 1 && ISMULT(ender))
++                              len--;          /* Back off clear of ?+* operand. */
++                      *flagp |= HASWIDTH;
++                      if (len == 1)
++                              *flagp |= SIMPLE;
++                      ret = regnode(g, EXACTLY);
++                      while (len > 0) {
++                              regc(g, *g->regparse++);
++                              len--;
++                      }
++                      regc(g, '\0');
++              }
++              break;
++      }
++
++      return(ret);
++}
++
++/*
++ - regnode - emit a node
++ */
++static char *                 /* Location. */
++regnode(struct match_globals *g, char op)
++{
++      register char *ret;
++      register char *ptr;
++
++      ret = g->regcode;
++      if (ret == &g->regdummy) {
++              g->regsize += 3;
++              return(ret);
++      }
++
++      ptr = ret;
++      *ptr++ = op;
++      *ptr++ = '\0';          /* Null "next" pointer. */
++      *ptr++ = '\0';
++      g->regcode = ptr;
++
++      return(ret);
++}
++
++/*
++ - regc - emit (if appropriate) a byte of code
++ */
++static void
++regc(struct match_globals *g, char b)
++{
++      if (g->regcode != &g->regdummy)
++              *g->regcode++ = b;
++      else
++              g->regsize++;
++}
++
++/*
++ - reginsert - insert an operator in front of already-emitted operand
++ *
++ * Means relocating the operand.
++ */
++static void
++reginsert(struct match_globals *g, char op, char* opnd)
++{
++      register char *src;
++      register char *dst;
++      register char *place;
++
++      if (g->regcode == &g->regdummy) {
++              g->regsize += 3;
++              return;
++      }
++
++      src = g->regcode;
++      g->regcode += 3;
++      dst = g->regcode;
++      while (src > opnd)
++              *--dst = *--src;
++
++      place = opnd;           /* Op node, where operand used to be. */
++      *place++ = op;
++      *place++ = '\0';
++      *place++ = '\0';
++}
++
++/*
++ - regtail - set the next-pointer at the end of a node chain
++ */
++static void
++regtail(struct match_globals *g, char *p, char *val)
++{
++      register char *scan;
++      register char *temp;
++      register int offset;
++
++      if (p == &g->regdummy)
++              return;
++
++      /* Find last node. */
++      scan = p;
++      for (;;) {
++              temp = regnext(g, scan);
++              if (temp == NULL)
++                      break;
++              scan = temp;
++      }
++
++      if (OP(scan) == BACK)
++              offset = scan - val;
++      else
++              offset = val - scan;
++      *(scan+1) = (offset>>8)&0377;
++      *(scan+2) = offset&0377;
++}
++
++/*
++ - regoptail - regtail on operand of first argument; nop if operandless
++ */
++static void
++regoptail(struct match_globals *g, char *p, char *val)
++{
++      /* "Operandless" and "op != BRANCH" are synonymous in practice. */
++      if (p == NULL || p == &g->regdummy || OP(p) != BRANCH)
++              return;
++      regtail(g, OPERAND(p), val);
++}
++
++/*
++ * regexec and friends
++ */
++
++
++/*
++ * Forwards.
++ */
++STATIC int regtry(struct match_globals *g, regexp *prog, char *string);
++STATIC int regmatch(struct match_globals *g, char *prog);
++STATIC int regrepeat(struct match_globals *g, char *p);
++
++#ifdef DEBUG
++int regnarrate = 0;
++void regdump();
++STATIC char *regprop(char *op);
++#endif
++
++/*
++ - regexec - match a regexp against a string
++ */
++int
++regexec(regexp *prog, char *string)
++{
++      register char *s;
++      struct match_globals g;
++
++      /* Be paranoid... */
++      if (prog == NULL || string == NULL) {
++              printk("<3>Regexp: NULL parameter\n");
++              return(0);
++      }
++
++      /* Check validity of program. */
++      if (UCHARAT(prog->program) != MAGIC) {
++              printk("<3>Regexp: corrupted program\n");
++              return(0);
++      }
++
++      /* If there is a "must appear" string, look for it. */
++      if (prog->regmust != NULL) {
++              s = string;
++              while ((s = strchr(s, prog->regmust[0])) != NULL) {
++                      if (strncmp(s, prog->regmust, prog->regmlen) == 0)
++                              break;  /* Found it. */
++                      s++;
++              }
++              if (s == NULL)  /* Not present. */
++                      return(0);
++      }
++
++      /* Mark beginning of line for ^ . */
++      g.regbol = string;
++
++      /* Simplest case:  anchored match need be tried only once. */
++      if (prog->reganch)
++              return(regtry(&g, prog, string));
++
++      /* Messy cases:  unanchored match. */
++      s = string;
++      if (prog->regstart != '\0')
++              /* We know what char it must start with. */
++              while ((s = strchr(s, prog->regstart)) != NULL) {
++                      if (regtry(&g, prog, s))
++                              return(1);
++                      s++;
++              }
++      else
++              /* We don't -- general case. */
++              do {
++                      if (regtry(&g, prog, s))
++                              return(1);
++              } while (*s++ != '\0');
++
++      /* Failure. */
++      return(0);
++}
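++
++/*
++ * Worked example: for a pattern such as "a*foobar", regcomp() records
++ * regmust = "foobar" (the longest literal that must appear), so the
++ * strchr()/strncmp() pre-scan above rejects most non-matching strings
++ * cheaply before any regtry() is attempted; for "^foo" it records
++ * reganch instead, and the match is tried exactly once.
++ */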
++
++/*
++ - regtry - try match at specific point
++ */
++static int                    /* 0 failure, 1 success */
++regtry(struct match_globals *g, regexp *prog, char *string)
++{
++      register int i;
++      register char **sp;
++      register char **ep;
++
++      g->reginput = string;
++      g->regstartp = prog->startp;
++      g->regendp = prog->endp;
++
++      sp = prog->startp;
++      ep = prog->endp;
++      for (i = NSUBEXP; i > 0; i--) {
++              *sp++ = NULL;
++              *ep++ = NULL;
++      }
++      if (regmatch(g, prog->program + 1)) {
++              prog->startp[0] = string;
++              prog->endp[0] = g->reginput;
++              return(1);
++      } else
++              return(0);
++}
++
++/*
++ - regmatch - main matching routine
++ *
++ * Conceptually the strategy is simple:  check to see whether the current
++ * node matches, call self recursively to see whether the rest matches,
++ * and then act accordingly.  In practice we make some effort to avoid
++ * recursion, in particular by going through "ordinary" nodes (that don't
++ * need to know whether the rest of the match failed) by a loop instead of
++ * by recursion.
++ */
++static int                    /* 0 failure, 1 success */
++regmatch(struct match_globals *g, char *prog)
++{
++      register char *scan = prog; /* Current node. */
++      char *next;                 /* Next node. */
++
++#ifdef DEBUG
++      if (scan != NULL && regnarrate)
++              fprintf(stderr, "%s(\n", regprop(scan));
++#endif
++      while (scan != NULL) {
++#ifdef DEBUG
++              if (regnarrate)
++                      fprintf(stderr, "%s...\n", regprop(scan));
++#endif
++              next = regnext(g, scan);
++
++              switch (OP(scan)) {
++              case BOL:
++                      if (g->reginput != g->regbol)
++                              return(0);
++                      break;
++              case EOL:
++                      if (*g->reginput != '\0')
++                              return(0);
++                      break;
++              case ANY:
++                      if (*g->reginput == '\0')
++                              return(0);
++                      g->reginput++;
++                      break;
++              case EXACTLY: {
++                              register int len;
++                              register char *opnd;
++
++                              opnd = OPERAND(scan);
++                              /* Inline the first character, for speed. */
++                              if (*opnd != *g->reginput)
++                                      return(0);
++                              len = strlen(opnd);
++                              if (len > 1 && strncmp(opnd, g->reginput, len) != 0)
++                                      return(0);
++                              g->reginput += len;
++                      }
++                      break;
++              case ANYOF:
++                      if (*g->reginput == '\0' || strchr(OPERAND(scan), *g->reginput) == NULL)
++                              return(0);
++                      g->reginput++;
++                      break;
++              case ANYBUT:
++                      if (*g->reginput == '\0' || strchr(OPERAND(scan), *g->reginput) != NULL)
++                              return(0);
++                      g->reginput++;
++                      break;
++              case NOTHING:
++              case BACK:
++                      break;
++              case OPEN+1:
++              case OPEN+2:
++              case OPEN+3:
++              case OPEN+4:
++              case OPEN+5:
++              case OPEN+6:
++              case OPEN+7:
++              case OPEN+8:
++              case OPEN+9: {
++                              register int no;
++                              register char *save;
++
++                              no = OP(scan) - OPEN;
++                              save = g->reginput;
++
++                              if (regmatch(g, next)) {
++                                      /*
++                                       * Don't set startp if some later
++                                       * invocation of the same parentheses
++                                       * already has.
++                                       */
++                                      if (g->regstartp[no] == NULL)
++                                              g->regstartp[no] = save;
++                                      return(1);
++                              } else
++                                      return(0);
++                      }
++                      break;
++              case CLOSE+1:
++              case CLOSE+2:
++              case CLOSE+3:
++              case CLOSE+4:
++              case CLOSE+5:
++              case CLOSE+6:
++              case CLOSE+7:
++              case CLOSE+8:
++              case CLOSE+9:
++                      {
++                              register int no;
++                              register char *save;
++
++                              no = OP(scan) - CLOSE;
++                              save = g->reginput;
++
++                              if (regmatch(g, next)) {
++                                      /*
++                                       * Don't set endp if some later
++                                       * invocation of the same parentheses
++                                       * already has.
++                                       */
++                                      if (g->regendp[no] == NULL)
++                                              g->regendp[no] = save;
++                                      return(1);
++                              } else
++                                      return(0);
++                      }
++                      break;
++              case BRANCH: {
++                              register char *save;
++
++                              if (OP(next) != BRANCH)         /* No choice. */
++                                      next = OPERAND(scan);   /* Avoid recursion. */
++                              else {
++                                      do {
++                                              save = g->reginput;
++                                              if (regmatch(g, OPERAND(scan)))
++                                                      return(1);
++                                              g->reginput = save;
++                                              scan = regnext(g, scan);
++                                      } while (scan != NULL && OP(scan) == BRANCH);
++                                      return(0);
++                                      /* NOTREACHED */
++                              }
++                      }
++                      break;
++              case STAR:
++              case PLUS: {
++                              register char nextch;
++                              register int no;
++                              register char *save;
++                              register int min;
++
++                              /*
++                               * Lookahead to avoid useless match attempts
++                               * when we know what character comes next.
++                               */
++                              nextch = '\0';
++                              if (OP(next) == EXACTLY)
++                                      nextch = *OPERAND(next);
++                              min = (OP(scan) == STAR) ? 0 : 1;
++                              save = g->reginput;
++                              no = regrepeat(g, OPERAND(scan));
++                              while (no >= min) {
++                                      /* If it could work, try it. */
++                                      if (nextch == '\0' || *g->reginput == nextch)
++                                              if (regmatch(g, next))
++                                                      return(1);
++                                      /* Couldn't or didn't -- back up. */
++                                      no--;
++                                      g->reginput = save + no;
++                              }
++                              return(0);
++                      }
++                      break;
++              case END:
++                      return(1);      /* Success! */
++                      break;
++              default:
++                      printk("<3>Regexp: memory corruption\n");
++                      return(0);
++                      break;
++              }
++
++              scan = next;
++      }
++
++      /*
++       * We get here only if there's trouble -- normally "case END" is
++       * the terminating point.
++       */
++      printk("<3>Regexp: corrupted pointers\n");
++      return(0);
++}
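++
++/*
++ * Worked example: for "a.c" the program (after the MAGIC byte) is
++ * BRANCH, EXACTLY "a", ANY, EXACTLY "c", END, and matching "abc"
++ * never recurses -- every node is "ordinary", so the loop above just
++ * advances scan along the next pointers.  Recursion happens only for
++ * BRANCH alternatives, OPEN/CLOSE bookkeeping and the back-off loop
++ * in STAR/PLUS.
++ */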
++
++/*
++ - regrepeat - repeatedly match something simple, report how many
++ */
++static int
++regrepeat(struct match_globals *g, char *p)
++{
++      register int count = 0;
++      register char *scan;
++      register char *opnd;
++
++      scan = g->reginput;
++      opnd = OPERAND(p);
++      switch (OP(p)) {
++      case ANY:
++              count = strlen(scan);
++              scan += count;
++              break;
++      case EXACTLY:
++              while (*opnd == *scan) {
++                      count++;
++                      scan++;
++              }
++              break;
++      case ANYOF:
++              while (*scan != '\0' && strchr(opnd, *scan) != NULL) {
++                      count++;
++                      scan++;
++              }
++              break;
++      case ANYBUT:
++              while (*scan != '\0' && strchr(opnd, *scan) == NULL) {
++                      count++;
++                      scan++;
++              }
++              break;
++      default:                /* Oh dear.  Called inappropriately. */
++              printk("<3>Regexp: internal foulup\n");
++              count = 0;      /* Best compromise. */
++              break;
++      }
++      g->reginput = scan;
++
++      return(count);
++}
++
++/*
++ - regnext - dig the "next" pointer out of a node
++ */
++static char*
++regnext(struct match_globals *g, char *p)
++{
++      register int offset;
++
++      if (p == &g->regdummy)
++              return(NULL);
++
++      offset = NEXT(p);
++      if (offset == 0)
++              return(NULL);
++
++      if (OP(p) == BACK)
++              return(p-offset);
++      else
++              return(p+offset);
++}
++
++#ifdef DEBUG
++
++STATIC char *regprop();
++
++/*
++ - regdump - dump a regexp onto stdout in vaguely comprehensible form
++ */
++void
++regdump(regexp *r)
++{
++      register char *s;
++      register char op = EXACTLY;     /* Arbitrary non-END op. */
++      register char *next;
++      /* extern char *strchr(); */
++
++
++      s = r->program + 1;
++      while (op != END) {     /* While that wasn't END last time... */
++              op = OP(s);
++              printf("%2d%s", s-r->program, regprop(s));      /* Where, what. */
++              next = regnext(s);
++              if (next == NULL)               /* Next ptr. */
++                      printf("(0)");
++              else
++                      printf("(%d)", (s-r->program)+(next-s));
++              s += 3;
++              if (op == ANYOF || op == ANYBUT || op == EXACTLY) {
++                      /* Literal string, where present. */
++                      while (*s != '\0') {
++                              putchar(*s);
++                              s++;
++                      }
++                      s++;
++              }
++              putchar('\n');
++      }
++
++      /* Header fields of interest. */
++      if (r->regstart != '\0')
++              printf("start `%c' ", r->regstart);
++      if (r->reganch)
++              printf("anchored ");
++      if (r->regmust != NULL)
++              printf("must have \"%s\"", r->regmust);
++      printf("\n");
++}
++
++/*
++ - regprop - printable representation of opcode
++ */
++static char *
++regprop(char *op)
++{
++#define BUFLEN 50
++      register char *p;
++      static char buf[BUFLEN];
++
++      strcpy(buf, ":");
++
++      switch (OP(op)) {
++      case BOL:
++              p = "BOL";
++              break;
++      case EOL:
++              p = "EOL";
++              break;
++      case ANY:
++              p = "ANY";
++              break;
++      case ANYOF:
++              p = "ANYOF";
++              break;
++      case ANYBUT:
++              p = "ANYBUT";
++              break;
++      case BRANCH:
++              p = "BRANCH";
++              break;
++      case EXACTLY:
++              p = "EXACTLY";
++              break;
++      case NOTHING:
++              p = "NOTHING";
++              break;
++      case BACK:
++              p = "BACK";
++              break;
++      case END:
++              p = "END";
++              break;
++      case OPEN+1:
++      case OPEN+2:
++      case OPEN+3:
++      case OPEN+4:
++      case OPEN+5:
++      case OPEN+6:
++      case OPEN+7:
++      case OPEN+8:
++      case OPEN+9:
++              snprintf(buf+strlen(buf),BUFLEN-strlen(buf), "OPEN%d", OP(op)-OPEN);
++              p = NULL;
++              break;
++      case CLOSE+1:
++      case CLOSE+2:
++      case CLOSE+3:
++      case CLOSE+4:
++      case CLOSE+5:
++      case CLOSE+6:
++      case CLOSE+7:
++      case CLOSE+8:
++      case CLOSE+9:
++              snprintf(buf+strlen(buf),BUFLEN-strlen(buf), "CLOSE%d", OP(op)-CLOSE);
++              p = NULL;
++              break;
++      case STAR:
++              p = "STAR";
++              break;
++      case PLUS:
++              p = "PLUS";
++              break;
++      default:
++              printk("<3>Regexp: corrupted opcode\n");
++              break;
++      }
++      if (p != NULL)
++              strncat(buf, p, BUFLEN-strlen(buf));
++      return(buf);
++}
++#endif
++
++
+diff -Naur linux-3.10.5.org/net/netfilter/regexp/regexp.h linux-3.10.5/net/netfilter/regexp/regexp.h
+--- linux-3.10.5.org/net/netfilter/regexp/regexp.h     1970-01-01 01:00:00.000000000 +0100
++++ linux-3.10.5/net/netfilter/regexp/regexp.h 2013-08-07 06:16:14.430800990 +0200
+@@ -0,0 +1,41 @@
++/*
++ * Definitions etc. for regexp(3) routines.
++ *
++ * Caveat:  this is V8 regexp(3) [actually, a reimplementation thereof],
++ * not the System V one.
++ */
++
++#ifndef REGEXP_H
++#define REGEXP_H
++
++
++/*
++http://www.opensource.apple.com/darwinsource/10.3/expect-1/expect/expect.h ,
++which contains a version of this library, says:
++
++ *
++ * NSUBEXP must be at least 10, and no greater than 117 or the parser
++ * will not work properly.
++ *
++
++However, it looks rather like this library is limited to 10.  If you think
++otherwise, let us know.
++*/
++
++#define NSUBEXP  10
++typedef struct regexp {
++      char *startp[NSUBEXP];
++      char *endp[NSUBEXP];
++      char regstart;          /* Internal use only. */
++      char reganch;           /* Internal use only. */
++      char *regmust;          /* Internal use only. */
++      int regmlen;            /* Internal use only. */
++      char program[1];        /* Unwarranted chumminess with compiler. */
++} regexp;
++
++regexp * regcomp(char *exp, int *patternsize);
++int regexec(regexp *prog, char *string);
++void regsub(regexp *prog, char *source, char *dest);
++void regerror(char *s);
++
++#endif
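++
++/*
++ * Usage sketch: how the entry points above fit together.  The pattern
++ * and function name here are made up for illustration; in the layer7
++ * match module the compile step is wrapped by compile_and_cache() in
++ * xt_layer7.c.  Since this regexp code was reworked to use kmalloc()
++ * (see the note at the top of regsub.c), kfree() releases a compiled
++ * program.
++ */
++#if 0
++static void regexp_usage_example(void)
++{
++	int size = 0;
++	regexp *prog = regcomp((char *)"^get ", &size);	/* compile once */
++
++	if (prog == NULL)
++		return;		/* bad pattern or out of memory */
++	if (regexec(prog, (char *)"get /index.html http/1.0")) {
++		/* prog->startp[0]..prog->endp[0] bracket the match */
++	}
++	kfree(prog);
++}
++#endif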
+diff -Naur linux-3.10.5.org/net/netfilter/regexp/regmagic.h linux-3.10.5/net/netfilter/regexp/regmagic.h
+--- linux-3.10.5.org/net/netfilter/regexp/regmagic.h   1970-01-01 01:00:00.000000000 +0100
++++ linux-3.10.5/net/netfilter/regexp/regmagic.h       2013-08-07 06:16:14.450800314 +0200
+@@ -0,0 +1,5 @@
++/*
++ * The first byte of the regexp internal "program" is actually this magic
++ * number; the start node begins in the second byte.
++ */
++#define       MAGIC   0234
+diff -Naur linux-3.10.5.org/net/netfilter/regexp/regsub.c linux-3.10.5/net/netfilter/regexp/regsub.c
+--- linux-3.10.5.org/net/netfilter/regexp/regsub.c     1970-01-01 01:00:00.000000000 +0100
++++ linux-3.10.5/net/netfilter/regexp/regsub.c 2013-08-07 06:16:14.450800314 +0200
+@@ -0,0 +1,95 @@
++/*
++ * regsub
++ * @(#)regsub.c       1.3 of 2 April 86
++ *
++ *    Copyright (c) 1986 by University of Toronto.
++ *    Written by Henry Spencer.  Not derived from licensed software.
++ *
++ *    Permission is granted to anyone to use this software for any
++ *    purpose on any computer system, and to redistribute it freely,
++ *    subject to the following restrictions:
++ *
++ *    1. The author is not responsible for the consequences of use of
++ *            this software, no matter how awful, even if they arise
++ *            from defects in it.
++ *
++ *    2. The origin of this software must not be misrepresented, either
++ *            by explicit claim or by omission.
++ *
++ *    3. Altered versions must be plainly marked as such, and must not
++ *            be misrepresented as being the original software.
++ *
++ *
++ * This code was modified by Ethan Sommer to work within the kernel
++ * (it now uses kmalloc etc..)
++ *
++ */
++#include "regexp.h"
++#include "regmagic.h"
++#include <linux/string.h>
++
++
++#ifndef CHARBITS
++#define       UCHARAT(p)      ((int)*(unsigned char *)(p))
++#else
++#define       UCHARAT(p)      ((int)*(p)&CHARBITS)
++#endif
++
++#if 0
++//void regerror(char * s)
++//{
++//        printk("regexp(3): %s", s);
++//        /* NOTREACHED */
++//}
++#endif
++
++/*
++ - regsub - perform substitutions after a regexp match
++ */
++void
++regsub(regexp * prog, char * source, char * dest)
++{
++      register char *src;
++      register char *dst;
++      register char c;
++      register int no;
++      register int len;
++      
++      /* Not necessary and gcc doesn't like it -MLS */
++      /*extern char *strncpy();*/
++
++      if (prog == NULL || source == NULL || dest == NULL) {
++              regerror("NULL parm to regsub");
++              return;
++      }
++      if (UCHARAT(prog->program) != MAGIC) {
++              regerror("damaged regexp fed to regsub");
++              return;
++      }
++
++      src = source;
++      dst = dest;
++      while ((c = *src++) != '\0') {
++              if (c == '&')
++                      no = 0;
++              else if (c == '\\' && '0' <= *src && *src <= '9')
++                      no = *src++ - '0';
++              else
++                      no = -1;
++
++              if (no < 0) {   /* Ordinary character. */
++                      if (c == '\\' && (*src == '\\' || *src == '&'))
++                              c = *src++;
++                      *dst++ = c;
++              } else if (prog->startp[no] != NULL && prog->endp[no] != NULL) {
++                      len = prog->endp[no] - prog->startp[no];
++                      (void) strncpy(dst, prog->startp[no], len);
++                      dst += len;
++                      if (len != 0 && *(dst-1) == '\0') {     /* strncpy hit NUL. */
++                              regerror("damaged match string");
++                              return;
++                      }
++              }
++      }
++      *dst++ = '\0';
++}
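++
++/*
++ * Worked example: '&' in the source string copies the whole match
++ * (startp[0]..endp[0]) and "\1".."\9" copy the corresponding
++ * subexpressions.  The pattern, subject and buffer below are made up
++ * for illustration; note that regsub() does no bounds checking, so
++ * dest must be sized generously by the caller.
++ */
++#if 0
++static void regsub_example(regexp *prog)
++{
++	static char buf[256];
++
++	/* Suppose prog was compiled from "get (/[^ ]*)" and
++	   regexec(prog, "get /index.html") has just succeeded. */
++	regsub(prog, (char *)"path=\\1 whole=&", buf);
++	/* buf now holds "path=/index.html whole=get /index.html" */
++}
++#endif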
+diff -Naur linux-3.10.5.org/net/netfilter/xt_layer7.c linux-3.10.5/net/netfilter/xt_layer7.c
+--- linux-3.10.5.org/net/netfilter/xt_layer7.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.10.5/net/netfilter/xt_layer7.c     2013-08-07 06:16:14.490798961 +0200
+@@ -0,0 +1,684 @@
++/*
++  Kernel module to match application layer (OSI layer 7) data in connections.
++
++  http://l7-filter.sf.net
++
++  (C) 2003-2009 Matthew Strait and Ethan Sommer.
++
++  This program is free software; you can redistribute it and/or
++  modify it under the terms of the GNU General Public License
++  as published by the Free Software Foundation; either version
++  2 of the License, or (at your option) any later version.
++  http://www.gnu.org/licenses/gpl.txt
++
++  Based on ipt_string.c (C) 2000 Emmanuel Roger <winfield@freegates.be>,
++  xt_helper.c (C) 2002 Harald Welte and cls_layer7.c (C) 2003 Matthew Strait,
++  Ethan Sommer, Justin Levandoski.
++*/
++
++#include <linux/spinlock.h>
++#include <linux/version.h>
++#include <net/ip.h>
++#include <net/tcp.h>
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter.h>
++#include <net/netfilter/nf_conntrack.h>
++#include <net/netfilter/nf_conntrack_core.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++#include <net/netfilter/nf_conntrack_extend.h>
++#include <net/netfilter/nf_conntrack_acct.h>
++#endif
++#include <linux/netfilter/x_tables.h>
++#include <linux/netfilter/xt_layer7.h>
++#include <linux/ctype.h>
++#include <linux/proc_fs.h>
++
++#include "regexp/regexp.c"
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Matthew Strait <quadong@users.sf.net>, Ethan Sommer <sommere@users.sf.net>");
++MODULE_DESCRIPTION("iptables application layer match module");
++MODULE_ALIAS("ipt_layer7");
++MODULE_VERSION("2.22ipfire");
++
++static int maxdatalen = 2048; // this is the default
++module_param(maxdatalen, int, 0444);
++MODULE_PARM_DESC(maxdatalen, "maximum bytes of data looked at by l7-filter");
++#ifdef CONFIG_NETFILTER_XT_MATCH_LAYER7_DEBUG
++      #define DPRINTK(format,args...) printk(format,##args)
++#else
++      #define DPRINTK(format,args...)
++#endif
++
++/* Number of packets whose data we look at.
++This can be modified through /proc/net/layer7_numpackets */
++static int num_packets = 10;
++
++static struct pattern_cache {
++      char * regex_string;
++      regexp * pattern;
++      struct pattern_cache * next;
++} * first_pattern_cache = NULL;
++
++DEFINE_SPINLOCK(l7_lock);
++
++static int total_acct_packets(struct nf_conn *ct)
++{
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 26)
++      BUG_ON(ct == NULL);
++      return (ct->counters[IP_CT_DIR_ORIGINAL].packets + ct->counters[IP_CT_DIR_REPLY].packets);
++#else
++      struct nf_conn_counter *acct;
++
++      BUG_ON(ct == NULL);
++      acct = nf_conn_acct_find(ct);
++      if (!acct)
++              return 0;
++      return ( atomic64_read(&acct[IP_CT_DIR_ORIGINAL].packets) + atomic64_read(&acct[IP_CT_DIR_REPLY].packets) );
++#endif
++}
++
++#ifdef CONFIG_IP_NF_MATCH_LAYER7_DEBUG
++/* Converts an unfriendly string into a friendly one by
++replacing unprintables with periods and all whitespace with " ". */
++static char * friendly_print(unsigned char * s)
++{
++      char * f = kmalloc(strlen(s) + 1, GFP_ATOMIC);
++      int i;
++
++      if(!f) {
++              if (net_ratelimit())
++                      printk(KERN_ERR "layer7: out of memory in "
++                                      "friendly_print, bailing.\n");
++              return NULL;
++      }
++
++      for(i = 0; i < strlen(s); i++){
++              if(isprint(s[i]) && s[i] < 128) f[i] = s[i];
++              else if(isspace(s[i]))          f[i] = ' ';
++              else                            f[i] = '.';
++      }
++      f[i] = '\0';
++      return f;
++}
++
++static char dec2hex(int i)
++{
++      switch (i) {
++              case 0 ... 9:
++                      return (i + '0');
++                      break;
++              case 10 ... 15:
++                      return (i - 10 + 'a');
++                      break;
++              default:
++                      if (net_ratelimit())
++                              printk("layer7: Problem in dec2hex\n");
++                      return '\0';
++      }
++}
++
++static char * hex_print(unsigned char * s)
++{
++      char * g = kmalloc(strlen(s)*3 + 1, GFP_ATOMIC);
++      int i;
++
++      if(!g) {
++             if (net_ratelimit())
++                      printk(KERN_ERR "layer7: out of memory in hex_print, "
++                                      "bailing.\n");
++             return NULL;
++      }
++
++      for(i = 0; i < strlen(s); i++) {
++              g[i*3    ] = dec2hex(s[i]/16);
++              g[i*3 + 1] = dec2hex(s[i]%16);
++              g[i*3 + 2] = ' ';
++      }
++      g[i*3] = '\0';
++
++      return g;
++}
++#endif // DEBUG
++
++/* Use instead of regcomp.  As we expect to be seeing the same regexps over and
++over again, it makes sense to cache the results. */
++static regexp * compile_and_cache(const char * regex_string, 
++                                  const char * protocol)
++{
++      struct pattern_cache * node               = first_pattern_cache;
++      struct pattern_cache * last_pattern_cache = first_pattern_cache;
++      struct pattern_cache * tmp;
++      unsigned int len;
++
++      while (node != NULL) {
++              if (!strcmp(node->regex_string, regex_string))
++              return node->pattern;
++
++              last_pattern_cache = node;/* points at the last non-NULL node */
++              node = node->next;
++      }
++
++      /* If we reach the end of the list, then we have not yet cached
++         the pattern for this regex. Let's do that now.
++         Be paranoid about running out of memory to avoid list corruption. */
++      tmp = kmalloc(sizeof(struct pattern_cache), GFP_ATOMIC);
++
++      if(!tmp) {
++              if (net_ratelimit())
++                      printk(KERN_ERR "layer7: out of memory in "
++                                      "compile_and_cache, bailing.\n");
++              return NULL;
++      }
++
++      tmp->regex_string  = kmalloc(strlen(regex_string) + 1, GFP_ATOMIC);
++      tmp->pattern       = kmalloc(sizeof(struct regexp),    GFP_ATOMIC);
++      tmp->next = NULL;
++
++      if(!tmp->regex_string || !tmp->pattern) {
++              if (net_ratelimit())
++                      printk(KERN_ERR "layer7: out of memory in "
++                                      "compile_and_cache, bailing.\n");
++              kfree(tmp->regex_string);
++              kfree(tmp->pattern);
++              kfree(tmp);
++              return NULL;
++      }
++
++      /* Ok.  The new node is all ready now. */
++      node = tmp;
++
++      if(first_pattern_cache == NULL) /* list is empty */
++              first_pattern_cache = node; /* make node the beginning */
++      else
++              last_pattern_cache->next = node; /* attach node to the end */
++
++      /* copy the string and compile the regex */
++      len = strlen(regex_string);
++      DPRINTK("layer7: about to compile this: \"%s\"\n", regex_string);
++      node->pattern = regcomp((char *)regex_string, &len);
++      if ( !node->pattern ) {
++              if (net_ratelimit())
++                      printk(KERN_ERR "layer7: Error compiling regexp "
++                                      "\"%s\" (%s)\n", 
++                                      regex_string, protocol);
++              /* pattern is now cached as NULL, so we won't try again. */
++      }
++
++      strcpy(node->regex_string, regex_string);
++      return node->pattern;
++}
++
++static int can_handle(const struct sk_buff *skb)
++{
++      if(!ip_hdr(skb)) /* not IP */
++              return 0;
++      if(ip_hdr(skb)->protocol != IPPROTO_TCP &&
++         ip_hdr(skb)->protocol != IPPROTO_UDP &&
++         ip_hdr(skb)->protocol != IPPROTO_ICMP)
++              return 0;
++      return 1;
++}
++
++/* Returns the offset into skb->data at which the application data starts */
++static int app_data_offset(const struct sk_buff *skb)
++{
++      /* In case we are ported somewhere (ebtables?) where ip_hdr(skb)
++      isn't set, this can be gotten from 4*(skb->data[0] & 0x0f) as well. */
++      int ip_hl = 4*ip_hdr(skb)->ihl;
++
++      if( ip_hdr(skb)->protocol == IPPROTO_TCP ) {
++              /* 12 == offset into TCP header for the header length field.
++              Can't get this with skb->h.th->doff because the tcphdr
++              struct doesn't get set when routing (this is confirmed to be
++              true in Netfilter as well as QoS.) */
++              int tcp_hl = 4*(skb->data[ip_hl + 12] >> 4);
++
++              return ip_hl + tcp_hl;
++      } else if( ip_hdr(skb)->protocol == IPPROTO_UDP  ) {
++              return ip_hl + 8; /* UDP header is always 8 bytes */
++      } else if( ip_hdr(skb)->protocol == IPPROTO_ICMP ) {
++              return ip_hl + 8; /* ICMP header is 8 bytes */
++      } else {
++              if (net_ratelimit())
++                      printk(KERN_ERR "layer7: tried to handle unknown "
++                                      "protocol!\n");
++              return ip_hl + 8; /* something reasonable */
++      }
++}
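++
++/* Worked example: for a TCP packet with a 20-byte IP header (ihl == 5)
++and a data-offset nibble of 8 (a 32-byte TCP header, i.e. options are
++present), this returns 4*5 + 4*8 = 52, so skb->data + 52 is the first
++byte of application data. */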
++
++/* handles whether there's a match when we aren't appending data anymore */
++static int match_no_append(struct nf_conn * conntrack, 
++                           struct nf_conn * master_conntrack, 
++                           enum ip_conntrack_info ctinfo,
++                           enum ip_conntrack_info master_ctinfo,
++                           const struct xt_layer7_info * info)
++{
++      /* If we're in here, throw the app data away */
++      if(master_conntrack->layer7.app_data != NULL) {
++
++      #ifdef CONFIG_IP_NF_MATCH_LAYER7_DEBUG
++              if(!master_conntrack->layer7.app_proto) {
++                      char * f = 
++                        friendly_print(master_conntrack->layer7.app_data);
++                      char * g = 
++                        hex_print(master_conntrack->layer7.app_data);
++                      DPRINTK("\nl7-filter gave up after %d bytes "
++                              "(%d packets):\n%s\n",
++                              strlen(f), total_acct_packets(master_conntrack), f);
++                      kfree(f);
++                      DPRINTK("In hex: %s\n", g);
++                      kfree(g);
++              }
++      #endif
++
++              kfree(master_conntrack->layer7.app_data);
++              master_conntrack->layer7.app_data = NULL; /* don't free again */
++      }
++
++      if(master_conntrack->layer7.app_proto){
++              /* Here child connections set their .app_proto (for /proc) */
++              if(!conntrack->layer7.app_proto) {
++                      conntrack->layer7.app_proto = 
++                        kmalloc(strlen(master_conntrack->layer7.app_proto)+1, 
++                          GFP_ATOMIC);
++                      if(!conntrack->layer7.app_proto){
++                              if (net_ratelimit())
++                                      printk(KERN_ERR "layer7: out of memory "
++                                                      "in match_no_append, "
++                                                      "bailing.\n");
++                              return 1;
++                      }
++                      strcpy(conntrack->layer7.app_proto, 
++                              master_conntrack->layer7.app_proto);
++              }
++
++              return (!strcmp(master_conntrack->layer7.app_proto, 
++                              info->protocol));
++      }
++      else {
++              /* If not classified, set to "unknown" to distinguish from
++              connections that are still being tested. */
++              master_conntrack->layer7.app_proto = 
++                      kmalloc(strlen("unknown")+1, GFP_ATOMIC);
++              if(!master_conntrack->layer7.app_proto){
++                      if (net_ratelimit())
++                              printk(KERN_ERR "layer7: out of memory in "
++                                              "match_no_append, bailing.\n");
++                      return 1;
++              }
++              strcpy(master_conntrack->layer7.app_proto, "unknown");
++              return 0;
++      }
++}
++
++/* add the new app data to the conntrack.  Return number of bytes added. */
++static int add_data(struct nf_conn * master_conntrack,
++                    char * app_data, int appdatalen)
++{
++      int length = 0, i;
++      int oldlength = master_conntrack->layer7.app_data_len;
++
++      /* This is a fix for a race condition by Deti Fliegl. However, I'm not 
++         clear on whether the race condition exists or whether this really 
++         fixes it.  I might just be being dense... Anyway, if it's not really 
++         a fix, all it does is waste a very small amount of time. */
++      if(!master_conntrack->layer7.app_data) return 0;
++
++      /* Strip nulls. Make everything lower case (our regex lib doesn't
++      do case insensitivity).  Add it to the end of the current data. */
++      for(i = 0; i < maxdatalen-oldlength-1 &&
++                 i < appdatalen; i++) {
++              if(app_data[i] != '\0') {
++                      /* the kernel version of tolower mungs 'upper ascii' */
++                      master_conntrack->layer7.app_data[length+oldlength] =
++                              isascii(app_data[i])? 
++                                      tolower(app_data[i]) : app_data[i];
++                      length++;
++              }
++      }
++
++      master_conntrack->layer7.app_data[length+oldlength] = '\0';
++      master_conntrack->layer7.app_data_len = length + oldlength;
++
++      return length;
++}
++
++/* taken from drivers/video/modedb.c */
++static int my_atoi(const char *s)
++{
++      int val = 0;
++
++      for (;; s++) {
++              switch (*s) {
++                      case '0'...'9':
++                      val = 10*val+(*s-'0');
++                      break;
++              default:
++                      return val;
++              }
++      }
++}
++
++/* write out num_packets to userland. */
++static int layer7_read_proc(char* page, char ** start, off_t off, int count,
++                            int* eof, void * data)
++{
++      if(num_packets > 99 && net_ratelimit())
++              printk(KERN_ERR "layer7: NOT REACHED. num_packets too big\n");
++
++      page[0] = num_packets/10 + '0';
++      page[1] = num_packets%10 + '0';
++      page[2] = '\n';
++      page[3] = '\0';
++
++      *eof=1;
++
++      return 3;
++}
++
++/* Read in num_packets from userland */
++static int layer7_write_proc(struct file* file, const char* buffer,
++                             unsigned long count, void *data)
++{
++	char * foo = kmalloc(count + 1, GFP_ATOMIC); /* +1 for a terminating NUL */
++
++      if(!foo){
++              if (net_ratelimit())
++                      printk(KERN_ERR "layer7: out of memory, bailing. "
++                                      "num_packets unchanged.\n");
++              return count;
++      }
++
++	if(copy_from_user(foo, buffer, count)) {
++		kfree(foo);	/* don't leak the buffer on a failed copy */
++		return -EFAULT;
++	}
++	foo[count] = '\0';	/* my_atoi() needs a NUL-terminated string */
++
++
++      num_packets = my_atoi(foo);
++      kfree (foo);
++
++      /* This has an arbitrary limit to make the math easier. I'm lazy.
++      But anyway, 99 is a LOT! If you want more, you're doing it wrong! */
++      if(num_packets > 99) {
++              printk(KERN_WARNING "layer7: num_packets can't be > 99.\n");
++              num_packets = 99;
++      } else if(num_packets < 1) {
++              printk(KERN_WARNING "layer7: num_packets can't be < 1.\n");
++              num_packets = 1;
++      }
++
++      return count;
++}
++
++static bool
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
++match(const struct sk_buff *skbin, struct xt_action_param *par)
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++match(const struct sk_buff *skbin, const struct xt_match_param *par)
++#else
++match(const struct sk_buff *skbin,
++      const struct net_device *in,
++      const struct net_device *out,
++      const struct xt_match *match,
++      const void *matchinfo,
++      int offset,
++      unsigned int protoff,
++      bool *hotdrop)
++#endif
++{
++      /* sidestep const without getting a compiler warning... */
++      struct sk_buff * skb = (struct sk_buff *)skbin; 
++
++      const struct xt_layer7_info * info = 
++      #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++              par->matchinfo;
++      #else
++              matchinfo;
++      #endif
++
++      enum ip_conntrack_info master_ctinfo, ctinfo;
++      struct nf_conn *master_conntrack, *conntrack;
++      unsigned char * app_data;
++      unsigned int pattern_result, appdatalen;
++      regexp * comppattern;
++
++      /* Be paranoid/incompetent - lock the entire match function. */
++      spin_lock_bh(&l7_lock);
++
++      if(!can_handle(skb)){
++              DPRINTK("layer7: This is some protocol I can't handle.\n");
++              spin_unlock_bh(&l7_lock);
++              return info->invert;
++      }
++
++      /* Treat parent & all its children together as one connection, except
++      for the purpose of setting conntrack->layer7.app_proto in the actual
++      connection. This makes /proc/net/ip_conntrack more satisfying. */
++      if(!(conntrack = nf_ct_get(skb, &ctinfo)) ||
++         !(master_conntrack=nf_ct_get(skb,&master_ctinfo))){
++              DPRINTK("layer7: couldn't get conntrack.\n");
++              spin_unlock_bh(&l7_lock);
++              return info->invert;
++      }
++
++      /* Try to get a master conntrack (and its master etc) for FTP, etc. */
++      while (master_ct(master_conntrack) != NULL)
++              master_conntrack = master_ct(master_conntrack);
++
++      /* if we've classified it or seen too many packets */
++      if(total_acct_packets(master_conntrack) > num_packets ||
++         master_conntrack->layer7.app_proto) {
++
++              pattern_result = match_no_append(conntrack, master_conntrack, 
++                                               ctinfo, master_ctinfo, info);
++
++              /* skb->cb[0] == seen. Don't do things twice if there are 
++              multiple l7 rules. I'm not sure that using cb for this purpose 
++              is correct, even though it says "put your private variables 
++              there". But it doesn't look like it is being used for anything
++              else in the skbs that make it here. */
++              skb->cb[0] = 1; /* marking it seen here's probably irrelevant */
++
++              spin_unlock_bh(&l7_lock);
++              return (pattern_result ^ info->invert);
++      }
++
++      if(skb_is_nonlinear(skb)){
++              if(skb_linearize(skb) != 0){
++                      if (net_ratelimit())
++                              printk(KERN_ERR "layer7: failed to linearize "
++                                              "packet, bailing.\n");
++                      spin_unlock_bh(&l7_lock);
++                      return info->invert;
++              }
++      }
++
++      /* now that the skb is linearized, it's safe to set these. */
++      app_data = skb->data + app_data_offset(skb);
++      appdatalen = skb_tail_pointer(skb) - app_data;
++
++      /* the return value gets checked later, when we're ready to use it */
++      comppattern = compile_and_cache(info->pattern, info->protocol);
++
++      /* On the first packet of a connection, allocate space for app data */
++      if(total_acct_packets(master_conntrack) == 1 && !skb->cb[0] && 
++         !master_conntrack->layer7.app_data){
++              master_conntrack->layer7.app_data = 
++                      kmalloc(maxdatalen, GFP_ATOMIC);
++              if(!master_conntrack->layer7.app_data){
++                      if (net_ratelimit())
++                              printk(KERN_ERR "layer7: out of memory in "
++                                              "match, bailing.\n");
++                      spin_unlock_bh(&l7_lock);
++                      return info->invert;
++              }
++
++              master_conntrack->layer7.app_data[0] = '\0';
++      }
++
++      /* We can get here with app_data still unallocated if num_packets
++      was increased near the beginning of a connection. */
++      if(master_conntrack->layer7.app_data == NULL){
++              spin_unlock_bh(&l7_lock);
++              return info->invert; /* unmatched */
++      }
++
++      if(!skb->cb[0]){
++              int newbytes;
++              newbytes = add_data(master_conntrack, app_data, appdatalen);
++
++              if(newbytes == 0) { /* didn't add any data */
++                      skb->cb[0] = 1;
++                      /* Didn't match before, not going to match now */
++                      spin_unlock_bh(&l7_lock);
++                      return info->invert;
++              }
++      }
++
++      /* If looking for "unknown", then never match here.  "Unknown" means
++      we have given up classifying, but on this path we are still trying
++      with these packets, so the connection isn't "unknown" yet. */
++      if(!strcmp(info->protocol, "unknown")) {
++              pattern_result = 0;
++      /* If looking for "unset", then always match. "Unset" means that we
++      haven't yet classified the connection. */
++      } else if(!strcmp(info->protocol, "unset")) {
++              pattern_result = 2;
++              DPRINTK("layer7: matched unset: not yet classified "
++                      "(%d/%d packets)\n",
++                        total_acct_packets(master_conntrack), num_packets);
++      /* If the regexp failed to compile, don't bother running it */
++      } else if(comppattern && 
++                regexec(comppattern, master_conntrack->layer7.app_data)){
++              DPRINTK("layer7: matched %s\n", info->protocol);
++              pattern_result = 1;
++      } else pattern_result = 0;
++
++      if(pattern_result == 1) {
++              master_conntrack->layer7.app_proto = 
++                      kmalloc(strlen(info->protocol)+1, GFP_ATOMIC);
++              if(!master_conntrack->layer7.app_proto){
++                      if (net_ratelimit())
++                              printk(KERN_ERR "layer7: out of memory in "
++                                              "match, bailing.\n");
++                      spin_unlock_bh(&l7_lock);
++                      return (pattern_result ^ info->invert);
++              }
++              strcpy(master_conntrack->layer7.app_proto, info->protocol);
++      } else if(pattern_result > 1) { /* cleanup from "unset" */
++              pattern_result = 1;
++      }
++
++      /* mark the packet seen */
++      skb->cb[0] = 1;
++
++      spin_unlock_bh(&l7_lock);
++      return (pattern_result ^ info->invert);
++}
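
An aside for readers tracing match() above: the fields it reads (info->protocol, info->pattern, info->invert) belong to struct xt_layer7_info, whose authoritative definition is the xt_layer7.h header added elsewhere in this patch, not in this hunk. As a rough orientation only -- the field names are inferred from how match() uses them, and the MAX_* array sizes are placeholder assumptions -- the structure has roughly this shape:

    /* Illustrative sketch only; see the real header elsewhere in the patch. */
    #define MAX_PROTOCOL_LEN 256    /* assumed size */
    #define MAX_PATTERN_LEN  8192   /* assumed size */

    struct xt_layer7_info {
            char protocol[MAX_PROTOCOL_LEN]; /* e.g. "http", "unknown", "unset" */
            char pattern[MAX_PATTERN_LEN];   /* regex handed to compile_and_cache() */
            __u8 invert;                     /* set when the rule negates the match */
    };

The closing return (pattern_result ^ info->invert) is what implements rule negation: a successful regex match with invert set reports non-match, and vice versa.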
++
++/* checkentry: make sure conntrack support for the rule's address family
++   is loaded (nf_conntrack_ipv4 in the AF_INET case). */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
++static int
++#else
++static bool
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++check(const struct xt_mtchk_param *par)
++{
++      if (nf_ct_l3proto_try_module_get(par->match->family) < 0) {
++              printk(KERN_WARNING "can't load conntrack support for "
++                                  "proto=%d\n", par->match->family);
++#else
++check(const char *tablename, const void *inf,
++      const struct xt_match *match, void *matchinfo,
++      unsigned int hook_mask)
++{
++      if (nf_ct_l3proto_try_module_get(match->family) < 0) {
++              printk(KERN_WARNING "can't load conntrack support for "
++                                  "proto=%d\n", match->family);
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
++              return -EINVAL;
++      }
++      return 0;
++#else
++              return 0;
++      }
++      return 1;
++#endif
++}
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++      static void destroy(const struct xt_mtdtor_param *par)
++      {
++              nf_ct_l3proto_module_put(par->match->family);
++      }
++#else
++      static void destroy(const struct xt_match *match, void *matchinfo)
++      {
++              nf_ct_l3proto_module_put(match->family);
++      }
++#endif
++
++static struct xt_match xt_layer7_match[] __read_mostly = {
++{
++      .name           = "layer7",
++      .family         = AF_INET,
++      .checkentry     = check,
++      .match          = match,
++      .destroy        = destroy,
++      .matchsize      = sizeof(struct xt_layer7_info),
++      .me             = THIS_MODULE
++}
++};
++
++/* The /proc/net/layer7_numpackets interface is currently disabled, so these
++   are kept as stubs to keep the init/exit paths symmetrical. */
++static void layer7_cleanup_proc(void)
++{
++//    remove_proc_entry("layer7_numpackets", init_net.proc_net);
++}
++
++/* register the proc file (disabled for now, see above) */
++static void layer7_init_proc(void)
++{
++//    struct proc_dir_entry* entry;
++//    entry = create_proc_entry("layer7_numpackets", 0644, init_net.proc_net);
++//    entry->read_proc = layer7_read_proc;
++//    entry->write_proc = layer7_write_proc;
++}
++
++static int __init xt_layer7_init(void)
++{
++      need_conntrack();
++
++      if (init_net.ct.sysctl_acct == 0) {
++              printk(KERN_WARNING "layer7: enabling nf_conntrack_acct\n");
++              init_net.ct.sysctl_acct = 1;
++      }
++
++      layer7_init_proc();
++      if(maxdatalen < 1) {
++              printk(KERN_WARNING "layer7: maxdatalen can't be < 1, "
++                      "using 1\n");
++              maxdatalen = 1;
++      }
++      /* This is not a hard limit.  It's just here to prevent people from
++      bringing their slow machines to a grinding halt. */
++      else if(maxdatalen > 65536) {
++              printk(KERN_WARNING "layer7: maxdatalen can't be > 65536, "
++                      "using 65536\n");
++              maxdatalen = 65536;
++      }
++      return xt_register_matches(xt_layer7_match,
++                                 ARRAY_SIZE(xt_layer7_match));
++}
++
++static void __exit xt_layer7_fini(void)
++{
++      layer7_cleanup_proc();
++      xt_unregister_matches(xt_layer7_match, ARRAY_SIZE(xt_layer7_match));
++}
++
++module_init(xt_layer7_init);
++module_exit(xt_layer7_fini);
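
A note on the tunables used in xt_layer7_init() above: maxdatalen and num_packets are module-level variables declared earlier in this file, outside this hunk. A minimal sketch of what such declarations typically look like -- the default values and permission bits below are illustrative assumptions, not taken from this patch -- is:

    /* Sketch only; the real declarations sit earlier in the file.
     * module_param()/MODULE_PARM_DESC() come via <linux/module.h>. */
    static int maxdatalen = 2048;  /* bytes of application data buffered per connection */
    module_param(maxdatalen, int, 0444);
    MODULE_PARM_DESC(maxdatalen, "maximum bytes of application data to buffer");

    static int num_packets = 10;   /* packets inspected before classification gives up */

The disabled /proc stubs above suggest num_packets was meant to be adjustable at runtime via /proc/net/layer7_numpackets. On the userspace side the match is then selected per rule, e.g. iptables -A FORWARD -m layer7 --l7proto http -j ACCEPT, with a negated --l7proto driving the invert flag handled in match().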