1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index 9de9813..1462492 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
44 @@ -72,9 +78,11 @@ Image
56 @@ -83,6 +91,7 @@ aic7*seq.h*
64 @@ -95,32 +104,40 @@ bounds.h
92 +devicetable-offsets.h
101 +exception_policy.conf
105 @@ -128,12 +145,15 @@ fore200e_pca_fw.c*
121 @@ -148,14 +168,14 @@ int32.c
138 @@ -165,14 +185,15 @@ mach-types.h
155 @@ -188,6 +209,8 @@ oui.c*
164 @@ -197,6 +220,7 @@ perf-archive
172 @@ -206,7 +230,12 @@ r200_reg_safe.h
176 +randomize_layout_hash.h
177 +randomize_layout_seed.h
185 @@ -216,8 +245,12 @@ series
190 +size_overflow_hash.h
198 @@ -227,6 +260,7 @@ tftpboot.img
206 @@ -238,13 +272,17 @@ vdso32.lds
224 @@ -252,9 +290,12 @@ vsyscall_32.lds
237 diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
238 index 74b6c6d..eac0e77 100644
239 --- a/Documentation/kbuild/makefiles.txt
240 +++ b/Documentation/kbuild/makefiles.txt
241 @@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
242 === 4 Host Program support
243 --- 4.1 Simple Host Program
244 --- 4.2 Composite Host Programs
245 - --- 4.3 Using C++ for host programs
246 - --- 4.4 Controlling compiler options for host programs
247 - --- 4.5 When host programs are actually built
248 - --- 4.6 Using hostprogs-$(CONFIG_FOO)
249 + --- 4.3 Defining shared libraries
250 + --- 4.4 Using C++ for host programs
251 + --- 4.5 Controlling compiler options for host programs
252 + --- 4.6 When host programs are actually built
253 + --- 4.7 Using hostprogs-$(CONFIG_FOO)
255 === 5 Kbuild clean infrastructure
257 @@ -643,7 +644,29 @@ Both possibilities are described in the following.
258 Finally, the two .o files are linked to the executable, lxdialog.
259 Note: The syntax <executable>-y is not permitted for host-programs.
261 ---- 4.3 Using C++ for host programs
262 +--- 4.3 Defining shared libraries
264 + Objects with extension .so are considered shared libraries, and
265 + will be compiled as position independent objects.
266 + Kbuild provides support for shared libraries, but the usage
267 + shall be restricted.
268 + In the following example the libkconfig.so shared library is used
269 + to link the executable conf.
272 + #scripts/kconfig/Makefile
273 + hostprogs-y := conf
274 + conf-objs := conf.o libkconfig.so
275 + libkconfig-objs := expr.o type.o
277 + Shared libraries always require a corresponding -objs line, and
278 + in the example above the shared library libkconfig is composed by
279 + the two objects expr.o and type.o.
280 + expr.o and type.o will be built as position independent code and
281 + linked as a shared library libkconfig.so. C++ is not supported for
284 +--- 4.4 Using C++ for host programs
286 kbuild offers support for host programs written in C++. This was
287 introduced solely to support kconfig, and is not recommended
288 @@ -666,7 +689,7 @@ Both possibilities are described in the following.
289 qconf-cxxobjs := qconf.o
290 qconf-objs := check.o
292 ---- 4.4 Controlling compiler options for host programs
293 +--- 4.5 Controlling compiler options for host programs
295 When compiling host programs, it is possible to set specific flags.
296 The programs will always be compiled utilising $(HOSTCC) passed
297 @@ -694,7 +717,7 @@ Both possibilities are described in the following.
298 When linking qconf, it will be passed the extra option
301 ---- 4.5 When host programs are actually built
302 +--- 4.6 When host programs are actually built
304 Kbuild will only build host-programs when they are referenced
306 @@ -725,7 +748,7 @@ Both possibilities are described in the following.
307 This will tell kbuild to build lxdialog even if not referenced in
310 ---- 4.6 Using hostprogs-$(CONFIG_FOO)
311 +--- 4.7 Using hostprogs-$(CONFIG_FOO)
313 A typical pattern in a Kbuild file looks like this:
315 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
316 index 6726139..c825c0a 100644
317 --- a/Documentation/kernel-parameters.txt
318 +++ b/Documentation/kernel-parameters.txt
319 @@ -1223,6 +1223,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
320 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
323 + grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
324 + ignore grsecurity's /proc restrictions
326 + grsec_sysfs_restrict= Format: 0 | 1
328 + Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
330 hashdist= [KNL,NUMA] Large hashes allocated during boot
331 are distributed across NUMA nodes. Defaults on
332 for 64-bit NUMA, off otherwise.
333 @@ -2333,6 +2340,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
334 noexec=on: enable non-executable mappings (default)
335 noexec=off: disable non-executable mappings
338 + Disable PCID (Process-Context IDentifier) even if it
339 + is supported by the processor.
342 Disable SMAP (Supervisor Mode Access Prevention)
343 even if it is supported by processor.
344 @@ -2631,6 +2642,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
345 the specified number of seconds. This is to be used if
346 your oopses keep scrolling off the screen.
348 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
349 + virtualization environments that don't cope well with the
350 + expand down segment used by UDEREF on X86-32 or the frequent
351 + page table updates on X86-64.
354 + Format: { 0 | 1 | off | fast | full }
355 + Options '0' and '1' are only provided for backward
356 + compatibility, 'off' or 'fast' should be used instead.
357 + 0|off : disable slab object sanitization
358 + 1|fast: enable slab object sanitization excluding
359 + whitelisted slabs (default)
360 + full : sanitize all slabs, even the whitelisted ones
362 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
364 + pax_extra_latent_entropy
365 + Enable a very simple form of latent entropy extraction
366 + from the first 4GB of memory as the bootmem allocator
367 + passes the memory pages to the buddy allocator.
369 + pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
370 + when the processor supports PCID.
375 diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
376 index c831001..1bfbbf6 100644
377 --- a/Documentation/sysctl/kernel.txt
378 +++ b/Documentation/sysctl/kernel.txt
379 @@ -41,6 +41,7 @@ show up in /proc/sys/kernel:
381 - kstack_depth_to_print [ X86 only ]
383 +- modify_ldt [ X86 only ]
384 - modprobe ==> Documentation/debugging-modules.txt
386 - msg_next_id [ sysv ipc ]
387 @@ -391,6 +392,20 @@ This flag controls the L2 cache of G3 processor boards. If
389 ==============================================================
391 +modify_ldt: (X86 only)
393 +Enables (1) or disables (0) the modify_ldt syscall. Modifying the LDT
394 +(Local Descriptor Table) may be needed to run a 16-bit or segmented code
395 +such as Dosemu or Wine. This is done via a system call which is not needed
396 +to run portable applications, and which can sometimes be abused to exploit
397 +some weaknesses of the architecture, opening new vulnerabilities.
399 +This sysctl allows one to increase the system's security by disabling the
400 +system call, or to restore compatibility with specific applications when it
401 +was already disabled.
403 +==============================================================
407 A toggle value indicating if modules are allowed to be loaded
408 diff --git a/Makefile b/Makefile
409 index e3cdec4..56ae73d 100644
412 @@ -299,7 +299,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
415 HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
417 +HOSTCFLAGS = -W -Wno-unused-parameter -Wno-missing-field-initializers -fno-delete-null-pointer-checks
418 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
419 +HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
421 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
422 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
423 @@ -444,8 +446,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
424 # Rules shared between *config targets and build targets
426 # Basic helpers built in scripts/
427 -PHONY += scripts_basic
429 +PHONY += scripts_basic gcc-plugins
430 +scripts_basic: gcc-plugins
431 $(Q)$(MAKE) $(build)=scripts/basic
432 $(Q)rm -f .tmp_quiet_recordmcount
434 @@ -620,6 +622,74 @@ endif
435 # Tell gcc to never replace conditional load with a non-conditional one
436 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
438 +ifndef DISABLE_PAX_PLUGINS
439 +ifeq ($(call cc-ifversion, -ge, 0408, y), y)
440 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
442 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
444 +ifneq ($(PLUGINCC),)
445 +ifdef CONFIG_PAX_CONSTIFY_PLUGIN
446 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
448 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
449 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
450 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
452 +ifdef CONFIG_KALLOCSTAT_PLUGIN
453 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
455 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
456 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
457 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
458 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
460 +ifdef CONFIG_GRKERNSEC_RANDSTRUCT
461 +RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
462 +ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
463 +RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
466 +ifdef CONFIG_CHECKER_PLUGIN
467 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
468 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
471 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
472 +ifdef CONFIG_PAX_SIZE_OVERFLOW
473 +SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
475 +ifdef CONFIG_PAX_LATENT_ENTROPY
476 +LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
478 +ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
479 +STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
481 +INITIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/initify_plugin.so -DINITIFY_PLUGIN
482 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
483 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
484 +GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
485 +GCC_PLUGINS_CFLAGS += $(INITIFY_PLUGIN_CFLAGS)
486 +GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
487 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
488 +export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
489 +ifeq ($(KBUILD_EXTMOD),)
491 + $(Q)$(MAKE) $(build)=tools/gcc
497 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
498 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
500 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
502 + $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
506 ifdef CONFIG_READABLE_ASM
507 # Disable optimizations that make assembler listings hard to read.
508 # reorder blocks reorders the control in the function
509 @@ -712,7 +782,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
513 -KBUILD_AFLAGS += -Wa,-gdwarf-2
514 +KBUILD_AFLAGS += -Wa,--gdwarf-2
516 ifdef CONFIG_DEBUG_INFO_DWARF4
517 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
518 @@ -883,7 +953,7 @@ export mod_sign_cmd
521 ifeq ($(KBUILD_EXTMOD),)
522 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
523 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
525 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
526 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
527 @@ -933,6 +1003,8 @@ endif
529 # The actual objects are generated when descending,
530 # make sure no implicit rule kicks in
531 +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
532 +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
533 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
535 # Handle descending into subdirectories listed in $(vmlinux-dirs)
536 @@ -942,7 +1014,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
537 # Error messages still appears in the original language
539 PHONY += $(vmlinux-dirs)
540 -$(vmlinux-dirs): prepare scripts
541 +$(vmlinux-dirs): gcc-plugins prepare scripts
542 $(Q)$(MAKE) $(build)=$@
544 define filechk_kernel.release
545 @@ -985,10 +1057,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
547 archprepare: archheaders archscripts prepare1 scripts_basic
549 +prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
550 +prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
551 prepare0: archprepare FORCE
552 $(Q)$(MAKE) $(build)=.
554 # All the preparing..
555 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
558 # Generate some files
559 @@ -1096,6 +1171,8 @@ all: modules
560 # using awk while concatenating to the final file.
563 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
564 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
565 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
566 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
567 @$(kecho) ' Building modules, stage 2.';
568 @@ -1111,7 +1188,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
570 # Target to prepare building external modules
571 PHONY += modules_prepare
572 -modules_prepare: prepare scripts
573 +modules_prepare: gcc-plugins prepare scripts
575 # Target to install modules
576 PHONY += modules_install
577 @@ -1177,7 +1254,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
578 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
579 signing_key.priv signing_key.x509 x509.genkey \
580 extra_certificates signing_key.x509.keyid \
581 - signing_key.x509.signer vmlinux-gdb.py
582 + signing_key.x509.signer vmlinux-gdb.py \
583 + tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
584 + tools/gcc/size_overflow_plugin/size_overflow_hash.h \
585 + tools/gcc/randomize_layout_seed.h
587 # clean - Delete most, but leave enough to build external modules
589 @@ -1216,7 +1296,7 @@ distclean: mrproper
590 @find $(srctree) $(RCS_FIND_IGNORE) \
591 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
592 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
593 - -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
594 + -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
595 -type f -print | xargs rm -f
598 @@ -1382,6 +1462,8 @@ PHONY += $(module-dirs) modules
599 $(module-dirs): crmodverdir $(objtree)/Module.symvers
600 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
602 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
603 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
604 modules: $(module-dirs)
605 @$(kecho) ' Building modules, stage 2.';
606 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
607 @@ -1522,17 +1604,21 @@ else
608 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
611 -%.s: %.c prepare scripts FORCE
612 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
613 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
614 +%.s: %.c gcc-plugins prepare scripts FORCE
615 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
616 %.i: %.c prepare scripts FORCE
617 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
618 -%.o: %.c prepare scripts FORCE
619 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
620 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
621 +%.o: %.c gcc-plugins prepare scripts FORCE
622 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
623 %.lst: %.c prepare scripts FORCE
624 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
625 -%.s: %.S prepare scripts FORCE
626 +%.s: %.S gcc-plugins prepare scripts FORCE
627 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
628 -%.o: %.S prepare scripts FORCE
629 +%.o: %.S gcc-plugins prepare scripts FORCE
630 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
631 %.symtypes: %.c prepare scripts FORCE
632 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
633 @@ -1544,11 +1630,15 @@ endif
634 $(build)=$(build-dir)
635 # Make sure the latest headers are built for Documentation
636 Documentation/: headers_install
637 -%/: prepare scripts FORCE
638 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
639 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
640 +%/: gcc-plugins prepare scripts FORCE
642 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
643 $(build)=$(build-dir)
644 -%.ko: prepare scripts FORCE
645 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
646 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
647 +%.ko: gcc-plugins prepare scripts FORCE
649 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
650 $(build)=$(build-dir) $(@:.ko=.o)
651 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
652 index 8f8eafb..3405f46 100644
653 --- a/arch/alpha/include/asm/atomic.h
654 +++ b/arch/alpha/include/asm/atomic.h
655 @@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
656 #define atomic_dec(v) atomic_sub(1,(v))
657 #define atomic64_dec(v) atomic64_sub(1,(v))
659 +#define atomic64_read_unchecked(v) atomic64_read(v)
660 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
661 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
662 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
663 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
664 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
665 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
666 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
667 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
669 #endif /* _ALPHA_ATOMIC_H */
670 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
671 index ad368a9..fbe0f25 100644
672 --- a/arch/alpha/include/asm/cache.h
673 +++ b/arch/alpha/include/asm/cache.h
675 #ifndef __ARCH_ALPHA_CACHE_H
676 #define __ARCH_ALPHA_CACHE_H
678 +#include <linux/const.h>
680 /* Bytes per L1 (data) cache line. */
681 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
682 -# define L1_CACHE_BYTES 64
683 # define L1_CACHE_SHIFT 6
685 /* Both EV4 and EV5 are write-through, read-allocate,
686 direct-mapped, physical.
688 -# define L1_CACHE_BYTES 32
689 # define L1_CACHE_SHIFT 5
692 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
693 #define SMP_CACHE_BYTES L1_CACHE_BYTES
696 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
697 index 968d999..d36b2df 100644
698 --- a/arch/alpha/include/asm/elf.h
699 +++ b/arch/alpha/include/asm/elf.h
700 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
702 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
704 +#ifdef CONFIG_PAX_ASLR
705 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
707 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
708 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
711 /* $0 is set by ld.so to a pointer to a function which might be
712 registered using atexit. This provides a mean for the dynamic
713 linker to call DT_FINI functions for shared libraries that have
714 diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
715 index aab14a0..b4fa3e7 100644
716 --- a/arch/alpha/include/asm/pgalloc.h
717 +++ b/arch/alpha/include/asm/pgalloc.h
718 @@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
723 +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
725 + pgd_populate(mm, pgd, pmd);
728 extern pgd_t *pgd_alloc(struct mm_struct *mm);
731 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
732 index a9a1195..e9b8417 100644
733 --- a/arch/alpha/include/asm/pgtable.h
734 +++ b/arch/alpha/include/asm/pgtable.h
735 @@ -101,6 +101,17 @@ struct vm_area_struct;
736 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
737 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
738 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
740 +#ifdef CONFIG_PAX_PAGEEXEC
741 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
742 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
743 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
745 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
746 +# define PAGE_COPY_NOEXEC PAGE_COPY
747 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
750 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
752 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
753 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
754 index 2fd00b7..cfd5069 100644
755 --- a/arch/alpha/kernel/module.c
756 +++ b/arch/alpha/kernel/module.c
757 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
759 /* The small sections were sorted to the end of the segment.
760 The following should definitely cover them. */
761 - gp = (u64)me->module_core + me->core_size - 0x8000;
762 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
763 got = sechdrs[me->arch.gotsecindex].sh_addr;
765 for (i = 0; i < n; i++) {
766 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
767 index 36dc91a..6769cb0 100644
768 --- a/arch/alpha/kernel/osf_sys.c
769 +++ b/arch/alpha/kernel/osf_sys.c
770 @@ -1295,10 +1295,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
771 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
774 -arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
775 - unsigned long limit)
776 +arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
777 + unsigned long limit, unsigned long flags)
779 struct vm_unmapped_area_info info;
780 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
784 @@ -1306,6 +1307,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
785 info.high_limit = limit;
787 info.align_offset = 0;
788 + info.threadstack_offset = offset;
789 return vm_unmapped_area(&info);
792 @@ -1338,20 +1340,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
793 merely specific addresses, but regions of memory -- perhaps
794 this feature should be incorporated into all ports? */
796 +#ifdef CONFIG_PAX_RANDMMAP
797 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
801 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
802 + addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
803 if (addr != (unsigned long) -ENOMEM)
807 /* Next, try allocating at TASK_UNMAPPED_BASE. */
808 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
810 + addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
812 if (addr != (unsigned long) -ENOMEM)
815 /* Finally, try allocating in low memory. */
816 - addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
817 + addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
821 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
822 index 9d0ac09..479a962 100644
823 --- a/arch/alpha/mm/fault.c
824 +++ b/arch/alpha/mm/fault.c
825 @@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
826 __reload_thread(pcb);
829 +#ifdef CONFIG_PAX_PAGEEXEC
831 + * PaX: decide what to do with offenders (regs->pc = fault address)
833 + * returns 1 when task should be killed
834 + * 2 when patched PLT trampoline was detected
835 + * 3 when unpatched PLT trampoline was detected
837 +static int pax_handle_fetch_fault(struct pt_regs *regs)
840 +#ifdef CONFIG_PAX_EMUPLT
843 + do { /* PaX: patched PLT emulation #1 */
844 + unsigned int ldah, ldq, jmp;
846 + err = get_user(ldah, (unsigned int *)regs->pc);
847 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
848 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
853 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
854 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
855 + jmp == 0x6BFB0000U)
857 + unsigned long r27, addr;
858 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
859 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
861 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
862 + err = get_user(r27, (unsigned long *)addr);
872 + do { /* PaX: patched PLT emulation #2 */
873 + unsigned int ldah, lda, br;
875 + err = get_user(ldah, (unsigned int *)regs->pc);
876 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
877 + err |= get_user(br, (unsigned int *)(regs->pc+8));
882 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
883 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
884 + (br & 0xFFE00000U) == 0xC3E00000U)
886 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
887 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
888 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
890 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
891 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
896 + do { /* PaX: unpatched PLT emulation */
899 + err = get_user(br, (unsigned int *)regs->pc);
901 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
902 + unsigned int br2, ldq, nop, jmp;
903 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
905 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
906 + err = get_user(br2, (unsigned int *)addr);
907 + err |= get_user(ldq, (unsigned int *)(addr+4));
908 + err |= get_user(nop, (unsigned int *)(addr+8));
909 + err |= get_user(jmp, (unsigned int *)(addr+12));
910 + err |= get_user(resolver, (unsigned long *)(addr+16));
915 + if (br2 == 0xC3600000U &&
916 + ldq == 0xA77B000CU &&
917 + nop == 0x47FF041FU &&
918 + jmp == 0x6B7B0000U)
920 + regs->r28 = regs->pc+4;
921 + regs->r27 = addr+16;
922 + regs->pc = resolver;
932 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
936 + printk(KERN_ERR "PAX: bytes at PC: ");
937 + for (i = 0; i < 5; i++) {
939 + if (get_user(c, (unsigned int *)pc+i))
940 + printk(KERN_CONT "???????? ");
942 + printk(KERN_CONT "%08x ", c);
949 * This routine handles page faults. It determines the address,
950 @@ -133,8 +251,29 @@ retry:
952 si_code = SEGV_ACCERR;
954 - if (!(vma->vm_flags & VM_EXEC))
955 + if (!(vma->vm_flags & VM_EXEC)) {
957 +#ifdef CONFIG_PAX_PAGEEXEC
958 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
961 + up_read(&mm->mmap_sem);
962 + switch (pax_handle_fetch_fault(regs)) {
964 +#ifdef CONFIG_PAX_EMUPLT
971 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
972 + do_group_exit(SIGKILL);
979 /* Allow reads even for write-only mappings */
980 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
981 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
982 index 45df48b..952017a 100644
983 --- a/arch/arm/Kconfig
984 +++ b/arch/arm/Kconfig
985 @@ -1716,7 +1716,7 @@ config ALIGNMENT_TRAP
987 config UACCESS_WITH_MEMCPY
988 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
990 + depends on MMU && !PAX_MEMORY_UDEREF
991 default y if CPU_FEROCEON
993 Implement faster copy_to_user and clear_user methods for CPU
994 @@ -1951,6 +1951,7 @@ config XIP_PHYS_ADDR
996 bool "Kexec system call (EXPERIMENTAL)"
997 depends on (!SMP || PM_SLEEP_SMP)
998 + depends on !GRKERNSEC_KMEM
1000 kexec is a system call that implements the ability to shutdown your
1001 current kernel, and to start another kernel. It is like a reboot
1002 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
1003 index e22c119..abe7041 100644
1004 --- a/arch/arm/include/asm/atomic.h
1005 +++ b/arch/arm/include/asm/atomic.h
1007 #include <asm/barrier.h>
1008 #include <asm/cmpxchg.h>
1010 +#ifdef CONFIG_GENERIC_ATOMIC64
1011 +#include <asm-generic/atomic64.h>
1014 #define ATOMIC_INIT(i) { (i) }
1018 +#ifdef CONFIG_THUMB2_KERNEL
1019 +#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
1021 +#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
1024 +#define _ASM_EXTABLE(from, to) \
1025 +" .pushsection __ex_table,\"a\"\n"\
1027 +" .long " #from ", " #to"\n" \
1031 * On ARM, ordinary assignment (str instruction) doesn't clear the local
1032 * strex/ldrex monitor on some implementations. The reason we can use it for
1033 * atomic_set() is the clrex or dummy strex done on every exception return.
1035 #define atomic_read(v) ACCESS_ONCE((v)->counter)
1036 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
1038 + return ACCESS_ONCE(v->counter);
1040 #define atomic_set(v,i) (((v)->counter) = (i))
1041 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1046 #if __LINUX_ARM_ARCH__ >= 6
1049 * to ensure that the update happens.
1052 -#define ATOMIC_OP(op, c_op, asm_op) \
1053 -static inline void atomic_##op(int i, atomic_t *v) \
1054 +#ifdef CONFIG_PAX_REFCOUNT
1055 +#define __OVERFLOW_POST \
1057 + "2: " REFCOUNT_TRAP_INSN "\n"\
1059 +#define __OVERFLOW_POST_RETURN \
1062 + "2: " REFCOUNT_TRAP_INSN "\n"\
1064 +#define __OVERFLOW_EXTABLE \
1066 + _ASM_EXTABLE(2b, 4b)
1068 +#define __OVERFLOW_POST
1069 +#define __OVERFLOW_POST_RETURN
1070 +#define __OVERFLOW_EXTABLE
1073 +#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
1074 +static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1076 unsigned long tmp; \
1079 prefetchw(&v->counter); \
1080 - __asm__ __volatile__("@ atomic_" #op "\n" \
1081 + __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
1082 "1: ldrex %0, [%3]\n" \
1083 " " #asm_op " %0, %0, %4\n" \
1085 " strex %1, %0, [%3]\n" \
1090 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1091 : "r" (&v->counter), "Ir" (i) \
1095 -#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1096 -static inline int atomic_##op##_return(int i, atomic_t *v) \
1097 +#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op, , )\
1098 + __ATOMIC_OP(op, , c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1100 +#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
1101 +static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1103 unsigned long tmp; \
1105 @@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1107 prefetchw(&v->counter); \
1109 - __asm__ __volatile__("@ atomic_" #op "_return\n" \
1110 + __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
1111 "1: ldrex %0, [%3]\n" \
1112 " " #asm_op " %0, %0, %4\n" \
1114 " strex %1, %0, [%3]\n" \
1119 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1120 : "r" (&v->counter), "Ir" (i) \
1122 @@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1126 +#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op, , )\
1127 + __ATOMIC_OP_RETURN(op, , c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1129 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1132 @@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1133 __asm__ __volatile__ ("@ atomic_add_unless\n"
1134 "1: ldrex %0, [%4]\n"
1137 -" add %1, %0, %6\n"
1139 +" adds %1, %0, %6\n"
1141 +#ifdef CONFIG_PAX_REFCOUNT
1143 +"2: " REFCOUNT_TRAP_INSN "\n"
1147 " strex %2, %1, [%4]\n"
1153 +#ifdef CONFIG_PAX_REFCOUNT
1154 + _ASM_EXTABLE(2b, 4b)
1157 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1158 : "r" (&v->counter), "r" (u), "r" (a)
1160 @@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1164 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1166 + unsigned long oldval, res;
1171 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1172 + "ldrex %1, [%3]\n"
1175 + "strexeq %0, %5, [%3]\n"
1176 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1177 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
1186 #else /* ARM_ARCH_6 */
1189 #error SMP not supported on pre-ARMv6 CPUs
1192 -#define ATOMIC_OP(op, c_op, asm_op) \
1193 -static inline void atomic_##op(int i, atomic_t *v) \
1194 +#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
1195 +static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1197 unsigned long flags; \
1199 @@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
1200 raw_local_irq_restore(flags); \
1203 -#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1204 -static inline int atomic_##op##_return(int i, atomic_t *v) \
1205 +#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
1206 + __ATOMIC_OP(op, _unchecked, c_op, asm_op)
1208 +#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
1209 +static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1211 unsigned long flags; \
1213 @@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1217 +#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
1218 + __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
1220 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1223 @@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1227 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1229 + return atomic_cmpxchg((atomic_t *)v, old, new);
1232 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1235 @@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)
1238 #undef ATOMIC_OP_RETURN
1239 +#undef __ATOMIC_OP_RETURN
1243 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1244 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1246 + return xchg(&v->counter, new);
1249 #define atomic_inc(v) atomic_add(1, v)
1250 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1252 + atomic_add_unchecked(1, v);
1254 #define atomic_dec(v) atomic_sub(1, v)
1255 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1257 + atomic_sub_unchecked(1, v);
1260 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1261 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1263 + return atomic_add_return_unchecked(1, v) == 0;
1265 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1266 #define atomic_inc_return(v) (atomic_add_return(1, v))
1267 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1269 + return atomic_add_return_unchecked(1, v);
1271 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1272 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1274 @@ -216,6 +336,14 @@ typedef struct {
1278 +#ifdef CONFIG_PAX_REFCOUNT
1280 + long long counter;
1281 +} atomic64_unchecked_t;
1283 +typedef atomic64_t atomic64_unchecked_t;
1286 #define ATOMIC64_INIT(i) { (i) }
1288 #ifdef CONFIG_ARM_LPAE
1289 @@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1293 +static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1297 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1298 +" ldrd %0, %H0, [%1]"
1300 + : "r" (&v->counter), "Qo" (v->counter)
1306 static inline void atomic64_set(atomic64_t *v, long long i)
1308 __asm__ __volatile__("@ atomic64_set\n"
1309 @@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1310 : "r" (&v->counter), "r" (i)
1314 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1316 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1317 +" strd %2, %H2, [%1]"
1318 + : "=Qo" (v->counter)
1319 + : "r" (&v->counter), "r" (i)
1323 static inline long long atomic64_read(const atomic64_t *v)
1325 @@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1329 +static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1333 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1334 +" ldrexd %0, %H0, [%1]"
1336 + : "r" (&v->counter), "Qo" (v->counter)
1342 static inline void atomic64_set(atomic64_t *v, long long i)
1345 @@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1346 : "r" (&v->counter), "r" (i)
1350 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1354 + prefetchw(&v->counter);
1355 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1356 +"1: ldrexd %0, %H0, [%2]\n"
1357 +" strexd %0, %3, %H3, [%2]\n"
1360 + : "=&r" (tmp), "=Qo" (v->counter)
1361 + : "r" (&v->counter), "r" (i)
1366 -#define ATOMIC64_OP(op, op1, op2) \
1367 -static inline void atomic64_##op(long long i, atomic64_t *v) \
1368 +#undef __OVERFLOW_POST_RETURN
1369 +#define __OVERFLOW_POST_RETURN \
1372 +" mov %H0, %H1\n" \
1373 + "2: " REFCOUNT_TRAP_INSN "\n"\
1376 +#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
1377 +static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
1380 unsigned long tmp; \
1382 prefetchw(&v->counter); \
1383 - __asm__ __volatile__("@ atomic64_" #op "\n" \
1384 + __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
1385 "1: ldrexd %0, %H0, [%3]\n" \
1386 " " #op1 " %Q0, %Q0, %Q4\n" \
1387 " " #op2 " %R0, %R0, %R4\n" \
1389 " strexd %1, %0, %H0, [%3]\n" \
1394 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1395 : "r" (&v->counter), "r" (i) \
1399 -#define ATOMIC64_OP_RETURN(op, op1, op2) \
1400 -static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1401 +#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2, , ) \
1402 + __ATOMIC64_OP(op, , op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1404 +#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
1405 +static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
1408 unsigned long tmp; \
1409 @@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1411 prefetchw(&v->counter); \
1413 - __asm__ __volatile__("@ atomic64_" #op "_return\n" \
1414 + __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
1415 "1: ldrexd %0, %H0, [%3]\n" \
1416 " " #op1 " %Q0, %Q0, %Q4\n" \
1417 " " #op2 " %R0, %R0, %R4\n" \
1419 " strexd %1, %0, %H0, [%3]\n" \
1424 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1425 : "r" (&v->counter), "r" (i) \
1427 @@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1431 +#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2, , ) \
1432 + __ATOMIC64_OP_RETURN(op, , op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1434 #define ATOMIC64_OPS(op, op1, op2) \
1435 ATOMIC64_OP(op, op1, op2) \
1436 ATOMIC64_OP_RETURN(op, op1, op2)
1437 @@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)
1440 #undef ATOMIC64_OP_RETURN
1441 +#undef __ATOMIC64_OP_RETURN
1443 +#undef __ATOMIC64_OP
1444 +#undef __OVERFLOW_EXTABLE
1445 +#undef __OVERFLOW_POST_RETURN
1446 +#undef __OVERFLOW_POST
1448 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1450 @@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1454 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1458 + unsigned long res;
1463 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1464 + "ldrexd %1, %H1, [%3]\n"
1467 + "teqeq %H1, %H4\n"
1468 + "strexdeq %0, %5, %H5, [%3]"
1469 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1470 + : "r" (&ptr->counter), "r" (old), "r" (new)
1479 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1482 @@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1483 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1486 - unsigned long tmp;
1490 prefetchw(&v->counter);
1492 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1493 -"1: ldrexd %0, %H0, [%3]\n"
1494 -" subs %Q0, %Q0, #1\n"
1495 -" sbc %R0, %R0, #0\n"
1496 +"1: ldrexd %1, %H1, [%3]\n"
1497 +" subs %Q0, %Q1, #1\n"
1498 +" sbcs %R0, %R1, #0\n"
1500 +#ifdef CONFIG_PAX_REFCOUNT
1504 +"2: " REFCOUNT_TRAP_INSN "\n"
1511 " strexd %1, %0, %H0, [%3]\n"
1517 +#ifdef CONFIG_PAX_REFCOUNT
1518 + _ASM_EXTABLE(2b, 4b)
1521 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1524 @@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1530 " adds %Q0, %Q0, %Q6\n"
1531 -" adc %R0, %R0, %R6\n"
1532 +" adcs %R0, %R0, %R6\n"
1534 +#ifdef CONFIG_PAX_REFCOUNT
1536 +"2: " REFCOUNT_TRAP_INSN "\n"
1540 " strexd %2, %0, %H0, [%4]\n"
1546 +#ifdef CONFIG_PAX_REFCOUNT
1547 + _ASM_EXTABLE(2b, 4b)
1550 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1551 : "r" (&v->counter), "r" (u), "r" (a)
1553 @@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1555 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1556 #define atomic64_inc(v) atomic64_add(1LL, (v))
1557 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1558 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1559 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1560 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1561 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1562 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1563 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1564 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1565 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1566 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1567 diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
1568 index d2f81e6..3c4dba5 100644
1569 --- a/arch/arm/include/asm/barrier.h
1570 +++ b/arch/arm/include/asm/barrier.h
1573 compiletime_assert_atomic_type(*p); \
1575 - ACCESS_ONCE(*p) = (v); \
1576 + ACCESS_ONCE_RW(*p) = (v); \
1579 #define smp_load_acquire(p) \
1580 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1581 index 75fe66b..ba3dee4 100644
1582 --- a/arch/arm/include/asm/cache.h
1583 +++ b/arch/arm/include/asm/cache.h
1585 #ifndef __ASMARM_CACHE_H
1586 #define __ASMARM_CACHE_H
1588 +#include <linux/const.h>
1590 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1591 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1592 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1595 * Memory returned by kmalloc() may be used for DMA, so we must make
1599 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1600 +#define __read_only __attribute__ ((__section__(".data..read_only")))
1603 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1604 index 2d46862..a35415b 100644
1605 --- a/arch/arm/include/asm/cacheflush.h
1606 +++ b/arch/arm/include/asm/cacheflush.h
1607 @@ -116,7 +116,7 @@ struct cpu_cache_fns {
1608 void (*dma_unmap_area)(const void *, size_t, int);
1610 void (*dma_flush_range)(const void *, const void *);
1615 * Select the calling method
1616 diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1617 index 5233151..87a71fa 100644
1618 --- a/arch/arm/include/asm/checksum.h
1619 +++ b/arch/arm/include/asm/checksum.h
1620 @@ -37,7 +37,19 @@ __wsum
1621 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1624 -csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1625 +__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1627 +static inline __wsum
1628 +csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1631 + pax_open_userland();
1632 + ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1633 + pax_close_userland();
1640 * Fold a partial checksum without adding pseudo headers
1641 diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1642 index abb2c37..96db950 100644
1643 --- a/arch/arm/include/asm/cmpxchg.h
1644 +++ b/arch/arm/include/asm/cmpxchg.h
1645 @@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1647 #define xchg(ptr,x) \
1648 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1649 +#define xchg_unchecked(ptr,x) \
1650 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1652 #include <asm-generic/cmpxchg-local.h>
1654 diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1655 index 6ddbe44..b5e38b1a 100644
1656 --- a/arch/arm/include/asm/domain.h
1657 +++ b/arch/arm/include/asm/domain.h
1661 #define DOMAIN_NOACCESS 0
1662 -#define DOMAIN_CLIENT 1
1663 #ifdef CONFIG_CPU_USE_DOMAINS
1664 +#define DOMAIN_USERCLIENT 1
1665 +#define DOMAIN_KERNELCLIENT 1
1666 #define DOMAIN_MANAGER 3
1667 +#define DOMAIN_VECTORS DOMAIN_USER
1670 +#ifdef CONFIG_PAX_KERNEXEC
1671 #define DOMAIN_MANAGER 1
1672 +#define DOMAIN_KERNEXEC 3
1674 +#define DOMAIN_MANAGER 1
1677 +#ifdef CONFIG_PAX_MEMORY_UDEREF
1678 +#define DOMAIN_USERCLIENT 0
1679 +#define DOMAIN_UDEREF 1
1680 +#define DOMAIN_VECTORS DOMAIN_KERNEL
1682 +#define DOMAIN_USERCLIENT 1
1683 +#define DOMAIN_VECTORS DOMAIN_USER
1685 +#define DOMAIN_KERNELCLIENT 1
1689 #define domain_val(dom,type) ((type) << (2*(dom)))
1691 #ifndef __ASSEMBLY__
1693 -#ifdef CONFIG_CPU_USE_DOMAINS
1694 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1695 static inline void set_domain(unsigned val)
1698 @@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1702 -#define modify_domain(dom,type) \
1704 - struct thread_info *thread = current_thread_info(); \
1705 - unsigned int domain = thread->cpu_domain; \
1706 - domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1707 - thread->cpu_domain = domain | domain_val(dom, type); \
1708 - set_domain(thread->cpu_domain); \
1711 +extern void modify_domain(unsigned int dom, unsigned int type);
1713 static inline void set_domain(unsigned val) { }
1714 static inline void modify_domain(unsigned dom, unsigned type) { }
1715 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1716 index d2315ff..f60b47b 100644
1717 --- a/arch/arm/include/asm/elf.h
1718 +++ b/arch/arm/include/asm/elf.h
1719 @@ -117,7 +117,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1720 the loader. We need to make sure that it is out of the way of the program
1721 that it will "exec", and that there is sufficient room for the brk. */
1723 -#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1724 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1726 +#ifdef CONFIG_PAX_ASLR
1727 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1729 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1730 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1733 /* When the program starts, a1 contains a pointer to a function to be
1734 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1735 diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1736 index de53547..52b9a28 100644
1737 --- a/arch/arm/include/asm/fncpy.h
1738 +++ b/arch/arm/include/asm/fncpy.h
1740 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1741 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1743 + pax_open_kernel(); \
1744 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1745 + pax_close_kernel(); \
1746 flush_icache_range((unsigned long)(dest_buf), \
1747 (unsigned long)(dest_buf) + (size)); \
1749 diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1750 index 4e78065..f265b48 100644
1751 --- a/arch/arm/include/asm/futex.h
1752 +++ b/arch/arm/include/asm/futex.h
1753 @@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1754 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1757 + pax_open_userland();
1760 /* Prefetching cannot fault */
1762 @@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1766 + pax_close_userland();
1771 @@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1772 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1775 + pax_open_userland();
1777 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1778 "1: " TUSER(ldr) " %1, [%4]\n"
1780 @@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1781 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1784 + pax_close_userland();
1789 @@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1792 pagefault_disable(); /* implies preempt_disable() */
1793 + pax_open_userland();
1797 @@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1801 + pax_close_userland();
1802 pagefault_enable(); /* subsumes preempt_enable() */
1805 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1806 index 83eb2f7..ed77159 100644
1807 --- a/arch/arm/include/asm/kmap_types.h
1808 +++ b/arch/arm/include/asm/kmap_types.h
1811 * This is the "bare minimum". AIO seems to require this.
1813 -#define KM_TYPE_NR 16
1814 +#define KM_TYPE_NR 17
1817 diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1818 index 9e614a1..3302cca 100644
1819 --- a/arch/arm/include/asm/mach/dma.h
1820 +++ b/arch/arm/include/asm/mach/dma.h
1821 @@ -22,7 +22,7 @@ struct dma_ops {
1822 int (*residue)(unsigned int, dma_t *); /* optional */
1823 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1829 void *addr; /* single DMA address */
1830 diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1831 index f98c7f3..e5c626d 100644
1832 --- a/arch/arm/include/asm/mach/map.h
1833 +++ b/arch/arm/include/asm/mach/map.h
1834 @@ -23,17 +23,19 @@ struct map_desc {
1836 /* types 0-3 are defined in asm/io.h */
1841 + MT_UNCACHED_RW = 4,
1850 - MT_MEMORY_RWX_NONCACHED,
1853 + MT_MEMORY_RW_NONCACHED,
1854 + MT_MEMORY_RX_NONCACHED,
1856 - MT_MEMORY_RWX_ITCM,
1857 + MT_MEMORY_RX_ITCM,
1859 MT_MEMORY_DMA_READY,
1861 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1862 index 563b92f..689d58e 100644
1863 --- a/arch/arm/include/asm/outercache.h
1864 +++ b/arch/arm/include/asm/outercache.h
1865 @@ -39,7 +39,7 @@ struct outer_cache_fns {
1866 /* This is an ARM L2C thing */
1867 void (*write_sec)(unsigned long, unsigned);
1868 void (*configure)(const struct l2x0_regs *);
1872 extern struct outer_cache_fns outer_cache;
1874 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1875 index 4355f0e..cd9168e 100644
1876 --- a/arch/arm/include/asm/page.h
1877 +++ b/arch/arm/include/asm/page.h
1882 +#include <linux/compiler.h>
1883 #include <asm/glue.h>
1886 @@ -114,7 +115,7 @@ struct cpu_user_fns {
1887 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1888 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1889 unsigned long vaddr, struct vm_area_struct *vma);
1894 extern struct cpu_user_fns cpu_user;
1895 diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1896 index 19cfab5..3f5c7e9 100644
1897 --- a/arch/arm/include/asm/pgalloc.h
1898 +++ b/arch/arm/include/asm/pgalloc.h
1900 #include <asm/processor.h>
1901 #include <asm/cacheflush.h>
1902 #include <asm/tlbflush.h>
1903 +#include <asm/system_info.h>
1905 #define check_pgt_cache() do { } while (0)
1907 @@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1908 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1911 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1913 + pud_populate(mm, pud, pmd);
1916 #else /* !CONFIG_ARM_LPAE */
1919 @@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1920 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1921 #define pmd_free(mm, pmd) do { } while (0)
1922 #define pud_populate(mm,pmd,pte) BUG()
1923 +#define pud_populate_kernel(mm,pmd,pte) BUG()
1925 #endif /* CONFIG_ARM_LPAE */
1927 @@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1931 +static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1933 +#ifdef CONFIG_ARM_LPAE
1934 + pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1936 + if (addr & SECTION_SIZE)
1937 + pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1939 + pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1941 + flush_pmd_entry(pmdp);
1944 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1947 diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1948 index 5e68278..1869bae 100644
1949 --- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1950 +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1955 -#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1956 +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1957 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1958 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1959 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1961 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1962 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1963 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1964 +#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1966 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1967 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1969 * - extended small page/tiny page
1971 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1972 +#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1973 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1974 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1975 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1976 diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1977 index bfd662e..f6cbb02 100644
1978 --- a/arch/arm/include/asm/pgtable-2level.h
1979 +++ b/arch/arm/include/asm/pgtable-2level.h
1981 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1982 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1984 +/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1985 +#define L_PTE_PXN (_AT(pteval_t, 0))
1988 * These are the memory types, defined to be compatible with
1989 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1990 diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1991 index a745a2a..481350a 100644
1992 --- a/arch/arm/include/asm/pgtable-3level.h
1993 +++ b/arch/arm/include/asm/pgtable-3level.h
1995 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
1996 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1997 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1998 +#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1999 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
2000 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
2001 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
2003 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
2004 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
2005 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
2006 +#define PMD_SECT_RDONLY PMD_SECT_AP2
2009 * To be used in assembly code with the upper page attributes.
2011 +#define L_PTE_PXN_HIGH (1 << (53 - 32))
2012 #define L_PTE_XN_HIGH (1 << (54 - 32))
2013 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
2015 diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2016 index f403541..b10df68 100644
2017 --- a/arch/arm/include/asm/pgtable.h
2018 +++ b/arch/arm/include/asm/pgtable.h
2020 #include <asm/pgtable-2level.h>
2023 +#define ktla_ktva(addr) (addr)
2024 +#define ktva_ktla(addr) (addr)
2027 * Just any arbitrary offset to the start of the vmalloc VM area: the
2028 * current 8MB value just means that there will be a 8MB "hole" after the
2030 #define LIBRARY_TEXT_START 0x0c000000
2032 #ifndef __ASSEMBLY__
2033 +extern pteval_t __supported_pte_mask;
2034 +extern pmdval_t __supported_pmd_mask;
2036 extern void __pte_error(const char *file, int line, pte_t);
2037 extern void __pmd_error(const char *file, int line, pmd_t);
2038 extern void __pgd_error(const char *file, int line, pgd_t);
2039 @@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2040 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2041 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2043 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
2044 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2046 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2047 +#include <asm/domain.h>
2048 +#include <linux/thread_info.h>
2049 +#include <linux/preempt.h>
2051 +static inline int test_domain(int domain, int domaintype)
2053 + return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2057 +#ifdef CONFIG_PAX_KERNEXEC
2058 +static inline unsigned long pax_open_kernel(void) {
2059 +#ifdef CONFIG_ARM_LPAE
2062 + preempt_disable();
2063 + BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2064 + modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2069 +static inline unsigned long pax_close_kernel(void) {
2070 +#ifdef CONFIG_ARM_LPAE
2073 + BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2074 + /* DOMAIN_MANAGER = "client" under KERNEXEC */
2075 + modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2076 + preempt_enable_no_resched();
2081 +static inline unsigned long pax_open_kernel(void) { return 0; }
2082 +static inline unsigned long pax_close_kernel(void) { return 0; }
2086 * This is the lowest virtual address we can permit any user space
2087 * mapping to be mapped at. This is particularly important for
2088 @@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2090 * The pgprot_* and protection_map entries will be fixed up in runtime
2091 * to include the cachable and bufferable bits based on memory policy,
2092 - * as well as any architecture dependent bits like global/ASID and SMP
2093 - * shared mapping bits.
2094 + * as well as any architecture dependent bits like global/ASID, PXN,
2095 + * and SMP shared mapping bits.
2097 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2099 @@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
2100 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2102 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2103 - L_PTE_NONE | L_PTE_VALID;
2104 + L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2105 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2108 diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2109 index c25ef3e..735f14b 100644
2110 --- a/arch/arm/include/asm/psci.h
2111 +++ b/arch/arm/include/asm/psci.h
2112 @@ -32,7 +32,7 @@ struct psci_operations {
2113 int (*affinity_info)(unsigned long target_affinity,
2114 unsigned long lowest_affinity_level);
2115 int (*migrate_info_type)(void);
2119 extern struct psci_operations psci_ops;
2120 extern struct smp_operations psci_smp_ops;
2121 diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2122 index 18f5a55..5072a40 100644
2123 --- a/arch/arm/include/asm/smp.h
2124 +++ b/arch/arm/include/asm/smp.h
2125 @@ -107,7 +107,7 @@ struct smp_operations {
2126 int (*cpu_disable)(unsigned int cpu);
2132 struct of_cpu_method {
2134 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2135 index bd32ede..bd90a0b 100644
2136 --- a/arch/arm/include/asm/thread_info.h
2137 +++ b/arch/arm/include/asm/thread_info.h
2138 @@ -74,9 +74,9 @@ struct thread_info {
2140 .preempt_count = INIT_PREEMPT_COUNT, \
2141 .addr_limit = KERNEL_DS, \
2142 - .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2143 - domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2144 - domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2145 + .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2146 + domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2147 + domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2150 #define init_thread_info (init_thread_union.thread_info)
2151 @@ -152,7 +152,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2152 #define TIF_SYSCALL_AUDIT 9
2153 #define TIF_SYSCALL_TRACEPOINT 10
2154 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2155 -#define TIF_NOHZ 12 /* in adaptive nohz mode */
2156 +/* within 8 bits of TIF_SYSCALL_TRACE
2157 + * to meet flexible second operand requirements
2159 +#define TIF_GRSEC_SETXID 12
2160 +#define TIF_NOHZ 13 /* in adaptive nohz mode */
2161 #define TIF_USING_IWMMXT 17
2162 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2163 #define TIF_RESTORE_SIGMASK 20
2164 @@ -166,10 +170,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2165 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2166 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2167 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2168 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2170 /* Checks for any syscall work in entry-common.S */
2171 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2172 - _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2173 + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2176 * Change these and you break ASM code in entry-common.S
2177 diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2178 index 5f833f7..76e6644 100644
2179 --- a/arch/arm/include/asm/tls.h
2180 +++ b/arch/arm/include/asm/tls.h
2183 #include <linux/compiler.h>
2184 #include <asm/thread_info.h>
2185 +#include <asm/pgtable.h>
2188 #include <asm/asm-offsets.h>
2189 @@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2190 * at 0xffff0fe0 must be used instead. (see
2191 * entry-armv.S for details)
2193 + pax_open_kernel();
2194 *((unsigned int *)0xffff0ff0) = val;
2195 + pax_close_kernel();
2199 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2200 index 74b17d0..57a4bf4 100644
2201 --- a/arch/arm/include/asm/uaccess.h
2202 +++ b/arch/arm/include/asm/uaccess.h
2204 #include <asm/domain.h>
2205 #include <asm/unified.h>
2206 #include <asm/compiler.h>
2207 +#include <asm/pgtable.h>
2209 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2210 #include <asm-generic/uaccess-unaligned.h>
2211 @@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2212 static inline void set_fs(mm_segment_t fs)
2214 current_thread_info()->addr_limit = fs;
2215 - modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2216 + modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2219 #define segment_eq(a, b) ((a) == (b))
2221 +#define __HAVE_ARCH_PAX_OPEN_USERLAND
2222 +#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2224 +static inline void pax_open_userland(void)
2227 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2228 + if (segment_eq(get_fs(), USER_DS)) {
2229 + BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2230 + modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2236 +static inline void pax_close_userland(void)
2239 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2240 + if (segment_eq(get_fs(), USER_DS)) {
2241 + BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2242 + modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2248 #define __addr_ok(addr) ({ \
2249 unsigned long flag; \
2250 __asm__("cmp %2, %0; movlo %0, #0" \
2251 @@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2253 #define get_user(x, p) \
2257 - __get_user_check(x, p); \
2258 + pax_open_userland(); \
2259 + __e = __get_user_check((x), (p)); \
2260 + pax_close_userland(); \
2264 extern int __put_user_1(void *, unsigned int);
2265 @@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2267 #define put_user(x, p) \
2271 - __put_user_check(x, p); \
2272 + pax_open_userland(); \
2273 + __e = __put_user_check((x), (p)); \
2274 + pax_close_userland(); \
2278 #else /* CONFIG_MMU */
2279 @@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2281 #endif /* CONFIG_MMU */
2283 +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
2284 #define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
2286 #define user_addr_max() \
2287 @@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2288 #define __get_user(x, ptr) \
2290 long __gu_err = 0; \
2291 + pax_open_userland(); \
2292 __get_user_err((x), (ptr), __gu_err); \
2293 + pax_close_userland(); \
2297 #define __get_user_error(x, ptr, err) \
2299 + pax_open_userland(); \
2300 __get_user_err((x), (ptr), err); \
2301 + pax_close_userland(); \
2305 @@ -368,13 +409,17 @@ do { \
2306 #define __put_user(x, ptr) \
2308 long __pu_err = 0; \
2309 + pax_open_userland(); \
2310 __put_user_err((x), (ptr), __pu_err); \
2311 + pax_close_userland(); \
2315 #define __put_user_error(x, ptr, err) \
2317 + pax_open_userland(); \
2318 __put_user_err((x), (ptr), err); \
2319 + pax_close_userland(); \
2323 @@ -474,11 +519,44 @@ do { \
2327 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2328 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2329 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2330 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2332 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2334 + unsigned long ret;
2336 + check_object_size(to, n, false);
2337 + pax_open_userland();
2338 + ret = ___copy_from_user(to, from, n);
2339 + pax_close_userland();
2343 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2345 + unsigned long ret;
2347 + check_object_size(from, n, true);
2348 + pax_open_userland();
2349 + ret = ___copy_to_user(to, from, n);
2350 + pax_close_userland();
2354 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2355 -extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2356 +extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2357 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2359 +static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2361 + unsigned long ret;
2362 + pax_open_userland();
2363 + ret = ___clear_user(addr, n);
2364 + pax_close_userland();
2369 #define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
2370 #define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
2371 @@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2373 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2378 if (access_ok(VERIFY_READ, from, n))
2379 n = __copy_from_user(to, from, n);
2380 else /* security hole - plug it */
2381 @@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2383 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2388 if (access_ok(VERIFY_WRITE, to, n))
2389 n = __copy_to_user(to, from, n);
2391 diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2392 index 5af0ed1..cea83883 100644
2393 --- a/arch/arm/include/uapi/asm/ptrace.h
2394 +++ b/arch/arm/include/uapi/asm/ptrace.h
2396 * ARMv7 groups of PSR bits
2398 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2399 -#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2400 +#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2401 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2402 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2404 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2405 index a88671c..1cc895e 100644
2406 --- a/arch/arm/kernel/armksyms.c
2407 +++ b/arch/arm/kernel/armksyms.c
2408 @@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2411 EXPORT_SYMBOL(csum_partial);
2412 -EXPORT_SYMBOL(csum_partial_copy_from_user);
2413 +EXPORT_SYMBOL(__csum_partial_copy_from_user);
2414 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2415 EXPORT_SYMBOL(__csum_ipv6_magic);
2417 @@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2419 EXPORT_SYMBOL(copy_page);
2421 -EXPORT_SYMBOL(__copy_from_user);
2422 -EXPORT_SYMBOL(__copy_to_user);
2423 -EXPORT_SYMBOL(__clear_user);
2424 +EXPORT_SYMBOL(___copy_from_user);
2425 +EXPORT_SYMBOL(___copy_to_user);
2426 +EXPORT_SYMBOL(___clear_user);
2428 EXPORT_SYMBOL(__get_user_1);
2429 EXPORT_SYMBOL(__get_user_2);
2430 diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2431 index 570306c..c87f193 100644
2432 --- a/arch/arm/kernel/entry-armv.S
2433 +++ b/arch/arm/kernel/entry-armv.S
2438 + .macro pax_enter_kernel
2439 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2440 + @ make aligned space for saved DACR
2443 + stmdb sp!, {r1, r2}
2444 + @ read DACR from cpu_domain into r1
2446 + @ assume 8K pages, since we have to split the immediate in two
2447 + bic r2, r2, #(0x1fc0)
2448 + bic r2, r2, #(0x3f)
2449 + ldr r1, [r2, #TI_CPU_DOMAIN]
2450 + @ store old DACR on stack
2452 +#ifdef CONFIG_PAX_KERNEXEC
2453 + @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2454 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2455 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2457 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2458 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2459 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2461 + @ write r1 to current_thread_info()->cpu_domain
2462 + str r1, [r2, #TI_CPU_DOMAIN]
2463 + @ write r1 to DACR
2464 + mcr p15, 0, r1, c3, c0, 0
2465 + @ instruction sync
2468 + ldmia sp!, {r1, r2}
2472 + .macro pax_open_userland
2473 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2475 + stmdb sp!, {r0, r1}
2476 + @ read DACR from cpu_domain into r1
2478 + @ assume 8K pages, since we have to split the immediate in two
2479 + bic r0, r0, #(0x1fc0)
2480 + bic r0, r0, #(0x3f)
2481 + ldr r1, [r0, #TI_CPU_DOMAIN]
2482 + @ set current DOMAIN_USER to DOMAIN_CLIENT
2483 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2484 + orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2485 + @ write r1 to current_thread_info()->cpu_domain
2486 + str r1, [r0, #TI_CPU_DOMAIN]
2487 + @ write r1 to DACR
2488 + mcr p15, 0, r1, c3, c0, 0
2489 + @ instruction sync
2492 + ldmia sp!, {r0, r1}
2496 + .macro pax_close_userland
2497 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2499 + stmdb sp!, {r0, r1}
2500 + @ read DACR from cpu_domain into r1
2502 + @ assume 8K pages, since we have to split the immediate in two
2503 + bic r0, r0, #(0x1fc0)
2504 + bic r0, r0, #(0x3f)
2505 + ldr r1, [r0, #TI_CPU_DOMAIN]
2506 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2507 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2508 + @ write r1 to current_thread_info()->cpu_domain
2509 + str r1, [r0, #TI_CPU_DOMAIN]
2510 + @ write r1 to DACR
2511 + mcr p15, 0, r1, c3, c0, 0
2512 + @ instruction sync
2515 + ldmia sp!, {r0, r1}
2520 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2522 @@ -90,11 +171,15 @@
2523 * Invalid mode handlers
2525 .macro inv_entry, reason
2529 sub sp, sp, #S_FRAME_SIZE
2530 ARM( stmib sp, {r1 - lr} )
2531 THUMB( stmia sp, {r0 - r12} )
2532 THUMB( str sp, [sp, #S_SP] )
2533 THUMB( str lr, [sp, #S_LR] )
2538 @@ -150,7 +235,11 @@ ENDPROC(__und_invalid)
2539 .macro svc_entry, stack_hole=0, trace=1
2541 UNWIND(.save {r0 - pc} )
2545 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2547 #ifdef CONFIG_THUMB2_KERNEL
2548 SPFIX( str r0, [sp] ) @ temporarily saved
2550 @@ -165,7 +254,12 @@ ENDPROC(__und_invalid)
2552 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2553 mov r6, #-1 @ "" "" "" ""
2554 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2555 + @ offset sp by 8 as done in pax_enter_kernel
2556 + add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2558 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2560 SPFIX( addeq r2, r2, #4 )
2561 str r3, [sp, #-4]! @ save the "real" r0 copied
2562 @ from the exception stack
2563 @@ -369,6 +463,9 @@ ENDPROC(__fiq_abt)
2564 .macro usr_entry, trace=1
2566 UNWIND(.cantunwind ) @ don't unwind the user space
2568 + pax_enter_kernel_user
2570 sub sp, sp, #S_FRAME_SIZE
2571 ARM( stmib sp, {r1 - r12} )
2572 THUMB( stmia sp, {r0 - r12} )
2573 @@ -479,7 +576,9 @@ __und_usr:
2574 tst r3, #PSR_T_BIT @ Thumb mode?
2576 sub r4, r2, #4 @ ARM instr at LR - 4
2579 + pax_close_userland
2580 ARM_BE8(rev r0, r0) @ little endian instruction
2582 @ r0 = 32-bit ARM instruction which caused the exception
2583 @@ -513,11 +612,15 @@ __und_usr_thumb:
2589 + pax_close_userland
2590 ARM_BE8(rev16 r5, r5) @ little endian instruction
2591 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2592 blo __und_usr_fault_16 @ 16bit undefined instruction
2595 + pax_close_userland
2596 ARM_BE8(rev16 r0, r0) @ little endian instruction
2597 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2598 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2599 @@ -547,7 +650,8 @@ ENDPROC(__und_usr)
2601 .pushsection .text.fixup, "ax"
2603 -4: str r4, [sp, #S_PC] @ retry current instruction
2604 +4: pax_close_userland
2605 + str r4, [sp, #S_PC] @ retry current instruction
2608 .pushsection __ex_table,"a"
2609 @@ -767,7 +871,7 @@ ENTRY(__switch_to)
2610 THUMB( str lr, [ip], #4 )
2611 ldr r4, [r2, #TI_TP_VALUE]
2612 ldr r5, [r2, #TI_TP_VALUE + 4]
2613 -#ifdef CONFIG_CPU_USE_DOMAINS
2614 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2615 ldr r6, [r2, #TI_CPU_DOMAIN]
2617 switch_tls r1, r4, r5, r3, r7
2618 @@ -776,7 +880,7 @@ ENTRY(__switch_to)
2619 ldr r8, =__stack_chk_guard
2620 ldr r7, [r7, #TSK_STACK_CANARY]
2622 -#ifdef CONFIG_CPU_USE_DOMAINS
2623 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2624 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2627 diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2628 index 4e7f40c..0f9ee2c 100644
2629 --- a/arch/arm/kernel/entry-common.S
2630 +++ b/arch/arm/kernel/entry-common.S
2632 #include <asm/assembler.h>
2633 #include <asm/unistd.h>
2634 #include <asm/ftrace.h>
2635 +#include <asm/domain.h>
2636 #include <asm/unwind.h>
2638 +#include "entry-header.S"
2640 #ifdef CONFIG_NEED_RET_TO_USER
2641 #include <mach/entry-macro.S>
2643 .macro arch_ret_to_user, tmp1, tmp2
2644 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2646 + stmdb sp!, {r1, r2}
2647 + @ read DACR from cpu_domain into r1
2649 + @ assume 8K pages, since we have to split the immediate in two
2650 + bic r2, r2, #(0x1fc0)
2651 + bic r2, r2, #(0x3f)
2652 + ldr r1, [r2, #TI_CPU_DOMAIN]
2653 +#ifdef CONFIG_PAX_KERNEXEC
2654 + @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2655 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2656 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2658 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2659 + @ set current DOMAIN_USER to DOMAIN_UDEREF
2660 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2661 + orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2663 + @ write r1 to current_thread_info()->cpu_domain
2664 + str r1, [r2, #TI_CPU_DOMAIN]
2665 + @ write r1 to DACR
2666 + mcr p15, 0, r1, c3, c0, 0
2667 + @ instruction sync
2670 + ldmia sp!, {r1, r2}
2675 -#include "entry-header.S"
2680 * This is the fast syscall return path. We do as little as
2681 @@ -173,6 +201,12 @@ ENTRY(vector_swi)
2682 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2686 + * do this here to avoid a performance hit of wrapping the code above
2687 + * that directly dereferences userland to parse the SWI instruction
2689 + pax_enter_kernel_user
2691 adr tbl, sys_call_table @ load syscall table pointer
2693 #if defined(CONFIG_OABI_COMPAT)
2694 diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2695 index 1a0045a..9b4f34d 100644
2696 --- a/arch/arm/kernel/entry-header.S
2697 +++ b/arch/arm/kernel/entry-header.S
2698 @@ -196,6 +196,60 @@
2699 msr cpsr_c, \rtemp @ switch back to the SVC mode
2702 + .macro pax_enter_kernel_user
2703 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2705 + stmdb sp!, {r0, r1}
2706 + @ read DACR from cpu_domain into r1
2708 + @ assume 8K pages, since we have to split the immediate in two
2709 + bic r0, r0, #(0x1fc0)
2710 + bic r0, r0, #(0x3f)
2711 + ldr r1, [r0, #TI_CPU_DOMAIN]
2712 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2713 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2714 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2716 +#ifdef CONFIG_PAX_KERNEXEC
2717 + @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2718 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2719 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2721 + @ write r1 to current_thread_info()->cpu_domain
2722 + str r1, [r0, #TI_CPU_DOMAIN]
2723 + @ write r1 to DACR
2724 + mcr p15, 0, r1, c3, c0, 0
2725 + @ instruction sync
2728 + ldmia sp!, {r0, r1}
2732 + .macro pax_exit_kernel
2733 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2735 + stmdb sp!, {r0, r1}
2736 + @ read old DACR from stack into r1
2737 + ldr r1, [sp, #(8 + S_SP)]
2741 + @ write r1 to current_thread_info()->cpu_domain
2743 + @ assume 8K pages, since we have to split the immediate in two
2744 + bic r0, r0, #(0x1fc0)
2745 + bic r0, r0, #(0x3f)
2746 + str r1, [r0, #TI_CPU_DOMAIN]
2747 + @ write r1 to DACR
2748 + mcr p15, 0, r1, c3, c0, 0
2749 + @ instruction sync
2752 + ldmia sp!, {r0, r1}
2756 #ifndef CONFIG_THUMB2_KERNEL
2757 .macro svc_exit, rpsr, irq = 0
2760 blne trace_hardirqs_off
2766 msr spsr_cxsf, \rpsr
2767 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2768 @ We must avoid clrex due to Cortex-A15 erratum #830321
2770 blne trace_hardirqs_off
2776 ldr lr, [sp, #S_SP] @ top of the stack
2777 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2779 diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2780 index 059c3da..8e45cfc 100644
2781 --- a/arch/arm/kernel/fiq.c
2782 +++ b/arch/arm/kernel/fiq.c
2783 @@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
2784 void *base = vectors_page;
2785 unsigned offset = FIQ_OFFSET;
2787 + pax_open_kernel();
2788 memcpy(base + offset, start, length);
2789 + pax_close_kernel();
2791 if (!cache_is_vipt_nonaliasing())
2792 flush_icache_range((unsigned long)base + offset, offset +
2794 diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2795 index 3637973..cb29657 100644
2796 --- a/arch/arm/kernel/head.S
2797 +++ b/arch/arm/kernel/head.S
2798 @@ -444,7 +444,7 @@ __enable_mmu:
2799 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2800 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2801 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2802 - domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2803 + domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2804 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2805 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2807 diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2808 index af791f4..3ff9821 100644
2809 --- a/arch/arm/kernel/module.c
2810 +++ b/arch/arm/kernel/module.c
2815 -void *module_alloc(unsigned long size)
2816 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2818 + if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2820 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2821 - GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
2822 + GFP_KERNEL, prot, 0, NUMA_NO_NODE,
2823 __builtin_return_address(0));
2826 +void *module_alloc(unsigned long size)
2829 +#ifdef CONFIG_PAX_KERNEXEC
2830 + return __module_alloc(size, PAGE_KERNEL);
2832 + return __module_alloc(size, PAGE_KERNEL_EXEC);
2837 +#ifdef CONFIG_PAX_KERNEXEC
2838 +void module_memfree_exec(void *module_region)
2840 + module_memfree(module_region);
2842 +EXPORT_SYMBOL(module_memfree_exec);
2844 +void *module_alloc_exec(unsigned long size)
2846 + return __module_alloc(size, PAGE_KERNEL_EXEC);
2848 +EXPORT_SYMBOL(module_alloc_exec);
2853 diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2854 index 69bda1a..755113a 100644
2855 --- a/arch/arm/kernel/patch.c
2856 +++ b/arch/arm/kernel/patch.c
2857 @@ -66,6 +66,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2859 __acquire(&patch_lock);
2861 + pax_open_kernel();
2862 if (thumb2 && __opcode_is_thumb16(insn)) {
2863 *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
2865 @@ -97,6 +98,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2866 *(u32 *)waddr = insn;
2869 + pax_close_kernel();
2871 if (waddr != addr) {
2872 flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
2873 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2874 index f192a2a..1a40523 100644
2875 --- a/arch/arm/kernel/process.c
2876 +++ b/arch/arm/kernel/process.c
2877 @@ -105,8 +105,8 @@ void __show_regs(struct pt_regs *regs)
2879 show_regs_print_info(KERN_DEFAULT);
2881 - print_symbol("PC is at %s\n", instruction_pointer(regs));
2882 - print_symbol("LR is at %s\n", regs->ARM_lr);
2883 + printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2884 + printk("LR is at %pA\n", (void *)regs->ARM_lr);
2885 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2886 "sp : %08lx ip : %08lx fp : %08lx\n",
2887 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2888 @@ -283,12 +283,6 @@ unsigned long get_wchan(struct task_struct *p)
2892 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2894 - unsigned long range_end = mm->brk + 0x02000000;
2895 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2899 #ifdef CONFIG_KUSER_HELPERS
2901 @@ -304,7 +298,7 @@ static struct vm_area_struct gate_vma = {
2903 static int __init gate_vma_init(void)
2905 - gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2906 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2909 arch_initcall(gate_vma_init);
2910 @@ -333,91 +327,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
2911 return is_gate_vma(vma) ? "[vectors]" : NULL;
2914 -/* If possible, provide a placement hint at a random offset from the
2915 - * stack for the sigpage and vdso pages.
2917 -static unsigned long sigpage_addr(const struct mm_struct *mm,
2918 - unsigned int npages)
2920 - unsigned long offset;
2921 - unsigned long first;
2922 - unsigned long last;
2923 - unsigned long addr;
2924 - unsigned int slots;
2926 - first = PAGE_ALIGN(mm->start_stack);
2928 - last = TASK_SIZE - (npages << PAGE_SHIFT);
2930 - /* No room after stack? */
2934 - /* Just enough room? */
2935 - if (first == last)
2938 - slots = ((last - first) >> PAGE_SHIFT) + 1;
2940 - offset = get_random_int() % slots;
2942 - addr = first + (offset << PAGE_SHIFT);
2947 -static struct page *signal_page;
2948 -extern struct page *get_signal_page(void);
2950 -static const struct vm_special_mapping sigpage_mapping = {
2951 - .name = "[sigpage]",
2952 - .pages = &signal_page,
2955 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2957 struct mm_struct *mm = current->mm;
2958 - struct vm_area_struct *vma;
2959 - unsigned long npages;
2960 - unsigned long addr;
2961 - unsigned long hint;
2965 - signal_page = get_signal_page();
2969 - npages = 1; /* for sigpage */
2970 - npages += vdso_total_pages;
2972 down_write(&mm->mmap_sem);
2973 - hint = sigpage_addr(mm, npages);
2974 - addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
2975 - if (IS_ERR_VALUE(addr)) {
2980 - vma = _install_special_mapping(mm, addr, PAGE_SIZE,
2981 - VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2982 - &sigpage_mapping);
2984 - if (IS_ERR(vma)) {
2985 - ret = PTR_ERR(vma);
2989 - mm->context.sigpage = addr;
2991 - /* Unlike the sigpage, failure to install the vdso is unlikely
2992 - * to be fatal to the process, so no error check needed
2995 - arm_install_vdso(mm, addr + PAGE_SIZE);
2998 + mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2999 up_write(&mm->mmap_sem);
3004 diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
3005 index f90fdf4..24e8c84 100644
3006 --- a/arch/arm/kernel/psci.c
3007 +++ b/arch/arm/kernel/psci.c
3009 #include <asm/psci.h>
3010 #include <asm/system_misc.h>
3012 -struct psci_operations psci_ops;
3013 +struct psci_operations psci_ops __read_only;
3015 static int (*invoke_psci_fn)(u32, u32, u32, u32);
3016 typedef int (*psci_initcall_t)(const struct device_node *);
3017 diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3018 index ef9119f..31995a3 100644
3019 --- a/arch/arm/kernel/ptrace.c
3020 +++ b/arch/arm/kernel/ptrace.c
3021 @@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3025 +#ifdef CONFIG_GRKERNSEC_SETXID
3026 +extern void gr_delayed_cred_worker(void);
3029 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3031 current_thread_info()->syscall = scno;
3033 +#ifdef CONFIG_GRKERNSEC_SETXID
3034 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3035 + gr_delayed_cred_worker();
3038 /* Do the secure computing check first; failures should be fast. */
3039 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
3040 if (secure_computing() == -1)
3041 diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
3042 index 1a4d232..2677169 100644
3043 --- a/arch/arm/kernel/reboot.c
3044 +++ b/arch/arm/kernel/reboot.c
3045 @@ -122,6 +122,7 @@ void machine_power_off(void)
3053 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3054 index 6c777e9..3d2d0ca 100644
3055 --- a/arch/arm/kernel/setup.c
3056 +++ b/arch/arm/kernel/setup.c
3057 @@ -105,21 +105,23 @@ EXPORT_SYMBOL(elf_hwcap);
3058 unsigned int elf_hwcap2 __read_mostly;
3059 EXPORT_SYMBOL(elf_hwcap2);
3061 +pteval_t __supported_pte_mask __read_only;
3062 +pmdval_t __supported_pmd_mask __read_only;
3065 -struct processor processor __read_mostly;
3066 +struct processor processor __read_only;
3069 -struct cpu_tlb_fns cpu_tlb __read_mostly;
3070 +struct cpu_tlb_fns cpu_tlb __read_only;
3073 -struct cpu_user_fns cpu_user __read_mostly;
3074 +struct cpu_user_fns cpu_user __read_only;
3077 -struct cpu_cache_fns cpu_cache __read_mostly;
3078 +struct cpu_cache_fns cpu_cache __read_only;
3080 #ifdef CONFIG_OUTER_CACHE
3081 -struct outer_cache_fns outer_cache __read_mostly;
3082 +struct outer_cache_fns outer_cache __read_only;
3083 EXPORT_SYMBOL(outer_cache);
3086 @@ -250,9 +252,13 @@ static int __get_cpu_architecture(void)
3087 * Register 0 and check for VMSAv7 or PMSAv7 */
3088 unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
3089 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3090 - (mmfr0 & 0x000000f0) >= 0x00000030)
3091 + (mmfr0 & 0x000000f0) >= 0x00000030) {
3092 cpu_arch = CPU_ARCH_ARMv7;
3093 - else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3094 + if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3095 + __supported_pte_mask |= L_PTE_PXN;
3096 + __supported_pmd_mask |= PMD_PXNTABLE;
3098 + } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3099 (mmfr0 & 0x000000f0) == 0x00000020)
3100 cpu_arch = CPU_ARCH_ARMv6;
3102 diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3103 index 423663e..bfeb0ff 100644
3104 --- a/arch/arm/kernel/signal.c
3105 +++ b/arch/arm/kernel/signal.c
3108 extern const unsigned long sigreturn_codes[7];
3110 -static unsigned long signal_return_offset;
3112 #ifdef CONFIG_CRUNCH
3113 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3115 @@ -385,8 +383,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3116 * except when the MPU has protected the vectors
3119 - retcode = mm->context.sigpage + signal_return_offset +
3120 - (idx << 2) + thumb;
3121 + retcode = mm->context.sigpage + (idx << 2) + thumb;
3125 @@ -592,33 +589,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3126 } while (thread_flags & _TIF_WORK_MASK);
3130 -struct page *get_signal_page(void)
3132 - unsigned long ptr;
3134 - struct page *page;
3137 - page = alloc_pages(GFP_KERNEL, 0);
3142 - addr = page_address(page);
3144 - /* Give the signal return code some randomness */
3145 - offset = 0x200 + (get_random_int() & 0x7fc);
3146 - signal_return_offset = offset;
3149 - * Copy signal return handlers into the vector page, and
3150 - * set sigreturn to be a pointer to these.
3152 - memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3154 - ptr = (unsigned long)addr + offset;
3155 - flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3159 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3160 index cca5b87..68f0f73 100644
3161 --- a/arch/arm/kernel/smp.c
3162 +++ b/arch/arm/kernel/smp.c
3163 @@ -76,7 +76,7 @@ enum ipi_msg_type {
3165 static DECLARE_COMPLETION(cpu_running);
3167 -static struct smp_operations smp_ops;
3168 +static struct smp_operations smp_ops __read_only;
3170 void __init smp_set_ops(struct smp_operations *ops)
3172 diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3173 index 7a3be1d..b00c7de 100644
3174 --- a/arch/arm/kernel/tcm.c
3175 +++ b/arch/arm/kernel/tcm.c
3176 @@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3177 .virtual = ITCM_OFFSET,
3178 .pfn = __phys_to_pfn(ITCM_OFFSET),
3180 - .type = MT_MEMORY_RWX_ITCM,
3181 + .type = MT_MEMORY_RX_ITCM,
3185 @@ -267,7 +267,9 @@ no_dtcm:
3186 start = &__sitcm_text;
3187 end = &__eitcm_text;
3188 ram = &__itcm_start;
3189 + pax_open_kernel();
3190 memcpy(start, ram, itcm_code_sz);
3191 + pax_close_kernel();
3192 pr_debug("CPU ITCM: copied code from %p - %p\n",
3194 itcm_present = true;
3195 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3196 index 3dce1a3..60e857f 100644
3197 --- a/arch/arm/kernel/traps.c
3198 +++ b/arch/arm/kernel/traps.c
3199 @@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3200 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3202 #ifdef CONFIG_KALLSYMS
3203 - printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3204 + printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3206 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3208 @@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3209 static int die_owner = -1;
3210 static unsigned int die_nest_count;
3212 +extern void gr_handle_kernel_exploit(void);
3214 static unsigned long oops_begin(void)
3217 @@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3218 panic("Fatal exception in interrupt");
3220 panic("Fatal exception");
3222 + gr_handle_kernel_exploit();
3227 @@ -878,7 +883,11 @@ void __init early_trap_init(void *vectors_base)
3228 kuser_init(vectors_base);
3230 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3231 - modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3233 +#ifndef CONFIG_PAX_MEMORY_UDEREF
3234 + modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3237 #else /* ifndef CONFIG_CPU_V7M */
3239 * on V7-M there is no need to copy the vector table to a dedicated
3240 diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3241 index 8b60fde..8d986dd 100644
3242 --- a/arch/arm/kernel/vmlinux.lds.S
3243 +++ b/arch/arm/kernel/vmlinux.lds.S
3247 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3248 - defined(CONFIG_GENERIC_BUG)
3249 + defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3250 #define ARM_EXIT_KEEP(x) x
3251 #define ARM_EXIT_DISCARD(x)
3253 @@ -120,6 +120,8 @@ SECTIONS
3254 #ifdef CONFIG_DEBUG_RODATA
3255 . = ALIGN(1<<SECTION_SHIFT);
3257 + _etext = .; /* End of text section */
3262 @@ -150,8 +152,6 @@ SECTIONS
3266 - _etext = .; /* End of text and rodata section */
3268 #ifndef CONFIG_XIP_KERNEL
3269 # ifdef CONFIG_ARM_KERNMEM_PERMS
3270 . = ALIGN(1<<SECTION_SHIFT);
3271 diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3272 index d9631ec..b0c966c 100644
3273 --- a/arch/arm/kvm/arm.c
3274 +++ b/arch/arm/kvm/arm.c
3275 @@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3276 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3278 /* The VMID used in the VTTBR */
3279 -static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3280 +static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3281 static u8 kvm_next_vmid;
3282 static DEFINE_SPINLOCK(kvm_vmid_lock);
3284 @@ -373,7 +373,7 @@ void force_vm_exit(const cpumask_t *mask)
3286 static bool need_new_vmid_gen(struct kvm *kvm)
3288 - return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3289 + return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3293 @@ -406,7 +406,7 @@ static void update_vttbr(struct kvm *kvm)
3295 /* First user of a new VMID generation? */
3296 if (unlikely(kvm_next_vmid == 0)) {
3297 - atomic64_inc(&kvm_vmid_gen);
3298 + atomic64_inc_unchecked(&kvm_vmid_gen);
3302 @@ -423,7 +423,7 @@ static void update_vttbr(struct kvm *kvm)
3303 kvm_call_hyp(__kvm_flush_vm_context);
3306 - kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3307 + kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3308 kvm->arch.vmid = kvm_next_vmid;
3311 @@ -1098,7 +1098,7 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
3313 * Initialize Hyp-mode and memory mappings on all CPUs.
3315 -int kvm_arch_init(void *opaque)
3316 +int kvm_arch_init(const void *opaque)
3320 diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3321 index 1710fd7..ec3e014 100644
3322 --- a/arch/arm/lib/clear_user.S
3323 +++ b/arch/arm/lib/clear_user.S
3328 -/* Prototype: int __clear_user(void *addr, size_t sz)
3329 +/* Prototype: int ___clear_user(void *addr, size_t sz)
3330 * Purpose : clear some user memory
3331 * Params : addr - user memory address to clear
3332 * : sz - number of bytes to clear
3333 * Returns : number of bytes NOT cleared
3335 ENTRY(__clear_user_std)
3337 +WEAK(___clear_user)
3341 @@ -44,7 +44,7 @@ WEAK(__clear_user)
3342 USER( strnebt r2, [r0])
3345 -ENDPROC(__clear_user)
3346 +ENDPROC(___clear_user)
3347 ENDPROC(__clear_user_std)
3349 .pushsection .text.fixup,"ax"
3350 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3351 index 7a235b9..73a0556 100644
3352 --- a/arch/arm/lib/copy_from_user.S
3353 +++ b/arch/arm/lib/copy_from_user.S
3358 - * size_t __copy_from_user(void *to, const void *from, size_t n)
3359 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
3367 -ENTRY(__copy_from_user)
3368 +ENTRY(___copy_from_user)
3370 #include "copy_template.S"
3372 -ENDPROC(__copy_from_user)
3373 +ENDPROC(___copy_from_user)
3375 .pushsection .fixup,"ax"
3377 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3378 index 6ee2f67..d1cce76 100644
3379 --- a/arch/arm/lib/copy_page.S
3380 +++ b/arch/arm/lib/copy_page.S
3382 * ASM optimised string functions
3384 #include <linux/linkage.h>
3385 +#include <linux/const.h>
3386 #include <asm/assembler.h>
3387 #include <asm/asm-offsets.h>
3388 #include <asm/cache.h>
3389 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3390 index 9648b06..19c333c 100644
3391 --- a/arch/arm/lib/copy_to_user.S
3392 +++ b/arch/arm/lib/copy_to_user.S
3397 - * size_t __copy_to_user(void *to, const void *from, size_t n)
3398 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
3405 ENTRY(__copy_to_user_std)
3406 -WEAK(__copy_to_user)
3407 +WEAK(___copy_to_user)
3409 #include "copy_template.S"
3411 -ENDPROC(__copy_to_user)
3412 +ENDPROC(___copy_to_user)
3413 ENDPROC(__copy_to_user_std)
3415 .pushsection .text.fixup,"ax"
3416 diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3417 index 1d0957e..f708846 100644
3418 --- a/arch/arm/lib/csumpartialcopyuser.S
3419 +++ b/arch/arm/lib/csumpartialcopyuser.S
3421 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3424 -#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3425 -#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3426 +#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3427 +#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3429 #include "csumpartialcopygeneric.S"
3431 diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3432 index 8044591..c9b2609 100644
3433 --- a/arch/arm/lib/delay.c
3434 +++ b/arch/arm/lib/delay.c
3437 * Default to the loop-based delay implementation.
3439 -struct arm_delay_ops arm_delay_ops = {
3440 +struct arm_delay_ops arm_delay_ops __read_only = {
3441 .delay = __loop_delay,
3442 .const_udelay = __loop_const_udelay,
3443 .udelay = __loop_udelay,
3444 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3445 index 3e58d71..029817c 100644
3446 --- a/arch/arm/lib/uaccess_with_memcpy.c
3447 +++ b/arch/arm/lib/uaccess_with_memcpy.c
3448 @@ -136,7 +136,7 @@ out:
3452 -__copy_to_user(void __user *to, const void *from, unsigned long n)
3453 +___copy_to_user(void __user *to, const void *from, unsigned long n)
3456 * This test is stubbed out of the main function above to keep
3457 @@ -190,7 +190,7 @@ out:
3461 -unsigned long __clear_user(void __user *addr, unsigned long n)
3462 +unsigned long ___clear_user(void __user *addr, unsigned long n)
3464 /* See rational for this in __copy_to_user() above. */
3466 diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
3467 index 7d23ce0..5ef383a 100644
3468 --- a/arch/arm/mach-exynos/suspend.c
3469 +++ b/arch/arm/mach-exynos/suspend.c
3470 @@ -738,8 +738,10 @@ void __init exynos_pm_init(void)
3471 tmp |= pm_data->wake_disable_mask;
3472 pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
3474 - exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3475 - exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3476 + pax_open_kernel();
3477 + *(void **)&exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3478 + *(void **)&exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3479 + pax_close_kernel();
3481 register_syscore_ops(&exynos_pm_syscore_ops);
3482 suspend_set_ops(&exynos_suspend_ops);
3483 diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
3484 index 0662087..004d163 100644
3485 --- a/arch/arm/mach-keystone/keystone.c
3486 +++ b/arch/arm/mach-keystone/keystone.c
3489 #include "keystone.h"
3491 -static struct notifier_block platform_nb;
3492 +static notifier_block_no_const platform_nb;
3493 static unsigned long keystone_dma_pfn_offset __read_mostly;
3495 static int keystone_platform_notifier(struct notifier_block *nb,
3496 diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3497 index e46e9ea..9141c83 100644
3498 --- a/arch/arm/mach-mvebu/coherency.c
3499 +++ b/arch/arm/mach-mvebu/coherency.c
3500 @@ -117,7 +117,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3503 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3504 - * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3505 + * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3506 * is needed as a workaround for a deadlock issue between the PCIe
3507 * interface and the cache controller.
3509 @@ -130,7 +130,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3510 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3512 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3513 - mtype = MT_UNCACHED;
3514 + mtype = MT_UNCACHED_RW;
3516 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3518 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3519 index b6443a4..20a0b74 100644
3520 --- a/arch/arm/mach-omap2/board-n8x0.c
3521 +++ b/arch/arm/mach-omap2/board-n8x0.c
3522 @@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3526 -struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3527 +struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3528 .late_init = n8x0_menelaus_late_init,
3531 diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3532 index 79f49d9..70bf184 100644
3533 --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3534 +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3535 @@ -86,7 +86,7 @@ struct cpu_pm_ops {
3536 void (*resume)(void);
3537 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3538 void (*hotplug_restart)(void);
3542 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3543 static struct powerdomain *mpuss_pd;
3544 @@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
3545 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3548 -struct cpu_pm_ops omap_pm_ops = {
3549 +static struct cpu_pm_ops omap_pm_ops __read_only = {
3550 .finish_suspend = default_finish_suspend,
3551 .resume = dummy_cpu_resume,
3552 .scu_prepare = dummy_scu_prepare,
3553 diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
3554 index 5305ec7..6d74045 100644
3555 --- a/arch/arm/mach-omap2/omap-smp.c
3556 +++ b/arch/arm/mach-omap2/omap-smp.c
3558 #include <linux/device.h>
3559 #include <linux/smp.h>
3560 #include <linux/io.h>
3561 +#include <linux/irq.h>
3562 #include <linux/irqchip/arm-gic.h>
3564 #include <asm/smp_scu.h>
3565 diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3566 index 3b56722..33ac281 100644
3567 --- a/arch/arm/mach-omap2/omap-wakeupgen.c
3568 +++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3569 @@ -330,7 +330,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3573 -static struct notifier_block __refdata irq_hotplug_notifier = {
3574 +static struct notifier_block irq_hotplug_notifier = {
3575 .notifier_call = irq_cpu_hotplug_notify,
3578 diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3579 index 166b18f..f985f04 100644
3580 --- a/arch/arm/mach-omap2/omap_device.c
3581 +++ b/arch/arm/mach-omap2/omap_device.c
3582 @@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3583 struct platform_device __init *omap_device_build(const char *pdev_name,
3585 struct omap_hwmod *oh,
3586 - void *pdata, int pdata_len)
3587 + const void *pdata, int pdata_len)
3589 struct omap_hwmod *ohs[] = { oh };
3591 @@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3592 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3594 struct omap_hwmod **ohs,
3595 - int oh_cnt, void *pdata,
3596 + int oh_cnt, const void *pdata,
3600 diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3601 index 78c02b3..c94109a 100644
3602 --- a/arch/arm/mach-omap2/omap_device.h
3603 +++ b/arch/arm/mach-omap2/omap_device.h
3604 @@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3605 /* Core code interface */
3607 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3608 - struct omap_hwmod *oh, void *pdata,
3609 + struct omap_hwmod *oh, const void *pdata,
3612 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3613 struct omap_hwmod **oh, int oh_cnt,
3614 - void *pdata, int pdata_len);
3615 + const void *pdata, int pdata_len);
3617 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3618 struct omap_hwmod **ohs, int oh_cnt);
3619 diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3620 index 752969f..a34b446 100644
3621 --- a/arch/arm/mach-omap2/omap_hwmod.c
3622 +++ b/arch/arm/mach-omap2/omap_hwmod.c
3623 @@ -199,10 +199,10 @@ struct omap_hwmod_soc_ops {
3624 int (*init_clkdm)(struct omap_hwmod *oh);
3625 void (*update_context_lost)(struct omap_hwmod *oh);
3626 int (*get_context_lost)(struct omap_hwmod *oh);
3630 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3631 -static struct omap_hwmod_soc_ops soc_ops;
3632 +static struct omap_hwmod_soc_ops soc_ops __read_only;
3634 /* omap_hwmod_list contains all registered struct omap_hwmods */
3635 static LIST_HEAD(omap_hwmod_list);
3636 diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3637 index 95fee54..cfa9cf1 100644
3638 --- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3639 +++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3642 #include <linux/kernel.h>
3643 #include <linux/init.h>
3644 +#include <asm/pgtable.h>
3646 #include "powerdomain.h"
3648 @@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3650 void __init am43xx_powerdomains_init(void)
3652 - omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3653 + pax_open_kernel();
3654 + *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3655 + pax_close_kernel();
3656 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3657 pwrdm_register_pwrdms(powerdomains_am43xx);
3658 pwrdm_complete_init();
3659 diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3660 index ff0a68c..b312aa0 100644
3661 --- a/arch/arm/mach-omap2/wd_timer.c
3662 +++ b/arch/arm/mach-omap2/wd_timer.c
3663 @@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3664 struct omap_hwmod *oh;
3665 char *oh_name = "wd_timer2";
3666 char *dev_name = "omap_wdt";
3667 - struct omap_wd_timer_platform_data pdata;
3668 + static struct omap_wd_timer_platform_data pdata = {
3669 + .read_reset_sources = prm_read_reset_sources
3672 if (!cpu_class_is_omap2() || of_have_populated_dt())
3674 @@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3678 - pdata.read_reset_sources = prm_read_reset_sources;
3680 pdev = omap_device_build(dev_name, id, oh, &pdata,
3681 sizeof(struct omap_wd_timer_platform_data));
3682 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3683 diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3684 index 7469347..1ecc350 100644
3685 --- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3686 +++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3687 @@ -177,7 +177,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3688 bool entered_lp2 = false;
3690 if (tegra_pending_sgi())
3691 - ACCESS_ONCE(abort_flag) = true;
3692 + ACCESS_ONCE_RW(abort_flag) = true;
3694 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3696 diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
3697 index 3b9098d..15b390f 100644
3698 --- a/arch/arm/mach-tegra/irq.c
3699 +++ b/arch/arm/mach-tegra/irq.c
3701 #include <linux/cpu_pm.h>
3702 #include <linux/interrupt.h>
3703 #include <linux/io.h>
3704 +#include <linux/irq.h>
3705 #include <linux/irqchip/arm-gic.h>
3706 #include <linux/irq.h>
3707 #include <linux/kernel.h>
3708 diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
3709 index 2cb587b..6ddfebf 100644
3710 --- a/arch/arm/mach-ux500/pm.c
3711 +++ b/arch/arm/mach-ux500/pm.c
3715 #include <linux/kernel.h>
3716 +#include <linux/irq.h>
3717 #include <linux/irqchip/arm-gic.h>
3718 #include <linux/delay.h>
3719 #include <linux/io.h>
3720 diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3721 index 2dea8b5..6499da2 100644
3722 --- a/arch/arm/mach-ux500/setup.h
3723 +++ b/arch/arm/mach-ux500/setup.h
3724 @@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3725 .type = MT_DEVICE, \
3728 -#define __MEM_DEV_DESC(x, sz) { \
3729 - .virtual = IO_ADDRESS(x), \
3730 - .pfn = __phys_to_pfn(x), \
3732 - .type = MT_MEMORY_RWX, \
3735 extern struct smp_operations ux500_smp_ops;
3736 extern void ux500_cpu_die(unsigned int cpu);
3738 diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
3739 index 52d768f..5f93180 100644
3740 --- a/arch/arm/mach-zynq/platsmp.c
3741 +++ b/arch/arm/mach-zynq/platsmp.c
3743 #include <linux/io.h>
3744 #include <asm/cacheflush.h>
3745 #include <asm/smp_scu.h>
3746 +#include <linux/irq.h>
3747 #include <linux/irqchip/arm-gic.h>
3750 diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3751 index b4f92b9..ffefea9 100644
3752 --- a/arch/arm/mm/Kconfig
3753 +++ b/arch/arm/mm/Kconfig
3754 @@ -446,6 +446,7 @@ config CPU_32v5
3758 + select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3759 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3762 @@ -600,6 +601,7 @@ config CPU_CP15_MPU
3764 config CPU_USE_DOMAINS
3766 + depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3768 This option enables or disables the use of domain switching
3769 via the set_fs() function.
3770 @@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
3772 config KUSER_HELPERS
3773 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3775 + depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
3778 Warning: disabling this option may break user programs.
3779 @@ -812,7 +814,7 @@ config KUSER_HELPERS
3780 See Documentation/arm/kernel_user_helpers.txt for details.
3782 However, the fixed address nature of these helpers can be used
3783 - by ROP (return orientated programming) authors when creating
3784 + by ROP (Return Oriented Programming) authors when creating
3787 If all of the binaries and libraries which run on your platform
3788 diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3789 index 9769f1e..16aaa55 100644
3790 --- a/arch/arm/mm/alignment.c
3791 +++ b/arch/arm/mm/alignment.c
3792 @@ -216,10 +216,12 @@ union offset_union {
3793 #define __get16_unaligned_check(ins,val,addr) \
3795 unsigned int err = 0, v, a = addr; \
3796 + pax_open_userland(); \
3797 __get8_unaligned_check(ins,v,a,err); \
3798 val = v << ((BE) ? 8 : 0); \
3799 __get8_unaligned_check(ins,v,a,err); \
3800 val |= v << ((BE) ? 0 : 8); \
3801 + pax_close_userland(); \
3805 @@ -233,6 +235,7 @@ union offset_union {
3806 #define __get32_unaligned_check(ins,val,addr) \
3808 unsigned int err = 0, v, a = addr; \
3809 + pax_open_userland(); \
3810 __get8_unaligned_check(ins,v,a,err); \
3811 val = v << ((BE) ? 24 : 0); \
3812 __get8_unaligned_check(ins,v,a,err); \
3813 @@ -241,6 +244,7 @@ union offset_union {
3814 val |= v << ((BE) ? 8 : 16); \
3815 __get8_unaligned_check(ins,v,a,err); \
3816 val |= v << ((BE) ? 0 : 24); \
3817 + pax_close_userland(); \
3821 @@ -254,6 +258,7 @@ union offset_union {
3822 #define __put16_unaligned_check(ins,val,addr) \
3824 unsigned int err = 0, v = val, a = addr; \
3825 + pax_open_userland(); \
3826 __asm__( FIRST_BYTE_16 \
3827 ARM( "1: "ins" %1, [%2], #1\n" ) \
3828 THUMB( "1: "ins" %1, [%2]\n" ) \
3829 @@ -273,6 +278,7 @@ union offset_union {
3831 : "=r" (err), "=&r" (v), "=&r" (a) \
3832 : "0" (err), "1" (v), "2" (a)); \
3833 + pax_close_userland(); \
3837 @@ -286,6 +292,7 @@ union offset_union {
3838 #define __put32_unaligned_check(ins,val,addr) \
3840 unsigned int err = 0, v = val, a = addr; \
3841 + pax_open_userland(); \
3842 __asm__( FIRST_BYTE_32 \
3843 ARM( "1: "ins" %1, [%2], #1\n" ) \
3844 THUMB( "1: "ins" %1, [%2]\n" ) \
3845 @@ -315,6 +322,7 @@ union offset_union {
3847 : "=r" (err), "=&r" (v), "=&r" (a) \
3848 : "0" (err), "1" (v), "2" (a)); \
3849 + pax_close_userland(); \
3853 diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3854 index e309c8f..f8965e8 100644
3855 --- a/arch/arm/mm/cache-l2x0.c
3856 +++ b/arch/arm/mm/cache-l2x0.c
3857 @@ -43,7 +43,7 @@ struct l2c_init_data {
3858 void (*save)(void __iomem *);
3859 void (*configure)(void __iomem *);
3860 struct outer_cache_fns outer_cache;
3864 #define CACHE_LINE_SIZE 32
3866 diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3867 index 845769e..4278fd7 100644
3868 --- a/arch/arm/mm/context.c
3869 +++ b/arch/arm/mm/context.c
3871 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3873 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3874 -static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3875 +static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3876 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3878 static DEFINE_PER_CPU(atomic64_t, active_asids);
3879 @@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3881 static u32 cur_idx = 1;
3882 u64 asid = atomic64_read(&mm->context.id);
3883 - u64 generation = atomic64_read(&asid_generation);
3884 + u64 generation = atomic64_read_unchecked(&asid_generation);
3888 @@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3890 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
3891 if (asid == NUM_USER_ASIDS) {
3892 - generation = atomic64_add_return(ASID_FIRST_VERSION,
3893 + generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3896 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3897 @@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3898 cpu_set_reserved_ttbr0();
3900 asid = atomic64_read(&mm->context.id);
3901 - if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3902 + if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3903 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3904 goto switch_mm_fastpath;
3906 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3907 /* Check that our ASID belongs to the current generation. */
3908 asid = atomic64_read(&mm->context.id);
3909 - if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3910 + if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3911 asid = new_context(mm, cpu);
3912 atomic64_set(&mm->context.id, asid);
3914 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3915 index 6333d9c..3bb19f2 100644
3916 --- a/arch/arm/mm/fault.c
3917 +++ b/arch/arm/mm/fault.c
3919 #include <asm/system_misc.h>
3920 #include <asm/system_info.h>
3921 #include <asm/tlbflush.h>
3922 +#include <asm/sections.h>
3926 @@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3927 if (fixup_exception(regs))
3930 +#ifdef CONFIG_PAX_MEMORY_UDEREF
3931 + if (addr < TASK_SIZE) {
3932 + if (current->signal->curr_ip)
3933 + printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3934 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3936 + printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3937 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3941 +#ifdef CONFIG_PAX_KERNEXEC
3942 + if ((fsr & FSR_WRITE) &&
3943 + (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3944 + (MODULES_VADDR <= addr && addr < MODULES_END)))
3946 + if (current->signal->curr_ip)
3947 + printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3948 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3950 + printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3951 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3956 * No handler, we'll have to terminate things with extreme prejudice.
3958 @@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3962 +#ifdef CONFIG_PAX_PAGEEXEC
3963 + if (fsr & FSR_LNX_PF) {
3964 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3965 + do_group_exit(SIGKILL);
3969 tsk->thread.address = addr;
3970 tsk->thread.error_code = fsr;
3971 tsk->thread.trap_no = 14;
3972 @@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3974 #endif /* CONFIG_MMU */
3976 +#ifdef CONFIG_PAX_PAGEEXEC
3977 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3981 + printk(KERN_ERR "PAX: bytes at PC: ");
3982 + for (i = 0; i < 20; i++) {
3984 + if (get_user(c, (__force unsigned char __user *)pc+i))
3985 + printk(KERN_CONT "?? ");
3987 + printk(KERN_CONT "%02x ", c);
3991 + printk(KERN_ERR "PAX: bytes at SP-4: ");
3992 + for (i = -1; i < 20; i++) {
3994 + if (get_user(c, (__force unsigned long __user *)sp+i))
3995 + printk(KERN_CONT "???????? ");
3997 + printk(KERN_CONT "%08lx ", c);
4004 * First Level Translation Fault Handler
4006 @@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4007 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
4008 struct siginfo info;
4010 +#ifdef CONFIG_PAX_MEMORY_UDEREF
4011 + if (addr < TASK_SIZE && is_domain_fault(fsr)) {
4012 + if (current->signal->curr_ip)
4013 + printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4014 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4016 + printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4017 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4022 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4026 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4027 inf->name, fsr, addr);
4028 show_pte(current->mm, addr);
4029 @@ -574,15 +647,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4030 ifsr_info[nr].name = name;
4033 +asmlinkage int sys_sigreturn(struct pt_regs *regs);
4034 +asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4036 asmlinkage void __exception
4037 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4039 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4040 struct siginfo info;
4041 + unsigned long pc = instruction_pointer(regs);
4043 + if (user_mode(regs)) {
4044 + unsigned long sigpage = current->mm->context.sigpage;
4046 + if (sigpage <= pc && pc < sigpage + 7*4) {
4047 + if (pc < sigpage + 3*4)
4048 + sys_sigreturn(regs);
4050 + sys_rt_sigreturn(regs);
4053 + if (pc == 0xffff0f60UL) {
4055 + * PaX: __kuser_cmpxchg64 emulation
4058 + //regs->ARM_pc = regs->ARM_lr;
4061 + if (pc == 0xffff0fa0UL) {
4063 + * PaX: __kuser_memory_barrier emulation
4065 + // dmb(); implied by the exception
4066 + regs->ARM_pc = regs->ARM_lr;
4069 + if (pc == 0xffff0fc0UL) {
4071 + * PaX: __kuser_cmpxchg emulation
4077 + //op = FUTEX_OP_SET << 28;
4078 + //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4079 + //regs->ARM_r0 = old != new;
4080 + //regs->ARM_pc = regs->ARM_lr;
4083 + if (pc == 0xffff0fe0UL) {
4085 + * PaX: __kuser_get_tls emulation
4087 + regs->ARM_r0 = current_thread_info()->tp_value[0];
4088 + regs->ARM_pc = regs->ARM_lr;
4093 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4094 + else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4095 + if (current->signal->curr_ip)
4096 + printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4097 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4098 + pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4100 + printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4101 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4102 + pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4107 +#ifdef CONFIG_PAX_REFCOUNT
4108 + if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4109 +#ifdef CONFIG_THUMB2_KERNEL
4110 + unsigned short bkpt;
4112 + if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4114 + unsigned int bkpt;
4116 + if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4118 + current->thread.error_code = ifsr;
4119 + current->thread.trap_no = 0;
4120 + pax_report_refcount_overflow(regs);
4121 + fixup_exception(regs);
4127 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4131 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4132 inf->name, ifsr, addr);
4134 diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4135 index cf08bdf..772656c 100644
4136 --- a/arch/arm/mm/fault.h
4137 +++ b/arch/arm/mm/fault.h
4141 * Fault status register encodings. We steal bit 31 for our own purposes.
4142 + * Set when the FSR value is from an instruction fault.
4144 #define FSR_LNX_PF (1 << 31)
4145 #define FSR_WRITE (1 << 11)
4146 @@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4150 +/* valid for LPAE and !LPAE */
4151 +static inline int is_xn_fault(unsigned int fsr)
4153 + return ((fsr_fs(fsr) & 0x3c) == 0xc);
4156 +static inline int is_domain_fault(unsigned int fsr)
4158 + return ((fsr_fs(fsr) & 0xD) == 0x9);
4161 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4162 unsigned long search_exception_table(unsigned long addr);
4164 diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4165 index be92fa0..5252d7e 100644
4166 --- a/arch/arm/mm/init.c
4167 +++ b/arch/arm/mm/init.c
4168 @@ -709,7 +709,46 @@ void free_tcmmem(void)
4170 #ifdef CONFIG_HAVE_TCM
4171 extern char __tcm_start, __tcm_end;
4174 +#ifdef CONFIG_PAX_KERNEXEC
4175 + unsigned long addr;
4179 + int cpu_arch = cpu_architecture();
4180 + unsigned int cr = get_cr();
4182 + if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4183 + /* make pages tables, etc before .text NX */
4184 + for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4185 + pgd = pgd_offset_k(addr);
4186 + pud = pud_offset(pgd, addr);
4187 + pmd = pmd_offset(pud, addr);
4188 + __section_update(pmd, addr, PMD_SECT_XN);
4190 + /* make init NX */
4191 + for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4192 + pgd = pgd_offset_k(addr);
4193 + pud = pud_offset(pgd, addr);
4194 + pmd = pmd_offset(pud, addr);
4195 + __section_update(pmd, addr, PMD_SECT_XN);
4197 + /* make kernel code/rodata RX */
4198 + for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4199 + pgd = pgd_offset_k(addr);
4200 + pud = pud_offset(pgd, addr);
4201 + pmd = pmd_offset(pud, addr);
4202 +#ifdef CONFIG_ARM_LPAE
4203 + __section_update(pmd, addr, PMD_SECT_RDONLY);
4205 + __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4211 +#ifdef CONFIG_HAVE_TCM
4212 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4213 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4215 diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4216 index d1e5ad7..84dcbf2 100644
4217 --- a/arch/arm/mm/ioremap.c
4218 +++ b/arch/arm/mm/ioremap.c
4219 @@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4223 - mtype = MT_MEMORY_RWX;
4224 + mtype = MT_MEMORY_RX;
4226 - mtype = MT_MEMORY_RWX_NONCACHED;
4227 + mtype = MT_MEMORY_RX_NONCACHED;
4229 return __arm_ioremap_caller(phys_addr, size, mtype,
4230 __builtin_return_address(0));
4231 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4232 index 407dc78..047ce9d 100644
4233 --- a/arch/arm/mm/mmap.c
4234 +++ b/arch/arm/mm/mmap.c
4235 @@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4236 struct vm_area_struct *vma;
4238 int aliasing = cache_is_vipt_aliasing();
4239 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4240 struct vm_unmapped_area_info info;
4243 @@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4244 if (len > TASK_SIZE)
4247 +#ifdef CONFIG_PAX_RANDMMAP
4248 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4253 addr = COLOUR_ALIGN(addr, pgoff);
4254 @@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4255 addr = PAGE_ALIGN(addr);
4257 vma = find_vma(mm, addr);
4258 - if (TASK_SIZE - len >= addr &&
4259 - (!vma || addr + len <= vma->vm_start))
4260 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4264 @@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4265 info.high_limit = TASK_SIZE;
4266 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4267 info.align_offset = pgoff << PAGE_SHIFT;
4268 + info.threadstack_offset = offset;
4269 return vm_unmapped_area(&info);
4272 @@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4273 unsigned long addr = addr0;
4275 int aliasing = cache_is_vipt_aliasing();
4276 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4277 struct vm_unmapped_area_info info;
4280 @@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4284 +#ifdef CONFIG_PAX_RANDMMAP
4285 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4288 /* requesting a specific address */
4291 @@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4293 addr = PAGE_ALIGN(addr);
4294 vma = find_vma(mm, addr);
4295 - if (TASK_SIZE - len >= addr &&
4296 - (!vma || addr + len <= vma->vm_start))
4297 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4301 @@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4302 info.high_limit = mm->mmap_base;
4303 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4304 info.align_offset = pgoff << PAGE_SHIFT;
4305 + info.threadstack_offset = offset;
4306 addr = vm_unmapped_area(&info);
4309 @@ -183,14 +193,30 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4311 unsigned long random_factor = 0UL;
4313 +#ifdef CONFIG_PAX_RANDMMAP
4314 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4317 if (current->flags & PF_RANDOMIZE)
4318 random_factor = arch_mmap_rnd();
4320 if (mmap_is_legacy()) {
4321 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4323 +#ifdef CONFIG_PAX_RANDMMAP
4324 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4325 + mm->mmap_base += mm->delta_mmap;
4328 mm->get_unmapped_area = arch_get_unmapped_area;
4330 mm->mmap_base = mmap_base(random_factor);
4332 +#ifdef CONFIG_PAX_RANDMMAP
4333 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4334 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4337 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4340 diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4341 index 7186382..0c145cf 100644
4342 --- a/arch/arm/mm/mmu.c
4343 +++ b/arch/arm/mm/mmu.c
4348 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4349 +void modify_domain(unsigned int dom, unsigned int type)
4351 + struct thread_info *thread = current_thread_info();
4352 + unsigned int domain = thread->cpu_domain;
4354 + * DOMAIN_MANAGER might be defined to some other value,
4355 + * use the arch-defined constant
4357 + domain &= ~domain_val(dom, 3);
4358 + thread->cpu_domain = domain | domain_val(dom, type);
4359 + set_domain(thread->cpu_domain);
4361 +EXPORT_SYMBOL(modify_domain);
4365 * empty_zero_page is a special page that is used for
4366 * zero-initialized data and COW.
4367 @@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
4368 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4369 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4371 -static struct mem_type mem_types[] = {
4372 +#ifdef CONFIG_PAX_KERNEXEC
4373 +#define L_PTE_KERNEXEC L_PTE_RDONLY
4374 +#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4376 +#define L_PTE_KERNEXEC L_PTE_DIRTY
4377 +#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4380 +static struct mem_type mem_types[] __read_only = {
4381 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4382 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4384 @@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
4385 .prot_sect = PROT_SECT_DEVICE,
4386 .domain = DOMAIN_IO,
4389 + [MT_UNCACHED_RW] = {
4390 .prot_pte = PROT_PTE_DEVICE,
4391 .prot_l1 = PMD_TYPE_TABLE,
4392 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4393 .domain = DOMAIN_IO,
4395 - [MT_CACHECLEAN] = {
4396 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4397 + [MT_CACHECLEAN_RO] = {
4398 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4399 .domain = DOMAIN_KERNEL,
4401 #ifndef CONFIG_ARM_LPAE
4402 - [MT_MINICLEAN] = {
4403 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4404 + [MT_MINICLEAN_RO] = {
4405 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4406 .domain = DOMAIN_KERNEL,
4409 @@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
4410 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4412 .prot_l1 = PMD_TYPE_TABLE,
4413 - .domain = DOMAIN_USER,
4414 + .domain = DOMAIN_VECTORS,
4416 [MT_HIGH_VECTORS] = {
4417 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4418 L_PTE_USER | L_PTE_RDONLY,
4419 .prot_l1 = PMD_TYPE_TABLE,
4420 - .domain = DOMAIN_USER,
4421 + .domain = DOMAIN_VECTORS,
4423 - [MT_MEMORY_RWX] = {
4424 + [__MT_MEMORY_RWX] = {
4425 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4426 .prot_l1 = PMD_TYPE_TABLE,
4427 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4428 @@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
4429 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4430 .domain = DOMAIN_KERNEL,
4433 - .prot_sect = PMD_TYPE_SECT,
4434 + [MT_MEMORY_RX] = {
4435 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4436 + .prot_l1 = PMD_TYPE_TABLE,
4437 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4438 + .domain = DOMAIN_KERNEL,
4441 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4442 .domain = DOMAIN_KERNEL,
4444 - [MT_MEMORY_RWX_NONCACHED] = {
4445 + [MT_MEMORY_RW_NONCACHED] = {
4446 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4447 L_PTE_MT_BUFFERABLE,
4448 .prot_l1 = PMD_TYPE_TABLE,
4449 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4450 .domain = DOMAIN_KERNEL,
4452 + [MT_MEMORY_RX_NONCACHED] = {
4453 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4454 + L_PTE_MT_BUFFERABLE,
4455 + .prot_l1 = PMD_TYPE_TABLE,
4456 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4457 + .domain = DOMAIN_KERNEL,
4459 [MT_MEMORY_RW_DTCM] = {
4460 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4462 @@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
4463 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4464 .domain = DOMAIN_KERNEL,
4466 - [MT_MEMORY_RWX_ITCM] = {
4467 - .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4468 + [MT_MEMORY_RX_ITCM] = {
4469 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4470 .prot_l1 = PMD_TYPE_TABLE,
4471 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4472 .domain = DOMAIN_KERNEL,
4474 [MT_MEMORY_RW_SO] = {
4475 @@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
4476 * Mark cache clean areas and XIP ROM read only
4477 * from SVC mode and no access from userspace.
4479 - mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4480 - mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4481 - mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4482 + mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4483 +#ifdef CONFIG_PAX_KERNEXEC
4484 + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4485 + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4486 + mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4488 + mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4489 + mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4493 @@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
4494 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4495 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4496 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4497 - mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4498 - mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4499 + mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4500 + mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4501 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4502 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4503 + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4504 + mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4505 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4506 - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4507 - mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4508 + mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4509 + mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4510 + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4511 + mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4515 @@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
4516 if (cpu_arch >= CPU_ARCH_ARMv6) {
4517 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4518 /* Non-cacheable Normal is XCB = 001 */
4519 - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4520 + mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4521 + PMD_SECT_BUFFERED;
4522 + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4525 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4526 - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4527 + mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4529 + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4533 - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4534 + mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4535 + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4538 #ifdef CONFIG_ARM_LPAE
4539 @@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
4540 user_pgprot |= PTE_EXT_PXN;
4543 + user_pgprot |= __supported_pte_mask;
4545 for (i = 0; i < 16; i++) {
4546 pteval_t v = pgprot_val(protection_map[i]);
4547 protection_map[i] = __pgprot(v | user_pgprot);
4548 @@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
4550 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4551 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4552 - mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4553 - mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4554 + mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4555 + mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4556 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4557 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4558 + mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4559 + mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4560 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4561 - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4562 - mem_types[MT_ROM].prot_sect |= cp->pmd;
4563 + mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4564 + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4565 + mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4569 - mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4570 + mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4574 - mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4575 + mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4578 pr_info("Memory policy: %sData cache %s\n",
4579 @@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
4583 - if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4584 + if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4585 md->virtual >= PAGE_OFFSET &&
4586 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4587 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
4588 @@ -1218,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
4589 * called function. This means you can't use any function or debugging
4590 * method which may touch any device, otherwise the kernel _will_ crash.
4593 +static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4595 static void __init devicemaps_init(const struct machine_desc *mdesc)
4597 struct map_desc map;
4602 - * Allocate the vector page early.
4604 - vectors = early_alloc(PAGE_SIZE * 2);
4606 - early_trap_init(vectors);
4607 + early_trap_init(&vectors);
4609 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4610 pmd_clear(pmd_off_k(addr));
4611 @@ -1242,7 +1296,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4612 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4613 map.virtual = MODULES_VADDR;
4614 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4615 - map.type = MT_ROM;
4616 + map.type = MT_ROM_RX;
4617 create_mapping(&map);
4620 @@ -1253,14 +1307,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4621 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4622 map.virtual = FLUSH_BASE;
4624 - map.type = MT_CACHECLEAN;
4625 + map.type = MT_CACHECLEAN_RO;
4626 create_mapping(&map);
4628 #ifdef FLUSH_BASE_MINICACHE
4629 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4630 map.virtual = FLUSH_BASE_MINICACHE;
4632 - map.type = MT_MINICLEAN;
4633 + map.type = MT_MINICLEAN_RO;
4634 create_mapping(&map);
4637 @@ -1269,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4638 * location (0xffff0000). If we aren't using high-vectors, also
4639 * create a mapping at the low-vectors virtual address.
4641 - map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4642 + map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4643 map.virtual = 0xffff0000;
4644 map.length = PAGE_SIZE;
4645 #ifdef CONFIG_KUSER_HELPERS
4646 @@ -1329,8 +1383,10 @@ static void __init kmap_init(void)
4647 static void __init map_lowmem(void)
4649 struct memblock_region *reg;
4650 +#ifndef CONFIG_PAX_KERNEXEC
4651 phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4652 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4655 /* Map all the lowmem memory banks. */
4656 for_each_memblock(memory, reg) {
4657 @@ -1343,11 +1399,48 @@ static void __init map_lowmem(void)
4661 +#ifdef CONFIG_PAX_KERNEXEC
4662 + map.pfn = __phys_to_pfn(start);
4663 + map.virtual = __phys_to_virt(start);
4664 + map.length = end - start;
4666 + if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4667 + struct map_desc kernel;
4668 + struct map_desc initmap;
4670 + /* when freeing initmem we will make this RW */
4671 + initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4672 + initmap.virtual = (unsigned long)__init_begin;
4673 + initmap.length = _sdata - __init_begin;
4674 + initmap.type = __MT_MEMORY_RWX;
4675 + create_mapping(&initmap);
4677 + /* when freeing initmem we will make this RX */
4678 + kernel.pfn = __phys_to_pfn(__pa(_stext));
4679 + kernel.virtual = (unsigned long)_stext;
4680 + kernel.length = __init_begin - _stext;
4681 + kernel.type = __MT_MEMORY_RWX;
4682 + create_mapping(&kernel);
4684 + if (map.virtual < (unsigned long)_stext) {
4685 + map.length = (unsigned long)_stext - map.virtual;
4686 + map.type = __MT_MEMORY_RWX;
4687 + create_mapping(&map);
4690 + map.pfn = __phys_to_pfn(__pa(_sdata));
4691 + map.virtual = (unsigned long)_sdata;
4692 + map.length = end - __pa(_sdata);
4695 + map.type = MT_MEMORY_RW;
4696 + create_mapping(&map);
4698 if (end < kernel_x_start) {
4699 map.pfn = __phys_to_pfn(start);
4700 map.virtual = __phys_to_virt(start);
4701 map.length = end - start;
4702 - map.type = MT_MEMORY_RWX;
4703 + map.type = __MT_MEMORY_RWX;
4705 create_mapping(&map);
4706 } else if (start >= kernel_x_end) {
4707 @@ -1371,7 +1464,7 @@ static void __init map_lowmem(void)
4708 map.pfn = __phys_to_pfn(kernel_x_start);
4709 map.virtual = __phys_to_virt(kernel_x_start);
4710 map.length = kernel_x_end - kernel_x_start;
4711 - map.type = MT_MEMORY_RWX;
4712 + map.type = __MT_MEMORY_RWX;
4714 create_mapping(&map);
4716 @@ -1384,6 +1477,7 @@ static void __init map_lowmem(void)
4717 create_mapping(&map);
4724 diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4725 index e0e2358..a4ee460 100644
4726 --- a/arch/arm/net/bpf_jit_32.c
4727 +++ b/arch/arm/net/bpf_jit_32.c
4729 #include <asm/cacheflush.h>
4730 #include <asm/hwcap.h>
4731 #include <asm/opcodes.h>
4732 +#include <asm/pgtable.h>
4734 #include "bpf_jit_32.h"
4736 @@ -72,34 +73,58 @@ struct jit_ctx {
4740 +#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4741 +int bpf_jit_enable __read_only;
4743 int bpf_jit_enable __read_mostly;
4746 -static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4747 +static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
4748 + unsigned int size)
4750 + void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
4754 + memcpy(ret, ptr, size);
4758 +static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
4763 - err = skb_copy_bits(skb, offset, &ret, 1);
4765 + err = call_neg_helper(skb, offset, &ret, 1);
4767 + err = skb_copy_bits(skb, offset, &ret, 1);
4769 return (u64)err << 32 | ret;
4772 -static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
4773 +static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
4778 - err = skb_copy_bits(skb, offset, &ret, 2);
4780 + err = call_neg_helper(skb, offset, &ret, 2);
4782 + err = skb_copy_bits(skb, offset, &ret, 2);
4784 return (u64)err << 32 | ntohs(ret);
4787 -static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
4788 +static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
4793 - err = skb_copy_bits(skb, offset, &ret, 4);
4795 + err = call_neg_helper(skb, offset, &ret, 4);
4797 + err = skb_copy_bits(skb, offset, &ret, 4);
4799 return (u64)err << 32 | ntohl(ret);
4801 @@ -179,8 +204,10 @@ static void jit_fill_hole(void *area, unsigned int size)
4804 /* We are guaranteed to have aligned memory. */
4805 + pax_open_kernel();
4806 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
4807 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
4808 + pax_close_kernel();
4811 static void build_prologue(struct jit_ctx *ctx)
4812 @@ -536,9 +563,6 @@ static int build_body(struct jit_ctx *ctx)
4813 case BPF_LD | BPF_B | BPF_ABS:
4816 - /* the interpreter will deal with the negative K */
4819 emit_mov_i(r_off, k, ctx);
4821 ctx->seen |= SEEN_DATA | SEEN_CALL;
4822 @@ -547,12 +571,24 @@ load_common:
4823 emit(ARM_SUB_I(r_scratch, r_skb_hl,
4824 1 << load_order), ctx);
4825 emit(ARM_CMP_R(r_scratch, r_off), ctx);
4826 - condt = ARM_COND_HS;
4827 + condt = ARM_COND_GE;
4829 emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
4830 condt = ARM_COND_HI;
4834 + * test for negative offset, only if we are
4835 + * currently scheduled to take the fast
4836 + * path. this will update the flags so that
4837 + * the slowpath instructions are ignored if the
4838 + * offset is negative.
4840 + * for load_order == 0 the HI condition will
4841 + * make loads at offset 0 take the slow path too.
4843 + _emit(condt, ARM_CMP_I(r_off, 0), ctx);
4845 _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
4848 @@ -860,9 +896,11 @@ b_epilogue:
4849 off = offsetof(struct sk_buff, vlan_tci);
4850 emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
4851 if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
4852 - OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
4854 - OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
4855 + OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
4857 + OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
4858 + OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
4861 case BPF_ANC | SKF_AD_QUEUE:
4862 ctx->seen |= SEEN_SKB;
4863 diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4864 index 5b217f4..c23f40e 100644
4865 --- a/arch/arm/plat-iop/setup.c
4866 +++ b/arch/arm/plat-iop/setup.c
4867 @@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4868 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4869 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4870 .length = IOP3XX_PERIPHERAL_SIZE,
4871 - .type = MT_UNCACHED,
4872 + .type = MT_UNCACHED_RW,
4876 diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4877 index a5bc92d..0bb4730 100644
4878 --- a/arch/arm/plat-omap/sram.c
4879 +++ b/arch/arm/plat-omap/sram.c
4880 @@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4881 * Looks like we need to preserve some bootloader code at the
4882 * beginning of SRAM for jumping to flash for reboot to work...
4884 + pax_open_kernel();
4885 memset_io(omap_sram_base + omap_sram_skip, 0,
4886 omap_sram_size - omap_sram_skip);
4887 + pax_close_kernel();
4889 diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
4890 index 7047051..44e8675 100644
4891 --- a/arch/arm64/include/asm/atomic.h
4892 +++ b/arch/arm64/include/asm/atomic.h
4893 @@ -252,5 +252,15 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
4894 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
4895 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
4897 +#define atomic64_read_unchecked(v) atomic64_read(v)
4898 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4899 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4900 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4901 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4902 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4903 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4904 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4905 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4909 diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4910 index 71f19c4..2b13cfe 100644
4911 --- a/arch/arm64/include/asm/barrier.h
4912 +++ b/arch/arm64/include/asm/barrier.h
4915 compiletime_assert_atomic_type(*p); \
4917 - ACCESS_ONCE(*p) = (v); \
4918 + ACCESS_ONCE_RW(*p) = (v); \
4921 #define smp_load_acquire(p) \
4922 diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
4923 index 4fde8c1..441f84f 100644
4924 --- a/arch/arm64/include/asm/percpu.h
4925 +++ b/arch/arm64/include/asm/percpu.h
4926 @@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
4930 - ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
4931 + ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
4934 - ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
4935 + ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
4938 - ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
4939 + ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
4942 - ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
4943 + ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
4947 diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
4948 index 7642056..bffc904 100644
4949 --- a/arch/arm64/include/asm/pgalloc.h
4950 +++ b/arch/arm64/include/asm/pgalloc.h
4951 @@ -46,6 +46,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4952 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
4955 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4957 + pud_populate(mm, pud, pmd);
4960 #endif /* CONFIG_PGTABLE_LEVELS > 2 */
4962 #if CONFIG_PGTABLE_LEVELS > 3
4963 diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4964 index 07e1ba44..ec8cbbb 100644
4965 --- a/arch/arm64/include/asm/uaccess.h
4966 +++ b/arch/arm64/include/asm/uaccess.h
4967 @@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4971 +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4972 #define access_ok(type, addr, size) __range_ok(addr, size)
4973 #define user_addr_max get_fs
4975 diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
4976 index b0bd4e5..54e82f6 100644
4977 --- a/arch/arm64/mm/dma-mapping.c
4978 +++ b/arch/arm64/mm/dma-mapping.c
4979 @@ -134,7 +134,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
4980 phys_to_page(paddr),
4981 size >> PAGE_SHIFT);
4983 - swiotlb_free_coherent(dev, size, vaddr, dma_handle);
4984 + swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
4987 static void *__dma_alloc(struct device *dev, size_t size,
4988 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4989 index c3a58a1..78fbf54 100644
4990 --- a/arch/avr32/include/asm/cache.h
4991 +++ b/arch/avr32/include/asm/cache.h
4993 #ifndef __ASM_AVR32_CACHE_H
4994 #define __ASM_AVR32_CACHE_H
4996 +#include <linux/const.h>
4998 #define L1_CACHE_SHIFT 5
4999 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5000 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5003 * Memory returned by kmalloc() may be used for DMA, so we must make
5004 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
5005 index 0388ece..87c8df1 100644
5006 --- a/arch/avr32/include/asm/elf.h
5007 +++ b/arch/avr32/include/asm/elf.h
5008 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
5009 the loader. We need to make sure that it is out of the way of the program
5010 that it will "exec", and that there is sufficient room for the brk. */
5012 -#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5013 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5015 +#ifdef CONFIG_PAX_ASLR
5016 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
5018 +#define PAX_DELTA_MMAP_LEN 15
5019 +#define PAX_DELTA_STACK_LEN 15
5022 /* This yields a mask that user programs can use to figure out what
5023 instruction set this CPU supports. This could be done in user space,
5024 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
5025 index 479330b..53717a8 100644
5026 --- a/arch/avr32/include/asm/kmap_types.h
5027 +++ b/arch/avr32/include/asm/kmap_types.h
5029 #define __ASM_AVR32_KMAP_TYPES_H
5031 #ifdef CONFIG_DEBUG_HIGHMEM
5032 -# define KM_TYPE_NR 29
5033 +# define KM_TYPE_NR 30
5035 -# define KM_TYPE_NR 14
5036 +# define KM_TYPE_NR 15
5039 #endif /* __ASM_AVR32_KMAP_TYPES_H */
5040 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
5041 index d223a8b..69c5210 100644
5042 --- a/arch/avr32/mm/fault.c
5043 +++ b/arch/avr32/mm/fault.c
5044 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
5046 int exception_trace = 1;
5048 +#ifdef CONFIG_PAX_PAGEEXEC
5049 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5053 + printk(KERN_ERR "PAX: bytes at PC: ");
5054 + for (i = 0; i < 20; i++) {
5056 + if (get_user(c, (unsigned char *)pc+i))
5057 + printk(KERN_CONT "???????? ");
5059 + printk(KERN_CONT "%02x ", c);
5066 * This routine handles page faults. It determines the address and the
5067 * problem, and then passes it off to one of the appropriate routines.
5068 @@ -178,6 +195,16 @@ bad_area:
5069 up_read(&mm->mmap_sem);
5071 if (user_mode(regs)) {
5073 +#ifdef CONFIG_PAX_PAGEEXEC
5074 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
5075 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
5076 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
5077 + do_group_exit(SIGKILL);
5082 if (exception_trace && printk_ratelimit())
5083 printk("%s%s[%d]: segfault at %08lx pc %08lx "
5084 "sp %08lx ecr %lu\n",
5085 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
5086 index 568885a..f8008df 100644
5087 --- a/arch/blackfin/include/asm/cache.h
5088 +++ b/arch/blackfin/include/asm/cache.h
5090 #ifndef __ARCH_BLACKFIN_CACHE_H
5091 #define __ARCH_BLACKFIN_CACHE_H
5093 +#include <linux/const.h>
5094 #include <linux/linkage.h> /* for asmlinkage */
5098 * Blackfin loads 32 bytes for cache
5100 #define L1_CACHE_SHIFT 5
5101 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5102 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5103 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5105 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5106 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
5107 index aea2718..3639a60 100644
5108 --- a/arch/cris/include/arch-v10/arch/cache.h
5109 +++ b/arch/cris/include/arch-v10/arch/cache.h
5111 #ifndef _ASM_ARCH_CACHE_H
5112 #define _ASM_ARCH_CACHE_H
5114 +#include <linux/const.h>
5115 /* Etrax 100LX have 32-byte cache-lines. */
5116 -#define L1_CACHE_BYTES 32
5117 #define L1_CACHE_SHIFT 5
5118 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5120 #endif /* _ASM_ARCH_CACHE_H */
5121 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
5122 index 7caf25d..ee65ac5 100644
5123 --- a/arch/cris/include/arch-v32/arch/cache.h
5124 +++ b/arch/cris/include/arch-v32/arch/cache.h
5126 #ifndef _ASM_CRIS_ARCH_CACHE_H
5127 #define _ASM_CRIS_ARCH_CACHE_H
5129 +#include <linux/const.h>
5130 #include <arch/hwregs/dma.h>
5132 /* A cache-line is 32 bytes. */
5133 -#define L1_CACHE_BYTES 32
5134 #define L1_CACHE_SHIFT 5
5135 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5137 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5139 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5140 index 102190a..5334cea 100644
5141 --- a/arch/frv/include/asm/atomic.h
5142 +++ b/arch/frv/include/asm/atomic.h
5143 @@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5144 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5145 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5147 +#define atomic64_read_unchecked(v) atomic64_read(v)
5148 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5149 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5150 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5151 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5152 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
5153 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5154 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
5155 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5157 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5160 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5161 index 2797163..c2a401df9 100644
5162 --- a/arch/frv/include/asm/cache.h
5163 +++ b/arch/frv/include/asm/cache.h
5165 #ifndef __ASM_CACHE_H
5166 #define __ASM_CACHE_H
5168 +#include <linux/const.h>
5170 /* bytes per L1 cache line */
5171 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5172 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5173 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5175 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5176 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5177 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5178 index 43901f2..0d8b865 100644
5179 --- a/arch/frv/include/asm/kmap_types.h
5180 +++ b/arch/frv/include/asm/kmap_types.h
5182 #ifndef _ASM_KMAP_TYPES_H
5183 #define _ASM_KMAP_TYPES_H
5185 -#define KM_TYPE_NR 17
5186 +#define KM_TYPE_NR 18
5189 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5190 index 836f147..4cf23f5 100644
5191 --- a/arch/frv/mm/elf-fdpic.c
5192 +++ b/arch/frv/mm/elf-fdpic.c
5193 @@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5195 struct vm_area_struct *vma;
5196 struct vm_unmapped_area_info info;
5197 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5199 if (len > TASK_SIZE)
5201 @@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5203 addr = PAGE_ALIGN(addr);
5204 vma = find_vma(current->mm, addr);
5205 - if (TASK_SIZE - len >= addr &&
5206 - (!vma || addr + len <= vma->vm_start))
5207 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5211 @@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5212 info.high_limit = (current->mm->start_stack - 0x00200000);
5213 info.align_mask = 0;
5214 info.align_offset = 0;
5215 + info.threadstack_offset = offset;
5216 addr = vm_unmapped_area(&info);
5217 if (!(addr & ~PAGE_MASK))
5219 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5220 index 69952c18..4fa2908 100644
5221 --- a/arch/hexagon/include/asm/cache.h
5222 +++ b/arch/hexagon/include/asm/cache.h
5224 #ifndef __ASM_CACHE_H
5225 #define __ASM_CACHE_H
5227 +#include <linux/const.h>
5229 /* Bytes per L1 cache line */
5230 -#define L1_CACHE_SHIFT (5)
5231 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5232 +#define L1_CACHE_SHIFT 5
5233 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5235 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5237 diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5238 index 76d25b2..d3793a0f 100644
5239 --- a/arch/ia64/Kconfig
5240 +++ b/arch/ia64/Kconfig
5241 @@ -541,6 +541,7 @@ source "drivers/sn/Kconfig"
5243 bool "kexec system call"
5244 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5245 + depends on !GRKERNSEC_KMEM
5247 kexec is a system call that implements the ability to shutdown your
5248 current kernel, and to start another kernel. It is like a reboot
5249 diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5250 index 970d0bd..e750b9b 100644
5251 --- a/arch/ia64/Makefile
5252 +++ b/arch/ia64/Makefile
5253 @@ -98,5 +98,6 @@ endef
5254 archprepare: make_nr_irqs_h FORCE
5255 PHONY += make_nr_irqs_h FORCE
5257 +make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5258 make_nr_irqs_h: FORCE
5259 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5260 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5261 index 0bf0350..2ad1957 100644
5262 --- a/arch/ia64/include/asm/atomic.h
5263 +++ b/arch/ia64/include/asm/atomic.h
5264 @@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5265 #define atomic64_inc(v) atomic64_add(1, (v))
5266 #define atomic64_dec(v) atomic64_sub(1, (v))
5268 +#define atomic64_read_unchecked(v) atomic64_read(v)
5269 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5270 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5271 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5272 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5273 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
5274 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5275 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
5276 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5278 #endif /* _ASM_IA64_ATOMIC_H */
5279 diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5280 index f6769eb..1cdb590 100644
5281 --- a/arch/ia64/include/asm/barrier.h
5282 +++ b/arch/ia64/include/asm/barrier.h
5285 compiletime_assert_atomic_type(*p); \
5287 - ACCESS_ONCE(*p) = (v); \
5288 + ACCESS_ONCE_RW(*p) = (v); \
5291 #define smp_load_acquire(p) \
5292 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5293 index 988254a..e1ee885 100644
5294 --- a/arch/ia64/include/asm/cache.h
5295 +++ b/arch/ia64/include/asm/cache.h
5297 #ifndef _ASM_IA64_CACHE_H
5298 #define _ASM_IA64_CACHE_H
5300 +#include <linux/const.h>
5303 * Copyright (C) 1998-2000 Hewlett-Packard Co
5306 /* Bytes per L1 (data) cache line. */
5307 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5308 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5309 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5312 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5313 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5314 index 5a83c5c..4d7f553 100644
5315 --- a/arch/ia64/include/asm/elf.h
5316 +++ b/arch/ia64/include/asm/elf.h
5319 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5321 +#ifdef CONFIG_PAX_ASLR
5322 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5324 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5325 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5328 #define PT_IA_64_UNWIND 0x70000001
5330 /* IA-64 relocations: */
5331 diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5332 index f5e70e9..624fad5 100644
5333 --- a/arch/ia64/include/asm/pgalloc.h
5334 +++ b/arch/ia64/include/asm/pgalloc.h
5335 @@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5336 pgd_val(*pgd_entry) = __pa(pud);
5340 +pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5342 + pgd_populate(mm, pgd_entry, pud);
5345 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5347 return quicklist_alloc(0, GFP_KERNEL, NULL);
5348 @@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5349 pud_val(*pud_entry) = __pa(pmd);
5353 +pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5355 + pud_populate(mm, pud_entry, pmd);
5358 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5360 return quicklist_alloc(0, GFP_KERNEL, NULL);
5361 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5362 index 9f3ed9e..c99b418 100644
5363 --- a/arch/ia64/include/asm/pgtable.h
5364 +++ b/arch/ia64/include/asm/pgtable.h
5366 * David Mosberger-Tang <davidm@hpl.hp.com>
5370 +#include <linux/const.h>
5371 #include <asm/mman.h>
5372 #include <asm/page.h>
5373 #include <asm/processor.h>
5374 @@ -139,6 +139,17 @@
5375 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5376 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5377 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5379 +#ifdef CONFIG_PAX_PAGEEXEC
5380 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5381 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5382 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5384 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
5385 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
5386 +# define PAGE_COPY_NOEXEC PAGE_COPY
5389 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5390 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5391 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5392 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5393 index 45698cd..e8e2dbc 100644
5394 --- a/arch/ia64/include/asm/spinlock.h
5395 +++ b/arch/ia64/include/asm/spinlock.h
5396 @@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5397 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5399 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5400 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5401 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5404 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5405 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5406 index 4f3fb6cc..254055e 100644
5407 --- a/arch/ia64/include/asm/uaccess.h
5408 +++ b/arch/ia64/include/asm/uaccess.h
5410 && ((segment).seg == KERNEL_DS.seg \
5411 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5413 +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5414 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5417 @@ -241,12 +242,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5418 static inline unsigned long
5419 __copy_to_user (void __user *to, const void *from, unsigned long count)
5421 + if (count > INT_MAX)
5424 + if (!__builtin_constant_p(count))
5425 + check_object_size(from, count, true);
5427 return __copy_user(to, (__force void __user *) from, count);
5430 static inline unsigned long
5431 __copy_from_user (void *to, const void __user *from, unsigned long count)
5433 + if (count > INT_MAX)
5436 + if (!__builtin_constant_p(count))
5437 + check_object_size(to, count, false);
5439 return __copy_user((__force void __user *) to, from, count);
5442 @@ -256,10 +269,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5444 void __user *__cu_to = (to); \
5445 const void *__cu_from = (from); \
5446 - long __cu_len = (n); \
5447 + unsigned long __cu_len = (n); \
5449 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
5450 + if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5451 + if (!__builtin_constant_p(n)) \
5452 + check_object_size(__cu_from, __cu_len, true); \
5453 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5458 @@ -267,11 +283,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5460 void *__cu_to = (to); \
5461 const void __user *__cu_from = (from); \
5462 - long __cu_len = (n); \
5463 + unsigned long __cu_len = (n); \
5465 __chk_user_ptr(__cu_from); \
5466 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
5467 + if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5468 + if (!__builtin_constant_p(n)) \
5469 + check_object_size(__cu_to, __cu_len, false); \
5470 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5475 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5476 index 29754aa..06d2838 100644
5477 --- a/arch/ia64/kernel/module.c
5478 +++ b/arch/ia64/kernel/module.c
5479 @@ -492,15 +492,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5483 +in_init_rx (const struct module *mod, uint64_t addr)
5485 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5489 +in_init_rw (const struct module *mod, uint64_t addr)
5491 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5495 in_init (const struct module *mod, uint64_t addr)
5497 - return addr - (uint64_t) mod->module_init < mod->init_size;
5498 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5502 +in_core_rx (const struct module *mod, uint64_t addr)
5504 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5508 +in_core_rw (const struct module *mod, uint64_t addr)
5510 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5514 in_core (const struct module *mod, uint64_t addr)
5516 - return addr - (uint64_t) mod->module_core < mod->core_size;
5517 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5521 @@ -683,7 +707,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5525 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5526 + if (in_init_rx(mod, val))
5527 + val -= (uint64_t) mod->module_init_rx;
5528 + else if (in_init_rw(mod, val))
5529 + val -= (uint64_t) mod->module_init_rw;
5530 + else if (in_core_rx(mod, val))
5531 + val -= (uint64_t) mod->module_core_rx;
5532 + else if (in_core_rw(mod, val))
5533 + val -= (uint64_t) mod->module_core_rw;
5537 @@ -818,15 +849,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5538 * addresses have been selected...
5541 - if (mod->core_size > MAX_LTOFF)
5542 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5544 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5545 * at the end of the module.
5547 - gp = mod->core_size - MAX_LTOFF / 2;
5548 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5550 - gp = mod->core_size / 2;
5551 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5552 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5553 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5555 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5557 diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5558 index c39c3cd..3c77738 100644
5559 --- a/arch/ia64/kernel/palinfo.c
5560 +++ b/arch/ia64/kernel/palinfo.c
5561 @@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5565 -static struct notifier_block __refdata palinfo_cpu_notifier =
5566 +static struct notifier_block palinfo_cpu_notifier =
5568 .notifier_call = palinfo_cpu_callback,
5570 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5571 index 41e33f8..65180b2a 100644
5572 --- a/arch/ia64/kernel/sys_ia64.c
5573 +++ b/arch/ia64/kernel/sys_ia64.c
5574 @@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5575 unsigned long align_mask = 0;
5576 struct mm_struct *mm = current->mm;
5577 struct vm_unmapped_area_info info;
5578 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5580 if (len > RGN_MAP_LIMIT)
5582 @@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5583 if (REGION_NUMBER(addr) == RGN_HPAGE)
5587 +#ifdef CONFIG_PAX_RANDMMAP
5588 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5589 + addr = mm->free_area_cache;
5594 addr = TASK_UNMAPPED_BASE;
5596 @@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5597 info.high_limit = TASK_SIZE;
5598 info.align_mask = align_mask;
5599 info.align_offset = 0;
5600 + info.threadstack_offset = offset;
5601 return vm_unmapped_area(&info);
5604 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5605 index 84f8a52..7c76178 100644
5606 --- a/arch/ia64/kernel/vmlinux.lds.S
5607 +++ b/arch/ia64/kernel/vmlinux.lds.S
5608 @@ -192,7 +192,7 @@ SECTIONS {
5610 . = ALIGN(PERCPU_PAGE_SIZE);
5611 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5612 - __phys_per_cpu_start = __per_cpu_load;
5613 + __phys_per_cpu_start = per_cpu_load;
5615 * ensure percpu data fits
5616 * into percpu page size
5617 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5618 index ba5ba7a..36e9d3a 100644
5619 --- a/arch/ia64/mm/fault.c
5620 +++ b/arch/ia64/mm/fault.c
5621 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5622 return pte_present(pte);
5625 +#ifdef CONFIG_PAX_PAGEEXEC
5626 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5630 + printk(KERN_ERR "PAX: bytes at PC: ");
5631 + for (i = 0; i < 8; i++) {
5633 + if (get_user(c, (unsigned int *)pc+i))
5634 + printk(KERN_CONT "???????? ");
5636 + printk(KERN_CONT "%08x ", c);
5642 # define VM_READ_BIT 0
5643 # define VM_WRITE_BIT 1
5644 # define VM_EXEC_BIT 2
5645 @@ -151,8 +168,21 @@ retry:
5646 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5649 - if ((vma->vm_flags & mask) != mask)
5650 + if ((vma->vm_flags & mask) != mask) {
5652 +#ifdef CONFIG_PAX_PAGEEXEC
5653 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5654 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5657 + up_read(&mm->mmap_sem);
5658 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5659 + do_group_exit(SIGKILL);
5667 * If for any reason at all we couldn't handle the fault, make
5668 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5669 index 52b7604b..455cb85 100644
5670 --- a/arch/ia64/mm/hugetlbpage.c
5671 +++ b/arch/ia64/mm/hugetlbpage.c
5672 @@ -143,6 +143,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5673 unsigned long pgoff, unsigned long flags)
5675 struct vm_unmapped_area_info info;
5676 + unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5678 if (len > RGN_MAP_LIMIT)
5680 @@ -166,6 +167,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5681 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5682 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5683 info.align_offset = 0;
5684 + info.threadstack_offset = offset;
5685 return vm_unmapped_area(&info);
5688 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5689 index a9b65cf..49ae1cf 100644
5690 --- a/arch/ia64/mm/init.c
5691 +++ b/arch/ia64/mm/init.c
5692 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5693 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5694 vma->vm_end = vma->vm_start + PAGE_SIZE;
5695 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5697 +#ifdef CONFIG_PAX_PAGEEXEC
5698 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5699 + vma->vm_flags &= ~VM_EXEC;
5701 +#ifdef CONFIG_PAX_MPROTECT
5702 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
5703 + vma->vm_flags &= ~VM_MAYEXEC;
5709 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5710 down_write(&current->mm->mmap_sem);
5711 if (insert_vm_struct(current->mm, vma)) {
5712 @@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
5713 gate_vma.vm_start = FIXADDR_USER_START;
5714 gate_vma.vm_end = FIXADDR_USER_END;
5715 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5716 - gate_vma.vm_page_prot = __P101;
5717 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5721 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5722 index 40b3ee98..8c2c112 100644
5723 --- a/arch/m32r/include/asm/cache.h
5724 +++ b/arch/m32r/include/asm/cache.h
5726 #ifndef _ASM_M32R_CACHE_H
5727 #define _ASM_M32R_CACHE_H
5729 +#include <linux/const.h>
5731 /* L1 cache line size */
5732 #define L1_CACHE_SHIFT 4
5733 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5734 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5736 #endif /* _ASM_M32R_CACHE_H */
5737 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5738 index 82abd15..d95ae5d 100644
5739 --- a/arch/m32r/lib/usercopy.c
5740 +++ b/arch/m32r/lib/usercopy.c
5743 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5749 if (access_ok(VERIFY_WRITE, to, n))
5750 __copy_user(to,from,n);
5751 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5753 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5759 if (access_ok(VERIFY_READ, from, n))
5760 __copy_user_zeroing(to,from,n);
5761 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5762 index 0395c51..5f26031 100644
5763 --- a/arch/m68k/include/asm/cache.h
5764 +++ b/arch/m68k/include/asm/cache.h
5766 #ifndef __ARCH_M68K_CACHE_H
5767 #define __ARCH_M68K_CACHE_H
5769 +#include <linux/const.h>
5771 /* bytes per L1 cache line */
5772 #define L1_CACHE_SHIFT 4
5773 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5774 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5776 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5778 diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5779 index d703d8e..a8e2d70 100644
5780 --- a/arch/metag/include/asm/barrier.h
5781 +++ b/arch/metag/include/asm/barrier.h
5782 @@ -90,7 +90,7 @@ static inline void fence(void)
5784 compiletime_assert_atomic_type(*p); \
5786 - ACCESS_ONCE(*p) = (v); \
5787 + ACCESS_ONCE_RW(*p) = (v); \
5790 #define smp_load_acquire(p) \
5791 diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5792 index 7ca80ac..794ba72 100644
5793 --- a/arch/metag/mm/hugetlbpage.c
5794 +++ b/arch/metag/mm/hugetlbpage.c
5795 @@ -194,6 +194,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5796 info.high_limit = TASK_SIZE;
5797 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5798 info.align_offset = 0;
5799 + info.threadstack_offset = 0;
5800 return vm_unmapped_area(&info);
5803 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5804 index 4efe96a..60e8699 100644
5805 --- a/arch/microblaze/include/asm/cache.h
5806 +++ b/arch/microblaze/include/asm/cache.h
5808 #ifndef _ASM_MICROBLAZE_CACHE_H
5809 #define _ASM_MICROBLAZE_CACHE_H
5811 +#include <linux/const.h>
5812 #include <asm/registers.h>
5814 #define L1_CACHE_SHIFT 5
5815 /* word-granular cache in microblaze */
5816 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5817 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5819 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5821 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5822 index f501665..b107753 100644
5823 --- a/arch/mips/Kconfig
5824 +++ b/arch/mips/Kconfig
5825 @@ -2585,6 +2585,7 @@ source "kernel/Kconfig.preempt"
5828 bool "Kexec system call"
5829 + depends on !GRKERNSEC_KMEM
5831 kexec is a system call that implements the ability to shutdown your
5832 current kernel, and to start another kernel. It is like a reboot
5833 diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5834 index d8960d4..77dbd31 100644
5835 --- a/arch/mips/cavium-octeon/dma-octeon.c
5836 +++ b/arch/mips/cavium-octeon/dma-octeon.c
5837 @@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5838 if (dma_release_from_coherent(dev, order, vaddr))
5841 - swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5842 + swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5845 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5846 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5847 index 26d4363..3c9a82e 100644
5848 --- a/arch/mips/include/asm/atomic.h
5849 +++ b/arch/mips/include/asm/atomic.h
5851 #include <asm/cmpxchg.h>
5852 #include <asm/war.h>
5854 +#ifdef CONFIG_GENERIC_ATOMIC64
5855 +#include <asm-generic/atomic64.h>
5858 #define ATOMIC_INIT(i) { (i) }
5860 +#ifdef CONFIG_64BIT
5861 +#define _ASM_EXTABLE(from, to) \
5862 +" .section __ex_table,\"a\"\n" \
5863 +" .dword " #from ", " #to"\n" \
5866 +#define _ASM_EXTABLE(from, to) \
5867 +" .section __ex_table,\"a\"\n" \
5868 +" .word " #from ", " #to"\n" \
5873 * atomic_read - read atomic variable
5874 * @v: pointer of type atomic_t
5876 * Atomically reads the value of @v.
5878 -#define atomic_read(v) ACCESS_ONCE((v)->counter)
5879 +static inline int atomic_read(const atomic_t *v)
5881 + return ACCESS_ONCE(v->counter);
5884 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5886 + return ACCESS_ONCE(v->counter);
5890 * atomic_set - set atomic variable
5893 * Atomically sets the value of @v to @i.
5895 -#define atomic_set(v, i) ((v)->counter = (i))
5896 +static inline void atomic_set(atomic_t *v, int i)
5901 -#define ATOMIC_OP(op, c_op, asm_op) \
5902 -static __inline__ void atomic_##op(int i, atomic_t * v) \
5903 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5908 +#ifdef CONFIG_PAX_REFCOUNT
5909 +#define __OVERFLOW_POST \
5911 + " .set noreorder \n" \
5913 + " move %0, %1 \n" \
5914 + " .set reorder \n"
5915 +#define __OVERFLOW_EXTABLE \
5917 + _ASM_EXTABLE(2b, 3b)
5919 +#define __OVERFLOW_POST
5920 +#define __OVERFLOW_EXTABLE
5923 +#define __ATOMIC_OP(op, suffix, asm_op, extable) \
5924 +static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
5926 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
5929 __asm__ __volatile__( \
5930 - " .set arch=r4000 \n" \
5931 - "1: ll %0, %1 # atomic_" #op " \n" \
5932 - " " #asm_op " %0, %2 \n" \
5933 + " .set mips3 \n" \
5934 + "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5935 + "2: " #asm_op " %0, %2 \n" \
5937 " beqzl %0, 1b \n" \
5940 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5942 } else if (kernel_uses_llsc) { \
5946 - __asm__ __volatile__( \
5947 - " .set "MIPS_ISA_LEVEL" \n" \
5948 - " ll %0, %1 # atomic_" #op "\n" \
5949 - " " #asm_op " %0, %2 \n" \
5951 - " .set mips0 \n" \
5952 - : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5954 - } while (unlikely(!temp)); \
5955 + __asm__ __volatile__( \
5956 + " .set "MIPS_ISA_LEVEL" \n" \
5957 + "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5958 + "2: " #asm_op " %0, %2 \n" \
5960 + " beqz %0, 1b \n" \
5962 + " .set mips0 \n" \
5963 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5966 unsigned long flags; \
5968 raw_local_irq_save(flags); \
5969 - v->counter c_op i; \
5970 + __asm__ __volatile__( \
5971 + "2: " #asm_op " %0, %1 \n" \
5973 + : "+r" (v->counter) : "Ir" (i)); \
5974 raw_local_irq_restore(flags); \
5978 -#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
5979 -static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5980 +#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, _unchecked, asm_op##u, ) \
5981 + __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE)
5983 +#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
5984 +static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
5988 @@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5991 __asm__ __volatile__( \
5992 - " .set arch=r4000 \n" \
5993 - "1: ll %1, %2 # atomic_" #op "_return \n" \
5994 - " " #asm_op " %0, %1, %3 \n" \
5995 + " .set mips3 \n" \
5996 + "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
5997 + "2: " #asm_op " %0, %1, %3 \n" \
5999 " beqzl %0, 1b \n" \
6000 - " " #asm_op " %0, %1, %3 \n" \
6003 + "4: " #asm_op " %0, %1, %3 \n" \
6006 : "=&r" (result), "=&r" (temp), \
6007 "+" GCC_OFF_SMALL_ASM() (v->counter) \
6008 @@ -102,26 +159,33 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
6009 } else if (kernel_uses_llsc) { \
6013 - __asm__ __volatile__( \
6014 - " .set "MIPS_ISA_LEVEL" \n" \
6015 - " ll %1, %2 # atomic_" #op "_return \n" \
6016 - " " #asm_op " %0, %1, %3 \n" \
6018 - " .set mips0 \n" \
6019 - : "=&r" (result), "=&r" (temp), \
6020 - "+" GCC_OFF_SMALL_ASM() (v->counter) \
6022 - } while (unlikely(!result)); \
6023 + __asm__ __volatile__( \
6024 + " .set "MIPS_ISA_LEVEL" \n" \
6025 + "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
6026 + "2: " #asm_op " %0, %1, %3 \n" \
6030 + "4: " #asm_op " %0, %1, %3 \n" \
6032 + " .set mips0 \n" \
6033 + : "=&r" (result), "=&r" (temp), \
6034 + "+" GCC_OFF_SMALL_ASM() (v->counter) \
6037 result = temp; result c_op i; \
6039 unsigned long flags; \
6041 raw_local_irq_save(flags); \
6042 - result = v->counter; \
6044 - v->counter = result; \
6045 + __asm__ __volatile__( \
6047 + "2: " #asm_op " %0, %1, %2 \n" \
6051 + : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6053 raw_local_irq_restore(flags); \
6056 @@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
6060 -#define ATOMIC_OPS(op, c_op, asm_op) \
6061 - ATOMIC_OP(op, c_op, asm_op) \
6062 - ATOMIC_OP_RETURN(op, c_op, asm_op)
6063 +#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , ) \
6064 + __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
6066 -ATOMIC_OPS(add, +=, addu)
6067 -ATOMIC_OPS(sub, -=, subu)
6068 +#define ATOMIC_OPS(op, asm_op) \
6069 + ATOMIC_OP(op, asm_op) \
6070 + ATOMIC_OP_RETURN(op, asm_op)
6072 +ATOMIC_OPS(add, add)
6073 +ATOMIC_OPS(sub, sub)
6076 #undef ATOMIC_OP_RETURN
6077 +#undef __ATOMIC_OP_RETURN
6082 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
6083 @@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
6084 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6085 * The function returns the old value of @v minus @i.
6087 -static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6088 +static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
6092 @@ -159,7 +228,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6095 __asm__ __volatile__(
6096 - " .set arch=r4000 \n"
6097 + " .set "MIPS_ISA_LEVEL" \n"
6098 "1: ll %1, %2 # atomic_sub_if_positive\n"
6099 " subu %0, %1, %3 \n"
6101 @@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6105 -#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
6106 -#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
6107 +static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6109 + return cmpxchg(&v->counter, old, new);
6112 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
6115 + return cmpxchg(&(v->counter), old, new);
6118 +static inline int atomic_xchg(atomic_t *v, int new)
6120 + return xchg(&v->counter, new);
6123 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6125 + return xchg(&(v->counter), new);
6129 * __atomic_add_unless - add unless the number is a given value
6130 @@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6132 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6133 #define atomic_inc_return(v) atomic_add_return(1, (v))
6134 +static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6136 + return atomic_add_return_unchecked(1, v);
6140 * atomic_sub_and_test - subtract value from variable and test result
6141 @@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6144 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6145 +static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6147 + return atomic_add_return_unchecked(1, v) == 0;
6151 * atomic_dec_and_test - decrement by 1 and test
6152 @@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6153 * Atomically increments @v by 1.
6155 #define atomic_inc(v) atomic_add(1, (v))
6156 +static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6158 + atomic_add_unchecked(1, v);
6162 * atomic_dec - decrement and test
6163 @@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6164 * Atomically decrements @v by 1.
6166 #define atomic_dec(v) atomic_sub(1, (v))
6167 +static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6169 + atomic_sub_unchecked(1, v);
6173 * atomic_add_negative - add and test if negative
6174 @@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6175 * @v: pointer of type atomic64_t
6178 -#define atomic64_read(v) ACCESS_ONCE((v)->counter)
6179 +static inline long atomic64_read(const atomic64_t *v)
6181 + return ACCESS_ONCE(v->counter);
6184 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6186 + return ACCESS_ONCE(v->counter);
6190 * atomic64_set - set atomic variable
6191 * @v: pointer of type atomic64_t
6192 * @i: required value
6194 -#define atomic64_set(v, i) ((v)->counter = (i))
6195 +static inline void atomic64_set(atomic64_t *v, long i)
6200 -#define ATOMIC64_OP(op, c_op, asm_op) \
6201 -static __inline__ void atomic64_##op(long i, atomic64_t * v) \
6202 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6207 +#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
6208 +static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
6210 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
6213 __asm__ __volatile__( \
6214 - " .set arch=r4000 \n" \
6215 - "1: lld %0, %1 # atomic64_" #op " \n" \
6216 - " " #asm_op " %0, %2 \n" \
6217 + " .set "MIPS_ISA_LEVEL" \n" \
6218 + "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6219 + "2: " #asm_op " %0, %2 \n" \
6221 " beqzl %0, 1b \n" \
6224 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6226 } else if (kernel_uses_llsc) { \
6230 - __asm__ __volatile__( \
6231 - " .set "MIPS_ISA_LEVEL" \n" \
6232 - " lld %0, %1 # atomic64_" #op "\n" \
6233 - " " #asm_op " %0, %2 \n" \
6234 - " scd %0, %1 \n" \
6235 - " .set mips0 \n" \
6236 - : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6238 - } while (unlikely(!temp)); \
6239 + __asm__ __volatile__( \
6240 + " .set "MIPS_ISA_LEVEL" \n" \
6241 + "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6242 + "2: " #asm_op " %0, %2 \n" \
6243 + " scd %0, %1 \n" \
6244 + " beqz %0, 1b \n" \
6246 + " .set mips0 \n" \
6247 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6250 unsigned long flags; \
6252 raw_local_irq_save(flags); \
6253 - v->counter c_op i; \
6254 + __asm__ __volatile__( \
6255 + "2: " #asm_op " %0, %1 \n" \
6257 + : "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); \
6258 raw_local_irq_restore(flags); \
6262 -#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
6263 -static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6264 +#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, _unchecked, asm_op##u, ) \
6265 + __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE)
6267 +#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
6268 +static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
6272 @@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6275 __asm__ __volatile__( \
6276 - " .set arch=r4000 \n" \
6277 + " .set mips3 \n" \
6278 "1: lld %1, %2 # atomic64_" #op "_return\n" \
6279 - " " #asm_op " %0, %1, %3 \n" \
6280 + "2: " #asm_op " %0, %1, %3 \n" \
6282 " beqzl %0, 1b \n" \
6283 - " " #asm_op " %0, %1, %3 \n" \
6286 + "4: " #asm_op " %0, %1, %3 \n" \
6289 : "=&r" (result), "=&r" (temp), \
6290 "+" GCC_OFF_SMALL_ASM() (v->counter) \
6291 @@ -381,27 +510,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6292 } else if (kernel_uses_llsc) { \
6296 - __asm__ __volatile__( \
6297 - " .set "MIPS_ISA_LEVEL" \n" \
6298 - " lld %1, %2 # atomic64_" #op "_return\n" \
6299 - " " #asm_op " %0, %1, %3 \n" \
6300 - " scd %0, %2 \n" \
6301 - " .set mips0 \n" \
6302 - : "=&r" (result), "=&r" (temp), \
6303 - "=" GCC_OFF_SMALL_ASM() (v->counter) \
6304 - : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
6306 - } while (unlikely(!result)); \
6307 + __asm__ __volatile__( \
6308 + " .set "MIPS_ISA_LEVEL" \n" \
6309 + "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
6310 + "2: " #asm_op " %0, %1, %3 \n" \
6311 + " scd %0, %2 \n" \
6312 + " beqz %0, 1b \n" \
6315 + "4: " #asm_op " %0, %1, %3 \n" \
6317 + " .set mips0 \n" \
6318 + : "=&r" (result), "=&r" (temp), \
6319 + "=" GCC_OFF_SMALL_ASM() (v->counter) \
6320 + : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
6323 result = temp; result c_op i; \
6325 unsigned long flags; \
6327 raw_local_irq_save(flags); \
6328 - result = v->counter; \
6330 - v->counter = result; \
6331 + __asm__ __volatile__( \
6333 + "2: " #asm_op " %0, %1, %2 \n" \
6337 + : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6339 raw_local_irq_restore(flags); \
6342 @@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6346 -#define ATOMIC64_OPS(op, c_op, asm_op) \
6347 - ATOMIC64_OP(op, c_op, asm_op) \
6348 - ATOMIC64_OP_RETURN(op, c_op, asm_op)
6349 +#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , ) \
6350 + __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
6352 -ATOMIC64_OPS(add, +=, daddu)
6353 -ATOMIC64_OPS(sub, -=, dsubu)
6354 +#define ATOMIC64_OPS(op, asm_op) \
6355 + ATOMIC64_OP(op, asm_op) \
6356 + ATOMIC64_OP_RETURN(op, asm_op)
6358 +ATOMIC64_OPS(add, dadd)
6359 +ATOMIC64_OPS(sub, dsub)
6362 #undef ATOMIC64_OP_RETURN
6363 +#undef __ATOMIC64_OP_RETURN
6365 +#undef __ATOMIC64_OP
6366 +#undef __OVERFLOW_EXTABLE
6367 +#undef __OVERFLOW_POST
6370 * atomic64_sub_if_positive - conditionally subtract integer from atomic
6371 @@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
6372 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6373 * The function returns the old value of @v minus @i.
6375 -static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6376 +static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6380 @@ -440,7 +584,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6383 __asm__ __volatile__(
6384 - " .set arch=r4000 \n"
6385 + " .set "MIPS_ISA_LEVEL" \n"
6386 "1: lld %1, %2 # atomic64_sub_if_positive\n"
6387 " dsubu %0, %1, %3 \n"
6389 @@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6393 -#define atomic64_cmpxchg(v, o, n) \
6394 - ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6395 -#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6396 +static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6398 + return cmpxchg(&v->counter, old, new);
6401 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6404 + return cmpxchg(&(v->counter), old, new);
6407 +static inline long atomic64_xchg(atomic64_t *v, long new)
6409 + return xchg(&v->counter, new);
6412 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6414 + return xchg(&(v->counter), new);
6418 * atomic64_add_unless - add unless the number is a given value
6419 @@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6421 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6422 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6423 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6426 * atomic64_sub_and_test - subtract value from variable and test result
6427 @@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6430 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6431 +#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6434 * atomic64_dec_and_test - decrement by 1 and test
6435 @@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6436 * Atomically increments @v by 1.
6438 #define atomic64_inc(v) atomic64_add(1, (v))
6439 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6442 * atomic64_dec - decrement and test
6443 @@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6444 * Atomically decrements @v by 1.
6446 #define atomic64_dec(v) atomic64_sub(1, (v))
6447 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6450 * atomic64_add_negative - add and test if negative
6451 diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6452 index 2b8bbbc..4556df6 100644
6453 --- a/arch/mips/include/asm/barrier.h
6454 +++ b/arch/mips/include/asm/barrier.h
6457 compiletime_assert_atomic_type(*p); \
6459 - ACCESS_ONCE(*p) = (v); \
6460 + ACCESS_ONCE_RW(*p) = (v); \
6463 #define smp_load_acquire(p) \
6464 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6465 index b4db69f..8f3b093 100644
6466 --- a/arch/mips/include/asm/cache.h
6467 +++ b/arch/mips/include/asm/cache.h
6469 #ifndef _ASM_CACHE_H
6470 #define _ASM_CACHE_H
6472 +#include <linux/const.h>
6473 #include <kmalloc.h>
6475 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6476 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6477 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6479 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6480 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6481 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6482 index f19e890..a4f8177 100644
6483 --- a/arch/mips/include/asm/elf.h
6484 +++ b/arch/mips/include/asm/elf.h
6485 @@ -417,6 +417,13 @@ extern const char *__elf_platform;
6486 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6489 +#ifdef CONFIG_PAX_ASLR
6490 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6492 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6493 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6496 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6497 struct linux_binprm;
6498 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6499 diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6500 index c1f6afa..38cc6e9 100644
6501 --- a/arch/mips/include/asm/exec.h
6502 +++ b/arch/mips/include/asm/exec.h
6507 -extern unsigned long arch_align_stack(unsigned long sp);
6508 +#define arch_align_stack(x) ((x) & ~0xfUL)
6510 #endif /* _ASM_EXEC_H */
6511 diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6512 index 9e8ef59..1139d6b 100644
6513 --- a/arch/mips/include/asm/hw_irq.h
6514 +++ b/arch/mips/include/asm/hw_irq.h
6517 #include <linux/atomic.h>
6519 -extern atomic_t irq_err_count;
6520 +extern atomic_unchecked_t irq_err_count;
6523 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6524 diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6525 index 8feaed6..1bd8a64 100644
6526 --- a/arch/mips/include/asm/local.h
6527 +++ b/arch/mips/include/asm/local.h
6528 @@ -13,15 +13,25 @@ typedef struct
6533 + atomic_long_unchecked_t a;
6534 +} local_unchecked_t;
6536 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6538 #define local_read(l) atomic_long_read(&(l)->a)
6539 +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6540 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6541 +#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6543 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6544 +#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6545 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6546 +#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6547 #define local_inc(l) atomic_long_inc(&(l)->a)
6548 +#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6549 #define local_dec(l) atomic_long_dec(&(l)->a)
6550 +#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6553 * Same as above, but return the result value
6554 @@ -71,6 +81,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6558 +static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6560 + unsigned long result;
6562 + if (kernel_uses_llsc && R10000_LLSC_WAR) {
6563 + unsigned long temp;
6565 + __asm__ __volatile__(
6567 + "1:" __LL "%1, %2 # local_add_return \n"
6568 + " addu %0, %1, %3 \n"
6570 + " beqzl %0, 1b \n"
6571 + " addu %0, %1, %3 \n"
6573 + : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6574 + : "Ir" (i), "m" (l->a.counter)
6576 + } else if (kernel_uses_llsc) {
6577 + unsigned long temp;
6579 + __asm__ __volatile__(
6581 + "1:" __LL "%1, %2 # local_add_return \n"
6582 + " addu %0, %1, %3 \n"
6585 + " addu %0, %1, %3 \n"
6587 + : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6588 + : "Ir" (i), "m" (l->a.counter)
6591 + unsigned long flags;
6593 + local_irq_save(flags);
6594 + result = l->a.counter;
6596 + l->a.counter = result;
6597 + local_irq_restore(flags);
6603 static __inline__ long local_sub_return(long i, local_t * l)
6605 unsigned long result;
6606 @@ -118,6 +173,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6608 #define local_cmpxchg(l, o, n) \
6609 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6610 +#define local_cmpxchg_unchecked(l, o, n) \
6611 + ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6612 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6615 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6616 index 89dd7fe..a123c97 100644
6617 --- a/arch/mips/include/asm/page.h
6618 +++ b/arch/mips/include/asm/page.h
6619 @@ -118,7 +118,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6620 #ifdef CONFIG_CPU_MIPS32
6621 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6622 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6623 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6624 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6626 typedef struct { unsigned long long pte; } pte_t;
6627 #define pte_val(x) ((x).pte)
6628 diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6629 index b336037..5b874cc 100644
6630 --- a/arch/mips/include/asm/pgalloc.h
6631 +++ b/arch/mips/include/asm/pgalloc.h
6632 @@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6634 set_pud(pud, __pud((unsigned long)pmd));
6637 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6639 + pud_populate(mm, pud, pmd);
6644 diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6645 index 819af9d..439839d 100644
6646 --- a/arch/mips/include/asm/pgtable.h
6647 +++ b/arch/mips/include/asm/pgtable.h
6650 #include <asm/pgtable-bits.h>
6652 +#define ktla_ktva(addr) (addr)
6653 +#define ktva_ktla(addr) (addr)
6656 struct vm_area_struct;
6658 diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6659 index 9c0014e..5101ef5 100644
6660 --- a/arch/mips/include/asm/thread_info.h
6661 +++ b/arch/mips/include/asm/thread_info.h
6662 @@ -100,6 +100,9 @@ static inline struct thread_info *current_thread_info(void)
6663 #define TIF_SECCOMP 4 /* secure computing */
6664 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6665 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6666 +/* li takes a 32bit immediate */
6667 +#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6669 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6670 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6671 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6672 @@ -135,14 +138,16 @@ static inline struct thread_info *current_thread_info(void)
6673 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6674 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6675 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6676 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6678 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6679 _TIF_SYSCALL_AUDIT | \
6680 - _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6681 + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6682 + _TIF_GRSEC_SETXID)
6684 /* work to do in syscall_trace_leave() */
6685 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6686 - _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6687 + _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6689 /* work to do on interrupt/exception return */
6690 #define _TIF_WORK_MASK \
6691 @@ -150,7 +155,7 @@ static inline struct thread_info *current_thread_info(void)
6692 /* work to do on any return to u-space */
6693 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6694 _TIF_WORK_SYSCALL_EXIT | \
6695 - _TIF_SYSCALL_TRACEPOINT)
6696 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6699 * We stash processor id into a COP0 register to retrieve it fast
6700 diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6701 index bf8b324..cec5705 100644
6702 --- a/arch/mips/include/asm/uaccess.h
6703 +++ b/arch/mips/include/asm/uaccess.h
6704 @@ -130,6 +130,7 @@ extern u64 __ua_limit;
6708 +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6709 #define access_ok(type, addr, size) \
6710 likely(__access_ok((addr), (size), __access_mask))
6712 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6713 index 1188e00..41cf144 100644
6714 --- a/arch/mips/kernel/binfmt_elfn32.c
6715 +++ b/arch/mips/kernel/binfmt_elfn32.c
6716 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6717 #undef ELF_ET_DYN_BASE
6718 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6720 +#ifdef CONFIG_PAX_ASLR
6721 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6723 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6724 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6727 #include <asm/processor.h>
6728 #include <linux/module.h>
6729 #include <linux/elfcore.h>
6730 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6731 index 9287678..f870e47 100644
6732 --- a/arch/mips/kernel/binfmt_elfo32.c
6733 +++ b/arch/mips/kernel/binfmt_elfo32.c
6734 @@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6735 #undef ELF_ET_DYN_BASE
6736 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6738 +#ifdef CONFIG_PAX_ASLR
6739 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6741 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6742 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6745 #include <asm/processor.h>
6747 #include <linux/module.h>
6748 diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6749 index a74ec3a..4f06f18 100644
6750 --- a/arch/mips/kernel/i8259.c
6751 +++ b/arch/mips/kernel/i8259.c
6752 @@ -202,7 +202,7 @@ spurious_8259A_irq:
6753 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6754 spurious_irq_mask |= irqmask;
6756 - atomic_inc(&irq_err_count);
6757 + atomic_inc_unchecked(&irq_err_count);
6759 * Theoretically we do not have to handle this IRQ,
6760 * but in Linux this does not cause problems and is
6761 diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6762 index 44a1f79..2bd6aa3 100644
6763 --- a/arch/mips/kernel/irq-gt641xx.c
6764 +++ b/arch/mips/kernel/irq-gt641xx.c
6765 @@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6769 - atomic_inc(&irq_err_count);
6770 + atomic_inc_unchecked(&irq_err_count);
6773 void __init gt641xx_irq_init(void)
6774 diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6775 index 3c8a18a..b4929b6 100644
6776 --- a/arch/mips/kernel/irq.c
6777 +++ b/arch/mips/kernel/irq.c
6778 @@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
6779 printk("unexpected IRQ # %d\n", irq);
6782 -atomic_t irq_err_count;
6783 +atomic_unchecked_t irq_err_count;
6785 int arch_show_interrupts(struct seq_file *p, int prec)
6787 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6788 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6792 asmlinkage void spurious_interrupt(void)
6794 - atomic_inc(&irq_err_count);
6795 + atomic_inc_unchecked(&irq_err_count);
6798 void __init init_IRQ(void)
6799 @@ -110,6 +110,8 @@ void __init init_IRQ(void)
6802 #ifdef CONFIG_DEBUG_STACKOVERFLOW
6804 +extern void gr_handle_kernel_exploit(void);
6805 static inline void check_stack_overflow(void)
6808 @@ -125,6 +127,7 @@ static inline void check_stack_overflow(void)
6809 printk("do_IRQ: stack overflow: %ld\n",
6810 sp - sizeof(struct thread_info));
6812 + gr_handle_kernel_exploit();
6816 diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
6817 index 0614717..002fa43 100644
6818 --- a/arch/mips/kernel/pm-cps.c
6819 +++ b/arch/mips/kernel/pm-cps.c
6820 @@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
6821 nc_core_ready_count = nc_addr;
6823 /* Ensure ready_count is zero-initialised before the assembly runs */
6824 - ACCESS_ONCE(*nc_core_ready_count) = 0;
6825 + ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
6826 coupled_barrier(&per_cpu(pm_barrier, core), online);
6828 /* Run the generated entry code */
6829 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6830 index f2975d4..f61d355 100644
6831 --- a/arch/mips/kernel/process.c
6832 +++ b/arch/mips/kernel/process.c
6833 @@ -541,18 +541,6 @@ out:
6838 - * Don't forget that the stack pointer must be aligned on a 8 bytes
6839 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6841 -unsigned long arch_align_stack(unsigned long sp)
6843 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6844 - sp -= get_random_int() & ~PAGE_MASK;
6846 - return sp & ALMASK;
6849 static void arch_dump_stack(void *info)
6851 struct pt_regs *regs;
6852 diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6853 index e933a30..0d02625 100644
6854 --- a/arch/mips/kernel/ptrace.c
6855 +++ b/arch/mips/kernel/ptrace.c
6856 @@ -785,6 +785,10 @@ long arch_ptrace(struct task_struct *child, long request,
6860 +#ifdef CONFIG_GRKERNSEC_SETXID
6861 +extern void gr_delayed_cred_worker(void);
6865 * Notification of system call entry/exit
6866 * - triggered by current->work.syscall_trace
6867 @@ -803,6 +807,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
6868 tracehook_report_syscall_entry(regs))
6871 +#ifdef CONFIG_GRKERNSEC_SETXID
6872 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6873 + gr_delayed_cred_worker();
6876 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6877 trace_sys_enter(regs, regs->regs[2]);
6879 diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6880 index 2242bdd..b284048 100644
6881 --- a/arch/mips/kernel/sync-r4k.c
6882 +++ b/arch/mips/kernel/sync-r4k.c
6884 #include <asm/mipsregs.h>
6886 static atomic_t count_start_flag = ATOMIC_INIT(0);
6887 -static atomic_t count_count_start = ATOMIC_INIT(0);
6888 -static atomic_t count_count_stop = ATOMIC_INIT(0);
6889 +static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6890 +static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6891 static atomic_t count_reference = ATOMIC_INIT(0);
6894 @@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
6896 for (i = 0; i < NR_LOOPS; i++) {
6897 /* slaves loop on '!= 2' */
6898 - while (atomic_read(&count_count_start) != 1)
6899 + while (atomic_read_unchecked(&count_count_start) != 1)
6901 - atomic_set(&count_count_stop, 0);
6902 + atomic_set_unchecked(&count_count_stop, 0);
6905 /* this lets the slaves write their count register */
6906 - atomic_inc(&count_count_start);
6907 + atomic_inc_unchecked(&count_count_start);
6910 * Everyone initialises count in the last loop:
6911 @@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
6913 * Wait for all slaves to leave the synchronization point:
6915 - while (atomic_read(&count_count_stop) != 1)
6916 + while (atomic_read_unchecked(&count_count_stop) != 1)
6918 - atomic_set(&count_count_start, 0);
6919 + atomic_set_unchecked(&count_count_start, 0);
6921 - atomic_inc(&count_count_stop);
6922 + atomic_inc_unchecked(&count_count_stop);
6924 /* Arrange for an interrupt in a short while */
6925 write_c0_compare(read_c0_count() + COUNTON);
6926 @@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
6927 initcount = atomic_read(&count_reference);
6929 for (i = 0; i < NR_LOOPS; i++) {
6930 - atomic_inc(&count_count_start);
6931 - while (atomic_read(&count_count_start) != 2)
6932 + atomic_inc_unchecked(&count_count_start);
6933 + while (atomic_read_unchecked(&count_count_start) != 2)
6937 @@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
6938 if (i == NR_LOOPS-1)
6939 write_c0_count(initcount);
6941 - atomic_inc(&count_count_stop);
6942 - while (atomic_read(&count_count_stop) != 2)
6943 + atomic_inc_unchecked(&count_count_stop);
6944 + while (atomic_read_unchecked(&count_count_stop) != 2)
6947 /* Arrange for an interrupt in a short while */
6948 diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6949 index d2d1c19..3e21d8d 100644
6950 --- a/arch/mips/kernel/traps.c
6951 +++ b/arch/mips/kernel/traps.c
6952 @@ -689,7 +689,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
6955 prev_state = exception_enter();
6956 - die_if_kernel("Integer overflow", regs);
6957 + if (unlikely(!user_mode(regs))) {
6959 +#ifdef CONFIG_PAX_REFCOUNT
6960 + if (fixup_exception(regs)) {
6961 + pax_report_refcount_overflow(regs);
6962 + exception_exit(prev_state);
6967 + die("Integer overflow", regs);
6970 info.si_code = FPE_INTOVF;
6971 info.si_signo = SIGFPE;
6972 diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
6973 index 52f205a..335927c 100644
6974 --- a/arch/mips/kvm/mips.c
6975 +++ b/arch/mips/kvm/mips.c
6976 @@ -1013,7 +1013,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
6980 -int kvm_arch_init(void *opaque)
6981 +int kvm_arch_init(const void *opaque)
6983 if (kvm_mips_callbacks) {
6984 kvm_err("kvm: module already exists\n");
6985 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6986 index 7ff8637..6004edb 100644
6987 --- a/arch/mips/mm/fault.c
6988 +++ b/arch/mips/mm/fault.c
6991 int show_unhandled_signals = 1;
6993 +#ifdef CONFIG_PAX_PAGEEXEC
6994 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6998 + printk(KERN_ERR "PAX: bytes at PC: ");
6999 + for (i = 0; i < 5; i++) {
7001 + if (get_user(c, (unsigned int *)pc+i))
7002 + printk(KERN_CONT "???????? ");
7004 + printk(KERN_CONT "%08x ", c);
7011 * This routine handles page faults. It determines the address,
7012 * and the problem, and then passes it off to one of the appropriate
7013 @@ -206,6 +223,14 @@ bad_area:
7014 bad_area_nosemaphore:
7015 /* User mode accesses just cause a SIGSEGV */
7016 if (user_mode(regs)) {
7018 +#ifdef CONFIG_PAX_PAGEEXEC
7019 + if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
7020 + pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
7021 + do_group_exit(SIGKILL);
7025 tsk->thread.cp0_badvaddr = address;
7026 tsk->thread.error_code = write;
7027 if (show_unhandled_signals &&
7028 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
7029 index 5c81fdd..db158d3 100644
7030 --- a/arch/mips/mm/mmap.c
7031 +++ b/arch/mips/mm/mmap.c
7032 @@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7033 struct vm_area_struct *vma;
7034 unsigned long addr = addr0;
7036 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7037 struct vm_unmapped_area_info info;
7039 if (unlikely(len > TASK_SIZE))
7040 @@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7043 /* requesting a specific address */
7045 +#ifdef CONFIG_PAX_RANDMMAP
7046 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
7051 addr = COLOUR_ALIGN(addr, pgoff);
7052 @@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7053 addr = PAGE_ALIGN(addr);
7055 vma = find_vma(mm, addr);
7056 - if (TASK_SIZE - len >= addr &&
7057 - (!vma || addr + len <= vma->vm_start))
7058 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7063 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
7064 info.align_offset = pgoff << PAGE_SHIFT;
7065 + info.threadstack_offset = offset;
7068 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
7069 @@ -160,45 +166,34 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7071 unsigned long random_factor = 0UL;
7073 +#ifdef CONFIG_PAX_RANDMMAP
7074 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7077 if (current->flags & PF_RANDOMIZE)
7078 random_factor = arch_mmap_rnd();
7080 if (mmap_is_legacy()) {
7081 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7083 +#ifdef CONFIG_PAX_RANDMMAP
7084 + if (mm->pax_flags & MF_PAX_RANDMMAP)
7085 + mm->mmap_base += mm->delta_mmap;
7088 mm->get_unmapped_area = arch_get_unmapped_area;
7090 mm->mmap_base = mmap_base(random_factor);
7092 +#ifdef CONFIG_PAX_RANDMMAP
7093 + if (mm->pax_flags & MF_PAX_RANDMMAP)
7094 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7097 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7101 -static inline unsigned long brk_rnd(void)
7103 - unsigned long rnd = get_random_int();
7105 - rnd = rnd << PAGE_SHIFT;
7106 - /* 8MB for 32bit, 256MB for 64bit */
7107 - if (TASK_IS_32BIT_ADDR)
7108 - rnd = rnd & 0x7ffffful;
7110 - rnd = rnd & 0xffffffful;
7115 -unsigned long arch_randomize_brk(struct mm_struct *mm)
7117 - unsigned long base = mm->brk;
7118 - unsigned long ret;
7120 - ret = PAGE_ALIGN(base + brk_rnd());
7122 - if (ret < mm->brk)
7128 int __virt_addr_valid(const volatile void *kaddr)
7130 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
7131 diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7132 index a2358b4..7cead4f 100644
7133 --- a/arch/mips/sgi-ip27/ip27-nmi.c
7134 +++ b/arch/mips/sgi-ip27/ip27-nmi.c
7135 @@ -187,9 +187,9 @@ void
7138 #ifndef REAL_NMI_SIGNAL
7139 - static atomic_t nmied_cpus = ATOMIC_INIT(0);
7140 + static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7142 - atomic_inc(&nmied_cpus);
7143 + atomic_inc_unchecked(&nmied_cpus);
7146 * Only allow 1 cpu to proceed
7147 @@ -233,7 +233,7 @@ cont_nmi_dump(void)
7151 - while (atomic_read(&nmied_cpus) != num_online_cpus());
7152 + while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7156 diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7157 index a046b30..6799527 100644
7158 --- a/arch/mips/sni/rm200.c
7159 +++ b/arch/mips/sni/rm200.c
7160 @@ -270,7 +270,7 @@ spurious_8259A_irq:
7161 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7162 spurious_irq_mask |= irqmask;
7164 - atomic_inc(&irq_err_count);
7165 + atomic_inc_unchecked(&irq_err_count);
7167 * Theoretically we do not have to handle this IRQ,
7168 * but in Linux this does not cause problems and is
7169 diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7170 index 41e873b..34d33a7 100644
7171 --- a/arch/mips/vr41xx/common/icu.c
7172 +++ b/arch/mips/vr41xx/common/icu.c
7173 @@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7175 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7177 - atomic_inc(&irq_err_count);
7178 + atomic_inc_unchecked(&irq_err_count);
7182 diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7183 index ae0e4ee..e8f0692 100644
7184 --- a/arch/mips/vr41xx/common/irq.c
7185 +++ b/arch/mips/vr41xx/common/irq.c
7186 @@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7187 irq_cascade_t *cascade;
7189 if (irq >= NR_IRQS) {
7190 - atomic_inc(&irq_err_count);
7191 + atomic_inc_unchecked(&irq_err_count);
7195 @@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7196 ret = cascade->get_irq(irq);
7199 - atomic_inc(&irq_err_count);
7200 + atomic_inc_unchecked(&irq_err_count);
7203 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7204 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7205 index 967d144..db12197 100644
7206 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7207 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7209 #ifndef _ASM_PROC_CACHE_H
7210 #define _ASM_PROC_CACHE_H
7212 +#include <linux/const.h>
7216 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7217 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7218 -#define L1_CACHE_BYTES 16 /* bytes per entry */
7219 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7220 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7221 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7223 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7224 diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7225 index bcb5df2..84fabd2 100644
7226 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7227 +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7229 #ifndef _ASM_PROC_CACHE_H
7230 #define _ASM_PROC_CACHE_H
7232 +#include <linux/const.h>
7237 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7238 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7239 -#define L1_CACHE_BYTES 32 /* bytes per entry */
7240 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7241 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7242 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7244 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7245 diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7246 index 4ce7a01..449202a 100644
7247 --- a/arch/openrisc/include/asm/cache.h
7248 +++ b/arch/openrisc/include/asm/cache.h
7250 #ifndef __ASM_OPENRISC_CACHE_H
7251 #define __ASM_OPENRISC_CACHE_H
7253 +#include <linux/const.h>
7255 /* FIXME: How can we replace these with values from the CPU...
7256 * they shouldn't be hard-coded!
7259 -#define L1_CACHE_BYTES 16
7260 #define L1_CACHE_SHIFT 4
7261 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7263 #endif /* __ASM_OPENRISC_CACHE_H */
7264 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7265 index 226f8ca9..9d9b87d 100644
7266 --- a/arch/parisc/include/asm/atomic.h
7267 +++ b/arch/parisc/include/asm/atomic.h
7268 @@ -273,6 +273,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7272 +#define atomic64_read_unchecked(v) atomic64_read(v)
7273 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7274 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7275 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7276 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7277 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
7278 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7279 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
7280 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7282 #endif /* !CONFIG_64BIT */
7285 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7286 index 47f11c7..3420df2 100644
7287 --- a/arch/parisc/include/asm/cache.h
7288 +++ b/arch/parisc/include/asm/cache.h
7290 #ifndef __ARCH_PARISC_CACHE_H
7291 #define __ARCH_PARISC_CACHE_H
7293 +#include <linux/const.h>
7296 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7298 * just ruin performance.
7301 -#define L1_CACHE_BYTES 64
7302 #define L1_CACHE_SHIFT 6
7304 -#define L1_CACHE_BYTES 32
7305 #define L1_CACHE_SHIFT 5
7308 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7310 #ifndef __ASSEMBLY__
7312 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7313 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7314 index 78c9fd3..42fa66a 100644
7315 --- a/arch/parisc/include/asm/elf.h
7316 +++ b/arch/parisc/include/asm/elf.h
7317 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7319 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7321 +#ifdef CONFIG_PAX_ASLR
7322 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
7324 +#define PAX_DELTA_MMAP_LEN 16
7325 +#define PAX_DELTA_STACK_LEN 16
7328 /* This yields a mask that user programs can use to figure out what
7329 instruction set this CPU supports. This could be done in user space,
7330 but it's not easy, and we've already done it here. */
7331 diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7332 index 3a08eae..08fef28 100644
7333 --- a/arch/parisc/include/asm/pgalloc.h
7334 +++ b/arch/parisc/include/asm/pgalloc.h
7335 @@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7336 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7339 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7341 + pgd_populate(mm, pgd, pmd);
7344 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7346 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7347 @@ -72,7 +77,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7349 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7351 - if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
7352 + if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
7354 * This is the permanent pmd attached to the pgd;
7356 @@ -81,6 +86,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7361 free_pages((unsigned long)pmd, PMD_ORDER);
7364 @@ -96,6 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7365 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7366 #define pmd_free(mm, x) do { } while (0)
7367 #define pgd_populate(mm, pmd, pte) BUG()
7368 +#define pgd_populate_kernel(mm, pmd, pte) BUG()
7372 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7373 index 0a18375..d613939 100644
7374 --- a/arch/parisc/include/asm/pgtable.h
7375 +++ b/arch/parisc/include/asm/pgtable.h
7376 @@ -213,6 +213,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7377 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7378 #define PAGE_COPY PAGE_EXECREAD
7379 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7381 +#ifdef CONFIG_PAX_PAGEEXEC
7382 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7383 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7384 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7386 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
7387 +# define PAGE_COPY_NOEXEC PAGE_COPY
7388 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
7391 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7392 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7393 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7394 diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7395 index 0abdd4c..1af92f0 100644
7396 --- a/arch/parisc/include/asm/uaccess.h
7397 +++ b/arch/parisc/include/asm/uaccess.h
7398 @@ -243,10 +243,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7399 const void __user *from,
7402 - int sz = __compiletime_object_size(to);
7403 + size_t sz = __compiletime_object_size(to);
7406 - if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7407 + if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7408 ret = __copy_from_user(to, from, n);
7410 copy_from_user_overflow();
7411 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7412 index 3c63a82..b1d6ee9 100644
7413 --- a/arch/parisc/kernel/module.c
7414 +++ b/arch/parisc/kernel/module.c
7417 /* three functions to determine where in the module core
7418 * or init pieces the location is */
7419 +static inline int in_init_rx(struct module *me, void *loc)
7421 + return (loc >= me->module_init_rx &&
7422 + loc < (me->module_init_rx + me->init_size_rx));
7425 +static inline int in_init_rw(struct module *me, void *loc)
7427 + return (loc >= me->module_init_rw &&
7428 + loc < (me->module_init_rw + me->init_size_rw));
7431 static inline int in_init(struct module *me, void *loc)
7433 - return (loc >= me->module_init &&
7434 - loc <= (me->module_init + me->init_size));
7435 + return in_init_rx(me, loc) || in_init_rw(me, loc);
7438 +static inline int in_core_rx(struct module *me, void *loc)
7440 + return (loc >= me->module_core_rx &&
7441 + loc < (me->module_core_rx + me->core_size_rx));
7444 +static inline int in_core_rw(struct module *me, void *loc)
7446 + return (loc >= me->module_core_rw &&
7447 + loc < (me->module_core_rw + me->core_size_rw));
7450 static inline int in_core(struct module *me, void *loc)
7452 - return (loc >= me->module_core &&
7453 - loc <= (me->module_core + me->core_size));
7454 + return in_core_rx(me, loc) || in_core_rw(me, loc);
7457 static inline int in_local(struct module *me, void *loc)
7458 @@ -367,13 +389,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7461 /* align things a bit */
7462 - me->core_size = ALIGN(me->core_size, 16);
7463 - me->arch.got_offset = me->core_size;
7464 - me->core_size += gots * sizeof(struct got_entry);
7465 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
7466 + me->arch.got_offset = me->core_size_rw;
7467 + me->core_size_rw += gots * sizeof(struct got_entry);
7469 - me->core_size = ALIGN(me->core_size, 16);
7470 - me->arch.fdesc_offset = me->core_size;
7471 - me->core_size += fdescs * sizeof(Elf_Fdesc);
7472 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
7473 + me->arch.fdesc_offset = me->core_size_rw;
7474 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7476 me->arch.got_max = gots;
7477 me->arch.fdesc_max = fdescs;
7478 @@ -391,7 +413,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7482 - got = me->module_core + me->arch.got_offset;
7483 + got = me->module_core_rw + me->arch.got_offset;
7484 for (i = 0; got[i].addr; i++)
7485 if (got[i].addr == value)
7487 @@ -409,7 +431,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7489 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7491 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7492 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7495 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7496 @@ -427,7 +449,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7498 /* Create new one */
7499 fdesc->addr = value;
7500 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7501 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7502 return (Elf_Addr)fdesc;
7504 #endif /* CONFIG_64BIT */
7505 @@ -839,7 +861,7 @@ register_unwind_table(struct module *me,
7507 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7508 end = table + sechdrs[me->arch.unwind_section].sh_size;
7509 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7510 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7512 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7513 me->arch.unwind_section, table, end, gp);
7514 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7515 index 5aba01a..47cdd5a 100644
7516 --- a/arch/parisc/kernel/sys_parisc.c
7517 +++ b/arch/parisc/kernel/sys_parisc.c
7518 @@ -92,6 +92,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7519 unsigned long task_size = TASK_SIZE;
7520 int do_color_align, last_mmap;
7521 struct vm_unmapped_area_info info;
7522 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7524 if (len > task_size)
7526 @@ -109,6 +110,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7530 +#ifdef CONFIG_PAX_RANDMMAP
7531 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7535 if (do_color_align && last_mmap)
7536 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7537 @@ -127,6 +132,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7538 info.high_limit = mmap_upper_limit();
7539 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7540 info.align_offset = shared_align_offset(last_mmap, pgoff);
7541 + info.threadstack_offset = offset;
7542 addr = vm_unmapped_area(&info);
7545 @@ -146,6 +152,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7546 unsigned long addr = addr0;
7547 int do_color_align, last_mmap;
7548 struct vm_unmapped_area_info info;
7549 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7552 /* This should only ever run for 32-bit processes. */
7553 @@ -170,6 +177,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7556 /* requesting a specific address */
7557 +#ifdef CONFIG_PAX_RANDMMAP
7558 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7562 if (do_color_align && last_mmap)
7563 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7564 @@ -187,6 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7565 info.high_limit = mm->mmap_base;
7566 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7567 info.align_offset = shared_align_offset(last_mmap, pgoff);
7568 + info.threadstack_offset = offset;
7569 addr = vm_unmapped_area(&info);
7570 if (!(addr & ~PAGE_MASK))
7572 @@ -252,6 +264,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7573 mm->mmap_legacy_base = mmap_legacy_base();
7574 mm->mmap_base = mmap_upper_limit();
7576 +#ifdef CONFIG_PAX_RANDMMAP
7577 + if (mm->pax_flags & MF_PAX_RANDMMAP) {
7578 + mm->mmap_legacy_base += mm->delta_mmap;
7579 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7583 if (mmap_is_legacy()) {
7584 mm->mmap_base = mm->mmap_legacy_base;
7585 mm->get_unmapped_area = arch_get_unmapped_area;
7586 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7587 index 47ee620..1107387 100644
7588 --- a/arch/parisc/kernel/traps.c
7589 +++ b/arch/parisc/kernel/traps.c
7590 @@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7592 down_read(¤t->mm->mmap_sem);
7593 vma = find_vma(current->mm,regs->iaoq[0]);
7594 - if (vma && (regs->iaoq[0] >= vma->vm_start)
7595 - && (vma->vm_flags & VM_EXEC)) {
7597 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7598 fault_address = regs->iaoq[0];
7599 fault_space = regs->iasq[0];
7601 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7602 index e5120e6..8ddb5cc 100644
7603 --- a/arch/parisc/mm/fault.c
7604 +++ b/arch/parisc/mm/fault.c
7606 #include <linux/sched.h>
7607 #include <linux/interrupt.h>
7608 #include <linux/module.h>
7609 +#include <linux/unistd.h>
7611 #include <asm/uaccess.h>
7612 #include <asm/traps.h>
7613 @@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7614 static unsigned long
7615 parisc_acctyp(unsigned long code, unsigned int inst)
7617 - if (code == 6 || code == 16)
7618 + if (code == 6 || code == 7 || code == 16)
7621 switch (inst & 0xf0000000) {
7622 @@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7626 +#ifdef CONFIG_PAX_PAGEEXEC
7628 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7630 + * returns 1 when task should be killed
7631 + * 2 when rt_sigreturn trampoline was detected
7632 + * 3 when unpatched PLT trampoline was detected
7634 +static int pax_handle_fetch_fault(struct pt_regs *regs)
7637 +#ifdef CONFIG_PAX_EMUPLT
7640 + do { /* PaX: unpatched PLT emulation */
7641 + unsigned int bl, depwi;
7643 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7644 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7649 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7650 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7652 + err = get_user(ldw, (unsigned int *)addr);
7653 + err |= get_user(bv, (unsigned int *)(addr+4));
7654 + err |= get_user(ldw2, (unsigned int *)(addr+8));
7659 + if (ldw == 0x0E801096U &&
7660 + bv == 0xEAC0C000U &&
7661 + ldw2 == 0x0E881095U)
7663 + unsigned int resolver, map;
7665 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7666 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7670 + regs->gr[20] = instruction_pointer(regs)+8;
7671 + regs->gr[21] = map;
7672 + regs->gr[22] = resolver;
7673 + regs->iaoq[0] = resolver | 3UL;
7674 + regs->iaoq[1] = regs->iaoq[0] + 4;
7681 +#ifdef CONFIG_PAX_EMUTRAMP
7683 +#ifndef CONFIG_PAX_EMUSIGRT
7684 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7688 + do { /* PaX: rt_sigreturn emulation */
7689 + unsigned int ldi1, ldi2, bel, nop;
7691 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7692 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7693 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7694 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7699 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7700 + ldi2 == 0x3414015AU &&
7701 + bel == 0xE4008200U &&
7702 + nop == 0x08000240U)
7704 + regs->gr[25] = (ldi1 & 2) >> 1;
7705 + regs->gr[20] = __NR_rt_sigreturn;
7706 + regs->gr[31] = regs->iaoq[1] + 16;
7707 + regs->sr[0] = regs->iasq[1];
7708 + regs->iaoq[0] = 0x100UL;
7709 + regs->iaoq[1] = regs->iaoq[0] + 4;
7710 + regs->iasq[0] = regs->sr[2];
7711 + regs->iasq[1] = regs->sr[2];
7720 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7724 + printk(KERN_ERR "PAX: bytes at PC: ");
7725 + for (i = 0; i < 5; i++) {
7727 + if (get_user(c, (unsigned int *)pc+i))
7728 + printk(KERN_CONT "???????? ");
7730 + printk(KERN_CONT "%08x ", c);
7736 int fixup_exception(struct pt_regs *regs)
7738 const struct exception_table_entry *fix;
7739 @@ -234,8 +345,33 @@ retry:
7743 - if ((vma->vm_flags & acc_type) != acc_type)
7744 + if ((vma->vm_flags & acc_type) != acc_type) {
7746 +#ifdef CONFIG_PAX_PAGEEXEC
7747 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7748 + (address & ~3UL) == instruction_pointer(regs))
7750 + up_read(&mm->mmap_sem);
7751 + switch (pax_handle_fetch_fault(regs)) {
7753 +#ifdef CONFIG_PAX_EMUPLT
7758 +#ifdef CONFIG_PAX_EMUTRAMP
7764 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7765 + do_group_exit(SIGKILL);
7773 * If for any reason at all we couldn't handle the fault, make
7774 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7775 index 190cc48..48439ce 100644
7776 --- a/arch/powerpc/Kconfig
7777 +++ b/arch/powerpc/Kconfig
7778 @@ -413,6 +413,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
7780 bool "kexec system call"
7781 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7782 + depends on !GRKERNSEC_KMEM
7784 kexec is a system call that implements the ability to shutdown your
7785 current kernel, and to start another kernel. It is like a reboot
7786 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7787 index 512d278..d31fadd 100644
7788 --- a/arch/powerpc/include/asm/atomic.h
7789 +++ b/arch/powerpc/include/asm/atomic.h
7792 #define ATOMIC_INIT(i) { (i) }
7794 +#define _ASM_EXTABLE(from, to) \
7795 +" .section __ex_table,\"a\"\n" \
7796 + PPC_LONG" " #from ", " #to"\n" \
7799 static __inline__ int atomic_read(const atomic_t *v)
7802 @@ -21,39 +26,80 @@ static __inline__ int atomic_read(const atomic_t *v)
7806 +static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
7810 + __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7815 static __inline__ void atomic_set(atomic_t *v, int i)
7817 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7820 -#define ATOMIC_OP(op, asm_op) \
7821 -static __inline__ void atomic_##op(int a, atomic_t *v) \
7822 +static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7824 + __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7827 +#ifdef CONFIG_PAX_REFCOUNT
7828 +#define __REFCOUNT_OP(op) op##o.
7829 +#define __OVERFLOW_PRE \
7831 +#define __OVERFLOW_POST \
7832 + " bf 4*cr0+so, 3f\n" \
7833 + "2: .long 0x00c00b00\n" \
7835 +#define __OVERFLOW_EXTABLE \
7837 + _ASM_EXTABLE(2b, 4b)
7839 +#define __REFCOUNT_OP(op) op
7840 +#define __OVERFLOW_PRE
7841 +#define __OVERFLOW_POST
7842 +#define __OVERFLOW_EXTABLE
7845 +#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7846 +static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \
7850 __asm__ __volatile__( \
7851 -"1: lwarx %0,0,%3 # atomic_" #op "\n" \
7852 +"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \
7854 #asm_op " %0,%2,%0\n" \
7856 PPC405_ERR77(0,%3) \
7857 " stwcx. %0,0,%3 \n" \
7860 : "=&r" (t), "+m" (v->counter) \
7861 : "r" (a), "r" (&v->counter) \
7865 -#define ATOMIC_OP_RETURN(op, asm_op) \
7866 -static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7867 +#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \
7868 + __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7870 +#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
7871 +static inline int atomic_##op##_return##suffix(int a, atomic##suffix##_t *v)\
7875 __asm__ __volatile__( \
7876 PPC_ATOMIC_ENTRY_BARRIER \
7877 -"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
7878 +"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "\n" \
7880 #asm_op " %0,%1,%0\n" \
7882 PPC405_ERR77(0,%2) \
7883 " stwcx. %0,0,%2 \n" \
7886 PPC_ATOMIC_EXIT_BARRIER \
7888 : "r" (a), "r" (&v->counter) \
7889 @@ -62,6 +108,9 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7893 +#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
7894 + __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7896 #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
7898 ATOMIC_OPS(add, add)
7899 @@ -69,42 +118,29 @@ ATOMIC_OPS(sub, subf)
7902 #undef ATOMIC_OP_RETURN
7903 +#undef __ATOMIC_OP_RETURN
7907 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
7909 -static __inline__ void atomic_inc(atomic_t *v)
7913 + * atomic_inc - increment atomic variable
7914 + * @v: pointer of type atomic_t
7916 + * Automatically increments @v by 1
7918 +#define atomic_inc(v) atomic_add(1, (v))
7919 +#define atomic_inc_return(v) atomic_add_return(1, (v))
7921 - __asm__ __volatile__(
7922 -"1: lwarx %0,0,%2 # atomic_inc\n\
7924 - PPC405_ERR77(0,%2)
7925 -" stwcx. %0,0,%2 \n\
7927 - : "=&r" (t), "+m" (v->counter)
7928 - : "r" (&v->counter)
7930 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7932 + atomic_add_unchecked(1, v);
7935 -static __inline__ int atomic_inc_return(atomic_t *v)
7936 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7940 - __asm__ __volatile__(
7941 - PPC_ATOMIC_ENTRY_BARRIER
7942 -"1: lwarx %0,0,%1 # atomic_inc_return\n\
7944 - PPC405_ERR77(0,%1)
7945 -" stwcx. %0,0,%1 \n\
7947 - PPC_ATOMIC_EXIT_BARRIER
7949 - : "r" (&v->counter)
7950 - : "cc", "xer", "memory");
7953 + return atomic_add_return_unchecked(1, v);
7957 @@ -117,43 +153,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
7959 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7961 -static __inline__ void atomic_dec(atomic_t *v)
7962 +static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7966 - __asm__ __volatile__(
7967 -"1: lwarx %0,0,%2 # atomic_dec\n\
7969 - PPC405_ERR77(0,%2)\
7970 -" stwcx. %0,0,%2\n\
7972 - : "=&r" (t), "+m" (v->counter)
7973 - : "r" (&v->counter)
7975 + return atomic_add_return_unchecked(1, v) == 0;
7978 -static __inline__ int atomic_dec_return(atomic_t *v)
7980 + * atomic_dec - decrement atomic variable
7981 + * @v: pointer of type atomic_t
7983 + * Atomically decrements @v by 1
7985 +#define atomic_dec(v) atomic_sub(1, (v))
7986 +#define atomic_dec_return(v) atomic_sub_return(1, (v))
7988 +static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
7992 - __asm__ __volatile__(
7993 - PPC_ATOMIC_ENTRY_BARRIER
7994 -"1: lwarx %0,0,%1 # atomic_dec_return\n\
7996 - PPC405_ERR77(0,%1)
7997 -" stwcx. %0,0,%1\n\
7999 - PPC_ATOMIC_EXIT_BARRIER
8001 - : "r" (&v->counter)
8002 - : "cc", "xer", "memory");
8005 + atomic_sub_unchecked(1, v);
8008 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8009 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
8011 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8013 + return cmpxchg(&(v->counter), old, new);
8016 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8018 + return xchg(&(v->counter), new);
8022 * __atomic_add_unless - add unless the number is a given value
8023 * @v: pointer of type atomic_t
8024 @@ -171,11 +202,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
8025 PPC_ATOMIC_ENTRY_BARRIER
8026 "1: lwarx %0,0,%1 # __atomic_add_unless\n\
8032 +#ifdef CONFIG_PAX_REFCOUNT
8034 +" addo. %0,%2,%0\n"
8035 +" bf 4*cr0+so, 4f\n"
8036 +"3:.long " "0x00c00b00""\n"
8043 " stwcx. %0,0,%1 \n\
8047 +#ifdef CONFIG_PAX_REFCOUNT
8048 + _ASM_EXTABLE(3b, 5b)
8051 PPC_ATOMIC_EXIT_BARRIER
8054 @@ -248,6 +295,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
8056 #define atomic_dec_if_positive atomic_dec_if_positive
8058 +#define smp_mb__before_atomic_dec() smp_mb()
8059 +#define smp_mb__after_atomic_dec() smp_mb()
8060 +#define smp_mb__before_atomic_inc() smp_mb()
8061 +#define smp_mb__after_atomic_inc() smp_mb()
8063 #ifdef __powerpc64__
8065 #define ATOMIC64_INIT(i) { (i) }
8066 @@ -261,37 +313,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
8070 +static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8074 + __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
8079 static __inline__ void atomic64_set(atomic64_t *v, long i)
8081 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8084 -#define ATOMIC64_OP(op, asm_op) \
8085 -static __inline__ void atomic64_##op(long a, atomic64_t *v) \
8086 +static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8088 + __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8091 +#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \
8092 +static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
8096 __asm__ __volatile__( \
8097 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
8099 #asm_op " %0,%2,%0\n" \
8101 " stdcx. %0,0,%3 \n" \
8104 : "=&r" (t), "+m" (v->counter) \
8105 : "r" (a), "r" (&v->counter) \
8109 -#define ATOMIC64_OP_RETURN(op, asm_op) \
8110 -static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8111 +#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \
8112 + __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8114 +#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
8115 +static inline long atomic64_##op##_return##suffix(long a, atomic64##suffix##_t *v)\
8119 __asm__ __volatile__( \
8120 PPC_ATOMIC_ENTRY_BARRIER \
8121 "1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
8123 #asm_op " %0,%1,%0\n" \
8125 " stdcx. %0,0,%2 \n" \
8128 PPC_ATOMIC_EXIT_BARRIER \
8130 : "r" (a), "r" (&v->counter) \
8131 @@ -300,6 +375,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8135 +#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
8136 + __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8138 #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
8140 ATOMIC64_OPS(add, add)
8141 @@ -307,40 +385,33 @@ ATOMIC64_OPS(sub, subf)
8144 #undef ATOMIC64_OP_RETURN
8145 +#undef __ATOMIC64_OP_RETURN
8147 +#undef __ATOMIC64_OP
8148 +#undef __OVERFLOW_EXTABLE
8149 +#undef __OVERFLOW_POST
8150 +#undef __OVERFLOW_PRE
8151 +#undef __REFCOUNT_OP
8153 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
8155 -static __inline__ void atomic64_inc(atomic64_t *v)
8159 + * atomic64_inc - increment atomic variable
8160 + * @v: pointer of type atomic64_t
8162 + * Automatically increments @v by 1
8164 +#define atomic64_inc(v) atomic64_add(1, (v))
8165 +#define atomic64_inc_return(v) atomic64_add_return(1, (v))
8167 - __asm__ __volatile__(
8168 -"1: ldarx %0,0,%2 # atomic64_inc\n\
8170 - stdcx. %0,0,%2 \n\
8172 - : "=&r" (t), "+m" (v->counter)
8173 - : "r" (&v->counter)
8175 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8177 + atomic64_add_unchecked(1, v);
8180 -static __inline__ long atomic64_inc_return(atomic64_t *v)
8181 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8185 - __asm__ __volatile__(
8186 - PPC_ATOMIC_ENTRY_BARRIER
8187 -"1: ldarx %0,0,%1 # atomic64_inc_return\n\
8189 - stdcx. %0,0,%1 \n\
8191 - PPC_ATOMIC_EXIT_BARRIER
8193 - : "r" (&v->counter)
8194 - : "cc", "xer", "memory");
8197 + return atomic64_add_return_unchecked(1, v);
8201 @@ -353,36 +424,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
8203 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8205 -static __inline__ void atomic64_dec(atomic64_t *v)
8207 + * atomic64_dec - decrement atomic variable
8208 + * @v: pointer of type atomic64_t
8210 + * Atomically decrements @v by 1
8212 +#define atomic64_dec(v) atomic64_sub(1, (v))
8213 +#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
8215 +static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8219 - __asm__ __volatile__(
8220 -"1: ldarx %0,0,%2 # atomic64_dec\n\
8224 - : "=&r" (t), "+m" (v->counter)
8225 - : "r" (&v->counter)
8229 -static __inline__ long atomic64_dec_return(atomic64_t *v)
8233 - __asm__ __volatile__(
8234 - PPC_ATOMIC_ENTRY_BARRIER
8235 -"1: ldarx %0,0,%1 # atomic64_dec_return\n\
8239 - PPC_ATOMIC_EXIT_BARRIER
8241 - : "r" (&v->counter)
8242 - : "cc", "xer", "memory");
8245 + atomic64_sub_unchecked(1, v);
8248 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
8249 @@ -415,6 +468,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
8250 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8251 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8253 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8255 + return cmpxchg(&(v->counter), old, new);
8258 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8260 + return xchg(&(v->counter), new);
8264 * atomic64_add_unless - add unless the number is a given value
8265 * @v: pointer of type atomic64_t
8266 @@ -430,13 +493,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
8268 __asm__ __volatile__ (
8269 PPC_ATOMIC_ENTRY_BARRIER
8270 -"1: ldarx %0,0,%1 # __atomic_add_unless\n\
8271 +"1: ldarx %0,0,%1 # atomic64_add_unless\n\
8277 +#ifdef CONFIG_PAX_REFCOUNT
8279 +" addo. %0,%2,%0\n"
8280 +" bf 4*cr0+so, 4f\n"
8281 +"3:.long " "0x00c00b00""\n"
8287 " stdcx. %0,0,%1 \n\
8289 PPC_ATOMIC_EXIT_BARRIER
8292 +#ifdef CONFIG_PAX_REFCOUNT
8293 + _ASM_EXTABLE(3b, 5b)
8299 diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8300 index a3bf5be..e03ba81 100644
8301 --- a/arch/powerpc/include/asm/barrier.h
8302 +++ b/arch/powerpc/include/asm/barrier.h
8305 compiletime_assert_atomic_type(*p); \
8307 - ACCESS_ONCE(*p) = (v); \
8308 + ACCESS_ONCE_RW(*p) = (v); \
8311 #define smp_load_acquire(p) \
8312 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8313 index 0dc42c5..b80a3a1 100644
8314 --- a/arch/powerpc/include/asm/cache.h
8315 +++ b/arch/powerpc/include/asm/cache.h
8319 #include <asm/reg.h>
8320 +#include <linux/const.h>
8322 /* bytes per L1 cache line */
8323 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8325 #define L1_CACHE_SHIFT 7
8328 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8329 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8331 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8333 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8334 index ee46ffe..b36c98c 100644
8335 --- a/arch/powerpc/include/asm/elf.h
8336 +++ b/arch/powerpc/include/asm/elf.h
8339 #define ELF_ET_DYN_BASE 0x20000000
8341 +#ifdef CONFIG_PAX_ASLR
8342 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8344 +#ifdef __powerpc64__
8345 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8346 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8348 +#define PAX_DELTA_MMAP_LEN 15
8349 +#define PAX_DELTA_STACK_LEN 15
8353 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8356 diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8357 index 8196e9c..d83a9f3 100644
8358 --- a/arch/powerpc/include/asm/exec.h
8359 +++ b/arch/powerpc/include/asm/exec.h
8361 #ifndef _ASM_POWERPC_EXEC_H
8362 #define _ASM_POWERPC_EXEC_H
8364 -extern unsigned long arch_align_stack(unsigned long sp);
8365 +#define arch_align_stack(x) ((x) & ~0xfUL)
8367 #endif /* _ASM_POWERPC_EXEC_H */
8368 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8369 index 5acabbd..7ea14fa 100644
8370 --- a/arch/powerpc/include/asm/kmap_types.h
8371 +++ b/arch/powerpc/include/asm/kmap_types.h
8373 * 2 of the License, or (at your option) any later version.
8376 -#define KM_TYPE_NR 16
8377 +#define KM_TYPE_NR 17
8379 #endif /* __KERNEL__ */
8380 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8381 diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8382 index b8da913..c02b593 100644
8383 --- a/arch/powerpc/include/asm/local.h
8384 +++ b/arch/powerpc/include/asm/local.h
8385 @@ -9,21 +9,65 @@ typedef struct
8391 + atomic_long_unchecked_t a;
8392 +} local_unchecked_t;
8394 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8396 #define local_read(l) atomic_long_read(&(l)->a)
8397 +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8398 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8399 +#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8401 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8402 +#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8403 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8404 +#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8405 #define local_inc(l) atomic_long_inc(&(l)->a)
8406 +#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8407 #define local_dec(l) atomic_long_dec(&(l)->a)
8408 +#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8410 static __inline__ long local_add_return(long a, local_t *l)
8414 __asm__ __volatile__(
8415 +"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
8417 +#ifdef CONFIG_PAX_REFCOUNT
8419 +" addo. %0,%1,%0\n"
8420 +" bf 4*cr0+so, 3f\n"
8421 +"2:.long " "0x00c00b00""\n"
8427 + PPC405_ERR77(0,%2)
8428 + PPC_STLCX "%0,0,%2 \n\
8431 +#ifdef CONFIG_PAX_REFCOUNT
8433 + _ASM_EXTABLE(2b, 4b)
8437 + : "r" (a), "r" (&(l->a.counter))
8438 + : "cc", "memory");
8443 +static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
8447 + __asm__ __volatile__(
8448 "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
8451 @@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
8453 #define local_cmpxchg(l, o, n) \
8454 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8455 +#define local_cmpxchg_unchecked(l, o, n) \
8456 + (cmpxchg_local(&((l)->a.counter), (o), (n)))
8457 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8460 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8461 index 8565c25..2865190 100644
8462 --- a/arch/powerpc/include/asm/mman.h
8463 +++ b/arch/powerpc/include/asm/mman.h
8464 @@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8466 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8468 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8469 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8471 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8473 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8474 index 69c0598..2c56964 100644
8475 --- a/arch/powerpc/include/asm/page.h
8476 +++ b/arch/powerpc/include/asm/page.h
8477 @@ -227,8 +227,9 @@ extern long long virt_phys_offset;
8478 * and needs to be executable. This means the whole heap ends
8479 * up being executable.
8481 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8482 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8483 +#define VM_DATA_DEFAULT_FLAGS32 \
8484 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8485 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8487 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8488 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8489 @@ -256,6 +257,9 @@ extern long long virt_phys_offset;
8490 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8493 +#define ktla_ktva(addr) (addr)
8494 +#define ktva_ktla(addr) (addr)
8496 #ifndef CONFIG_PPC_BOOK3S_64
8498 * Use the top bit of the higher-level page table entries to indicate whether
8499 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8500 index d908a46..3753f71 100644
8501 --- a/arch/powerpc/include/asm/page_64.h
8502 +++ b/arch/powerpc/include/asm/page_64.h
8503 @@ -172,15 +172,18 @@ do { \
8504 * stack by default, so in the absence of a PT_GNU_STACK program header
8505 * we turn execute permission off.
8507 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8508 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8509 +#define VM_STACK_DEFAULT_FLAGS32 \
8510 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8511 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8513 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8514 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8516 +#ifndef CONFIG_PAX_PAGEEXEC
8517 #define VM_STACK_DEFAULT_FLAGS \
8518 (is_32bit_task() ? \
8519 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8522 #include <asm-generic/getorder.h>
8524 diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8525 index 4b0be20..c15a27d 100644
8526 --- a/arch/powerpc/include/asm/pgalloc-64.h
8527 +++ b/arch/powerpc/include/asm/pgalloc-64.h
8528 @@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8529 #ifndef CONFIG_PPC_64K_PAGES
8531 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8532 +#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8534 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8536 @@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8537 pud_set(pud, (unsigned long)pmd);
8540 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8542 + pud_populate(mm, pud, pmd);
8545 #define pmd_populate(mm, pmd, pte_page) \
8546 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8547 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8548 @@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8551 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8552 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8554 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8556 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8557 index 11a3863..108f194 100644
8558 --- a/arch/powerpc/include/asm/pgtable.h
8559 +++ b/arch/powerpc/include/asm/pgtable.h
8561 #define _ASM_POWERPC_PGTABLE_H
8564 +#include <linux/const.h>
8565 #ifndef __ASSEMBLY__
8566 #include <linux/mmdebug.h>
8567 #include <linux/mmzone.h>
8568 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8569 index 62cfb0c..50c6402 100644
8570 --- a/arch/powerpc/include/asm/pte-hash32.h
8571 +++ b/arch/powerpc/include/asm/pte-hash32.h
8573 #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
8574 #define _PAGE_USER 0x004 /* usermode access allowed */
8575 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8576 +#define _PAGE_EXEC _PAGE_GUARDED
8577 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8578 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8579 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8580 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8581 index af56b5c..f86f3f6 100644
8582 --- a/arch/powerpc/include/asm/reg.h
8583 +++ b/arch/powerpc/include/asm/reg.h
8585 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8586 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8587 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8588 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8589 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8590 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8591 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8592 diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8593 index 825663c..f9e9134 100644
8594 --- a/arch/powerpc/include/asm/smp.h
8595 +++ b/arch/powerpc/include/asm/smp.h
8596 @@ -51,7 +51,7 @@ struct smp_ops_t {
8597 int (*cpu_disable)(void);
8598 void (*cpu_die)(unsigned int nr);
8599 int (*cpu_bootable)(unsigned int nr);
8603 extern void smp_send_debugger_break(void);
8604 extern void start_secondary_resume(void);
8605 diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
8606 index 4dbe072..b803275 100644
8607 --- a/arch/powerpc/include/asm/spinlock.h
8608 +++ b/arch/powerpc/include/asm/spinlock.h
8609 @@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
8610 __asm__ __volatile__(
8611 "1: " PPC_LWARX(%0,0,%1,1) "\n"
8613 -" addic. %0,%0,1\n\
8616 +#ifdef CONFIG_PAX_REFCOUNT
8618 +" addico. %0,%0,1\n"
8619 +" bf 4*cr0+so, 3f\n"
8620 +"2:.long " "0x00c00b00""\n"
8622 +" addic. %0,%0,1\n"
8634 +#ifdef CONFIG_PAX_REFCOUNT
8635 + _ASM_EXTABLE(2b,4b)
8640 : "cr0", "xer", "memory");
8642 @@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
8643 __asm__ __volatile__(
8646 -"1: lwarx %0,0,%1\n\
8648 +"1: lwarx %0,0,%1\n"
8650 +#ifdef CONFIG_PAX_REFCOUNT
8652 +" addico. %0,%0,-1\n"
8653 +" bf 4*cr0+so, 3f\n"
8654 +"2:.long " "0x00c00b00""\n"
8656 +" addic. %0,%0,-1\n"
8664 +#ifdef CONFIG_PAX_REFCOUNT
8666 + _ASM_EXTABLE(2b, 4b)
8671 : "cr0", "xer", "memory");
8672 diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8673 index 7efee4a..48d47cc 100644
8674 --- a/arch/powerpc/include/asm/thread_info.h
8675 +++ b/arch/powerpc/include/asm/thread_info.h
8676 @@ -101,6 +101,8 @@ static inline struct thread_info *current_thread_info(void)
8677 #if defined(CONFIG_PPC64)
8678 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8680 +/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
8681 +#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8683 /* as above, but as bit values */
8684 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8685 @@ -119,9 +121,10 @@ static inline struct thread_info *current_thread_info(void)
8686 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8687 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8688 #define _TIF_NOHZ (1<<TIF_NOHZ)
8689 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8690 #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8691 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8693 + _TIF_NOHZ | _TIF_GRSEC_SETXID)
8695 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8696 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
8697 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8698 index a0c071d..49cdc7f 100644
8699 --- a/arch/powerpc/include/asm/uaccess.h
8700 +++ b/arch/powerpc/include/asm/uaccess.h
8705 +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8706 #define access_ok(type, addr, size) \
8707 (__chk_user_ptr(addr), \
8708 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8709 @@ -318,52 +319,6 @@ do { \
8710 extern unsigned long __copy_tofrom_user(void __user *to,
8711 const void __user *from, unsigned long size);
8713 -#ifndef __powerpc64__
8715 -static inline unsigned long copy_from_user(void *to,
8716 - const void __user *from, unsigned long n)
8718 - unsigned long over;
8720 - if (access_ok(VERIFY_READ, from, n))
8721 - return __copy_tofrom_user((__force void __user *)to, from, n);
8722 - if ((unsigned long)from < TASK_SIZE) {
8723 - over = (unsigned long)from + n - TASK_SIZE;
8724 - return __copy_tofrom_user((__force void __user *)to, from,
8730 -static inline unsigned long copy_to_user(void __user *to,
8731 - const void *from, unsigned long n)
8733 - unsigned long over;
8735 - if (access_ok(VERIFY_WRITE, to, n))
8736 - return __copy_tofrom_user(to, (__force void __user *)from, n);
8737 - if ((unsigned long)to < TASK_SIZE) {
8738 - over = (unsigned long)to + n - TASK_SIZE;
8739 - return __copy_tofrom_user(to, (__force void __user *)from,
8745 -#else /* __powerpc64__ */
8747 -#define __copy_in_user(to, from, size) \
8748 - __copy_tofrom_user((to), (from), (size))
8750 -extern unsigned long copy_from_user(void *to, const void __user *from,
8752 -extern unsigned long copy_to_user(void __user *to, const void *from,
8754 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
8757 -#endif /* __powerpc64__ */
8759 static inline unsigned long __copy_from_user_inatomic(void *to,
8760 const void __user *from, unsigned long n)
8762 @@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8767 + if (!__builtin_constant_p(n))
8768 + check_object_size(to, n, false);
8770 return __copy_tofrom_user((__force void __user *)to, from, n);
8773 @@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8778 + if (!__builtin_constant_p(n))
8779 + check_object_size(from, n, true);
8781 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8784 @@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8785 return __copy_to_user_inatomic(to, from, size);
8788 +#ifndef __powerpc64__
8790 +static inline unsigned long __must_check copy_from_user(void *to,
8791 + const void __user *from, unsigned long n)
8793 + unsigned long over;
8798 + if (access_ok(VERIFY_READ, from, n)) {
8799 + if (!__builtin_constant_p(n))
8800 + check_object_size(to, n, false);
8801 + return __copy_tofrom_user((__force void __user *)to, from, n);
8803 + if ((unsigned long)from < TASK_SIZE) {
8804 + over = (unsigned long)from + n - TASK_SIZE;
8805 + if (!__builtin_constant_p(n - over))
8806 + check_object_size(to, n - over, false);
8807 + return __copy_tofrom_user((__force void __user *)to, from,
8813 +static inline unsigned long __must_check copy_to_user(void __user *to,
8814 + const void *from, unsigned long n)
8816 + unsigned long over;
8821 + if (access_ok(VERIFY_WRITE, to, n)) {
8822 + if (!__builtin_constant_p(n))
8823 + check_object_size(from, n, true);
8824 + return __copy_tofrom_user(to, (__force void __user *)from, n);
8826 + if ((unsigned long)to < TASK_SIZE) {
8827 + over = (unsigned long)to + n - TASK_SIZE;
8828 + if (!__builtin_constant_p(n - over))
8829 + check_object_size(from, n - over, true);
8830 + return __copy_tofrom_user(to, (__force void __user *)from,
8836 +#else /* __powerpc64__ */
8838 +#define __copy_in_user(to, from, size) \
8839 + __copy_tofrom_user((to), (from), (size))
8841 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8843 + if ((long)n < 0 || n > INT_MAX)
8846 + if (!__builtin_constant_p(n))
8847 + check_object_size(to, n, false);
8849 + if (likely(access_ok(VERIFY_READ, from, n)))
8850 + n = __copy_from_user(to, from, n);
8856 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8858 + if ((long)n < 0 || n > INT_MAX)
8861 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
8862 + if (!__builtin_constant_p(n))
8863 + check_object_size(from, n, true);
8864 + n = __copy_to_user(to, from, n);
8869 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
8872 +#endif /* __powerpc64__ */
8874 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8876 static inline unsigned long clear_user(void __user *addr, unsigned long size)
8877 diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
8878 index c1ebbda..fd8a98d 100644
8879 --- a/arch/powerpc/kernel/Makefile
8880 +++ b/arch/powerpc/kernel/Makefile
8881 @@ -15,6 +15,11 @@ CFLAGS_prom_init.o += -fPIC
8882 CFLAGS_btext.o += -fPIC
8885 +CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8886 +CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8887 +CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8888 +CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8890 ifdef CONFIG_FUNCTION_TRACER
8891 # Do not trace early boot code
8892 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
8893 @@ -27,6 +32,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
8894 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
8897 +CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8899 obj-y := cputable.o ptrace.o syscalls.o \
8900 irq.o align.o signal_32.o pmc.o vdso.o \
8901 process.o systbl.o idle.o \
8902 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8903 index 3e68d1c..72a5ee6 100644
8904 --- a/arch/powerpc/kernel/exceptions-64e.S
8905 +++ b/arch/powerpc/kernel/exceptions-64e.S
8906 @@ -1010,6 +1010,7 @@ storage_fault_common:
8909 addi r3,r1,STACK_FRAME_OVERHEAD
8913 ld r14,PACA_EXGEN+EX_R14(r13)
8914 @@ -1018,8 +1019,7 @@ storage_fault_common:
8917 b ret_from_except_lite
8921 addi r3,r1,STACK_FRAME_OVERHEAD
8924 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8925 index 9519e6b..13f6c38 100644
8926 --- a/arch/powerpc/kernel/exceptions-64s.S
8927 +++ b/arch/powerpc/kernel/exceptions-64s.S
8928 @@ -1599,10 +1599,10 @@ handle_page_fault:
8931 addi r3,r1,STACK_FRAME_OVERHEAD
8938 addi r3,r1,STACK_FRAME_OVERHEAD
8940 diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
8941 index 4509603..cdb491f 100644
8942 --- a/arch/powerpc/kernel/irq.c
8943 +++ b/arch/powerpc/kernel/irq.c
8944 @@ -460,6 +460,8 @@ void migrate_irqs(void)
8948 +extern void gr_handle_kernel_exploit(void);
8950 static inline void check_stack_overflow(void)
8952 #ifdef CONFIG_DEBUG_STACKOVERFLOW
8953 @@ -472,6 +474,7 @@ static inline void check_stack_overflow(void)
8954 pr_err("do_IRQ: stack overflow: %ld\n",
8955 sp - sizeof(struct thread_info));
8957 + gr_handle_kernel_exploit();
8961 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8962 index c94d2e0..992a9ce 100644
8963 --- a/arch/powerpc/kernel/module_32.c
8964 +++ b/arch/powerpc/kernel/module_32.c
8965 @@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8966 me->arch.core_plt_section = i;
8968 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8969 - pr_err("Module doesn't contain .plt or .init.plt sections.\n");
8970 + pr_err("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
8974 @@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
8976 pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8977 /* Init, or core PLT? */
8978 - if (location >= mod->module_core
8979 - && location < mod->module_core + mod->core_size)
8980 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8981 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8982 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8984 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8985 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8986 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8988 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8992 /* Find this entry, or if that fails, the next avail. entry */
8993 while (entry->jump[0]) {
8994 @@ -296,7 +301,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8996 #ifdef CONFIG_DYNAMIC_FTRACE
8997 module->arch.tramp =
8998 - do_plt_call(module->module_core,
8999 + do_plt_call(module->module_core_rx,
9000 (unsigned long)ftrace_caller,
9003 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
9004 index febb50d..bb10020 100644
9005 --- a/arch/powerpc/kernel/process.c
9006 +++ b/arch/powerpc/kernel/process.c
9007 @@ -1036,8 +1036,8 @@ void show_regs(struct pt_regs * regs)
9008 * Lookup NIP late so we have the best change of getting the
9009 * above info out without failing
9011 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
9012 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
9013 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
9014 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
9016 show_stack(current, (unsigned long *) regs->gpr[1]);
9017 if (!user_mode(regs))
9018 @@ -1554,10 +1554,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9020 ip = stack[STACK_FRAME_LR_SAVE];
9021 if (!firstframe || ip != lr) {
9022 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
9023 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
9024 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
9025 if ((ip == rth) && curr_frame >= 0) {
9028 (void *)current->ret_stack[curr_frame].ret);
9031 @@ -1577,7 +1577,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9032 struct pt_regs *regs = (struct pt_regs *)
9033 (sp + STACK_FRAME_OVERHEAD);
9035 - printk("--- interrupt: %lx at %pS\n LR = %pS\n",
9036 + printk("--- interrupt: %lx at %pA\n LR = %pA\n",
9037 regs->trap, (void *)regs->nip, (void *)lr);
9040 @@ -1613,49 +1613,3 @@ void notrace __ppc64_runlatch_off(void)
9041 mtspr(SPRN_CTRLT, ctrl);
9043 #endif /* CONFIG_PPC64 */
9045 -unsigned long arch_align_stack(unsigned long sp)
9047 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9048 - sp -= get_random_int() & ~PAGE_MASK;
9052 -static inline unsigned long brk_rnd(void)
9054 - unsigned long rnd = 0;
9056 - /* 8MB for 32bit, 1GB for 64bit */
9057 - if (is_32bit_task())
9058 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
9060 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
9062 - return rnd << PAGE_SHIFT;
9065 -unsigned long arch_randomize_brk(struct mm_struct *mm)
9067 - unsigned long base = mm->brk;
9068 - unsigned long ret;
9070 -#ifdef CONFIG_PPC_STD_MMU_64
9072 - * If we are using 1TB segments and we are allowed to randomise
9073 - * the heap, we can put it above 1TB so it is backed by a 1TB
9074 - * segment. Otherwise the heap will be in the bottom 1TB
9075 - * which always uses 256MB segments and this may result in a
9076 - * performance penalty.
9078 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
9079 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
9082 - ret = PAGE_ALIGN(base + brk_rnd());
9084 - if (ret < mm->brk)
9090 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9091 index f21897b..28c0428 100644
9092 --- a/arch/powerpc/kernel/ptrace.c
9093 +++ b/arch/powerpc/kernel/ptrace.c
9094 @@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9098 +#ifdef CONFIG_GRKERNSEC_SETXID
9099 +extern void gr_delayed_cred_worker(void);
9103 * We must return the syscall number to actually look up in the table.
9104 * This can be -1L to skip running any syscall at all.
9105 @@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9107 secure_computing_strict(regs->gpr[0]);
9109 +#ifdef CONFIG_GRKERNSEC_SETXID
9110 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9111 + gr_delayed_cred_worker();
9114 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9115 tracehook_report_syscall_entry(regs))
9117 @@ -1805,6 +1814,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9121 +#ifdef CONFIG_GRKERNSEC_SETXID
9122 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9123 + gr_delayed_cred_worker();
9126 audit_syscall_exit(regs);
9128 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9129 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9130 index d3a831a..3a33123 100644
9131 --- a/arch/powerpc/kernel/signal_32.c
9132 +++ b/arch/powerpc/kernel/signal_32.c
9133 @@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
9134 /* Save user registers on the stack */
9135 frame = &rt_sf->uc.uc_mcontext;
9137 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9138 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9140 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9142 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9143 index c7c24d2..1bf7039 100644
9144 --- a/arch/powerpc/kernel/signal_64.c
9145 +++ b/arch/powerpc/kernel/signal_64.c
9146 @@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
9147 current->thread.fp_state.fpscr = 0;
9149 /* Set up to return from userspace. */
9150 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9151 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9152 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9154 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9155 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9156 index 19e4744..28a8d7b 100644
9157 --- a/arch/powerpc/kernel/traps.c
9158 +++ b/arch/powerpc/kernel/traps.c
9160 #include <linux/debugfs.h>
9161 #include <linux/ratelimit.h>
9162 #include <linux/context_tracking.h>
9163 +#include <linux/uaccess.h>
9165 #include <asm/emulated_ops.h>
9166 #include <asm/pgtable.h>
9167 @@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9171 +extern void gr_handle_kernel_exploit(void);
9173 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9176 @@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9177 panic("Fatal exception in interrupt");
9179 panic("Fatal exception");
9181 + gr_handle_kernel_exploit();
9186 @@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
9187 enum ctx_state prev_state = exception_enter();
9188 unsigned int reason = get_reason(regs);
9190 +#ifdef CONFIG_PAX_REFCOUNT
9191 + unsigned int bkpt;
9192 + const struct exception_table_entry *entry;
9194 + if (reason & REASON_ILLEGAL) {
9195 + /* Check if PaX bad instruction */
9196 + if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
9197 + current->thread.trap_nr = 0;
9198 + pax_report_refcount_overflow(regs);
9199 + /* fixup_exception() for PowerPC does not exist, simulate its job */
9200 + if ((entry = search_exception_tables(regs->nip)) != NULL) {
9201 + regs->nip = entry->fixup;
9204 + /* fixup_exception() could not handle */
9210 /* We can now get here via a FP Unavailable exception if the core
9211 * has no FPU, in that case the reason flags will be 0 */
9213 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9214 index 305eb0d..accc5b40 100644
9215 --- a/arch/powerpc/kernel/vdso.c
9216 +++ b/arch/powerpc/kernel/vdso.c
9218 #include <asm/vdso.h>
9219 #include <asm/vdso_datapage.h>
9220 #include <asm/setup.h>
9221 +#include <asm/mman.h>
9225 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9226 vdso_base = VDSO32_MBASE;
9229 - current->mm->context.vdso_base = 0;
9230 + current->mm->context.vdso_base = ~0UL;
9232 /* vDSO has a problem and was disabled, just don't "enable" it for the
9234 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9235 vdso_base = get_unmapped_area(NULL, vdso_base,
9236 (vdso_pages << PAGE_SHIFT) +
9237 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9239 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
9240 if (IS_ERR_VALUE(vdso_base)) {
9243 diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9244 index ac3ddf1..9a54c76 100644
9245 --- a/arch/powerpc/kvm/powerpc.c
9246 +++ b/arch/powerpc/kvm/powerpc.c
9247 @@ -1403,7 +1403,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9249 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9251 -int kvm_arch_init(void *opaque)
9252 +int kvm_arch_init(const void *opaque)
9256 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9257 index 5eea6f3..5d10396 100644
9258 --- a/arch/powerpc/lib/usercopy_64.c
9259 +++ b/arch/powerpc/lib/usercopy_64.c
9261 #include <linux/module.h>
9262 #include <asm/uaccess.h>
9264 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9266 - if (likely(access_ok(VERIFY_READ, from, n)))
9267 - n = __copy_from_user(to, from, n);
9273 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9275 - if (likely(access_ok(VERIFY_WRITE, to, n)))
9276 - n = __copy_to_user(to, from, n);
9280 unsigned long copy_in_user(void __user *to, const void __user *from,
9283 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9287 -EXPORT_SYMBOL(copy_from_user);
9288 -EXPORT_SYMBOL(copy_to_user);
9289 EXPORT_SYMBOL(copy_in_user);
9291 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9292 index b396868..3eb6b9f 100644
9293 --- a/arch/powerpc/mm/fault.c
9294 +++ b/arch/powerpc/mm/fault.c
9296 #include <linux/ratelimit.h>
9297 #include <linux/context_tracking.h>
9298 #include <linux/hugetlb.h>
9299 +#include <linux/slab.h>
9300 +#include <linux/pagemap.h>
9301 +#include <linux/compiler.h>
9302 +#include <linux/unistd.h>
9304 #include <asm/firmware.h>
9305 #include <asm/page.h>
9306 @@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9310 +#ifdef CONFIG_PAX_PAGEEXEC
9312 + * PaX: decide what to do with offenders (regs->nip = fault address)
9314 + * returns 1 when task should be killed
9316 +static int pax_handle_fetch_fault(struct pt_regs *regs)
9321 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9325 + printk(KERN_ERR "PAX: bytes at PC: ");
9326 + for (i = 0; i < 5; i++) {
9328 + if (get_user(c, (unsigned int __user *)pc+i))
9329 + printk(KERN_CONT "???????? ");
9331 + printk(KERN_CONT "%08x ", c);
9338 * Check whether the instruction at regs->nip is a store using
9339 * an update addressing form which will update r1.
9340 @@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9341 * indicate errors in DSISR but can validly be set in SRR1.
9344 - error_code &= 0x48200000;
9345 + error_code &= 0x58200000;
9347 is_write = error_code & DSISR_ISSTORE;
9349 @@ -383,12 +414,16 @@ good_area:
9350 * "undefined". Of those that can be set, this is the only
9351 * one which seems bad.
9353 - if (error_code & 0x10000000)
9354 + if (error_code & DSISR_GUARDED)
9355 /* Guarded storage error. */
9357 #endif /* CONFIG_8xx */
9360 +#ifdef CONFIG_PPC_STD_MMU
9361 + if (error_code & DSISR_GUARDED)
9365 * Allow execution from readable areas if the MMU does not
9366 * provide separate controls over reading and executing.
9367 @@ -483,6 +518,23 @@ bad_area:
9368 bad_area_nosemaphore:
9369 /* User mode accesses cause a SIGSEGV */
9370 if (user_mode(regs)) {
9372 +#ifdef CONFIG_PAX_PAGEEXEC
9373 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9374 +#ifdef CONFIG_PPC_STD_MMU
9375 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9377 + if (is_exec && regs->nip == address) {
9379 + switch (pax_handle_fetch_fault(regs)) {
9382 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9383 + do_group_exit(SIGKILL);
9388 _exception(SIGSEGV, regs, code, address);
9391 diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9392 index 0f0502e..bc3e7a3 100644
9393 --- a/arch/powerpc/mm/mmap.c
9394 +++ b/arch/powerpc/mm/mmap.c
9395 @@ -86,6 +86,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9397 unsigned long random_factor = 0UL;
9399 +#ifdef CONFIG_PAX_RANDMMAP
9400 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9403 if (current->flags & PF_RANDOMIZE)
9404 random_factor = arch_mmap_rnd();
9406 @@ -95,9 +99,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9408 if (mmap_is_legacy()) {
9409 mm->mmap_base = TASK_UNMAPPED_BASE;
9411 +#ifdef CONFIG_PAX_RANDMMAP
9412 + if (mm->pax_flags & MF_PAX_RANDMMAP)
9413 + mm->mmap_base += mm->delta_mmap;
9416 mm->get_unmapped_area = arch_get_unmapped_area;
9418 mm->mmap_base = mmap_base(random_factor);
9420 +#ifdef CONFIG_PAX_RANDMMAP
9421 + if (mm->pax_flags & MF_PAX_RANDMMAP)
9422 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9425 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9428 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9429 index 0f432a7..abfe841 100644
9430 --- a/arch/powerpc/mm/slice.c
9431 +++ b/arch/powerpc/mm/slice.c
9432 @@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9433 if ((mm->task_size - len) < addr)
9435 vma = find_vma(mm, addr);
9436 - return (!vma || (addr + len) <= vma->vm_start);
9437 + return check_heap_stack_gap(vma, addr, len, 0);
9440 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9441 @@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9442 info.align_offset = 0;
9444 addr = TASK_UNMAPPED_BASE;
9446 +#ifdef CONFIG_PAX_RANDMMAP
9447 + if (mm->pax_flags & MF_PAX_RANDMMAP)
9448 + addr += mm->delta_mmap;
9451 while (addr < TASK_SIZE) {
9452 info.low_limit = addr;
9453 if (!slice_scan_available(addr, available, 1, &addr))
9454 @@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9455 if (fixed && addr > (mm->task_size - len))
9458 +#ifdef CONFIG_PAX_RANDMMAP
9459 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9463 /* If hint, make sure it matches our alignment restrictions */
9464 if (!fixed && addr) {
9465 addr = _ALIGN_UP(addr, 1ul << pshift);
9466 diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9467 index d966bbe..372124a 100644
9468 --- a/arch/powerpc/platforms/cell/spufs/file.c
9469 +++ b/arch/powerpc/platforms/cell/spufs/file.c
9470 @@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9471 return VM_FAULT_NOPAGE;
9474 -static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9475 +static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9476 unsigned long address,
9477 - void *buf, int len, int write)
9478 + void *buf, size_t len, int write)
9480 struct spu_context *ctx = vma->vm_file->private_data;
9481 unsigned long offset = address - vma->vm_start;
9482 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9483 index adbe380..adb7516 100644
9484 --- a/arch/s390/include/asm/atomic.h
9485 +++ b/arch/s390/include/asm/atomic.h
9486 @@ -317,4 +317,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9487 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9488 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9490 +#define atomic64_read_unchecked(v) atomic64_read(v)
9491 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9492 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9493 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9494 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9495 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
9496 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9497 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
9498 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9500 #endif /* __ARCH_S390_ATOMIC__ */
9501 diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9502 index 8d72471..5322500 100644
9503 --- a/arch/s390/include/asm/barrier.h
9504 +++ b/arch/s390/include/asm/barrier.h
9507 compiletime_assert_atomic_type(*p); \
9509 - ACCESS_ONCE(*p) = (v); \
9510 + ACCESS_ONCE_RW(*p) = (v); \
9513 #define smp_load_acquire(p) \
9514 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9515 index 4d7ccac..d03d0ad 100644
9516 --- a/arch/s390/include/asm/cache.h
9517 +++ b/arch/s390/include/asm/cache.h
9519 #ifndef __ARCH_S390_CACHE_H
9520 #define __ARCH_S390_CACHE_H
9522 -#define L1_CACHE_BYTES 256
9523 +#include <linux/const.h>
9525 #define L1_CACHE_SHIFT 8
9526 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9527 #define NET_SKB_PAD 32
9529 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9530 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9531 index 3ad48f2..64cc6f3 100644
9532 --- a/arch/s390/include/asm/elf.h
9533 +++ b/arch/s390/include/asm/elf.h
9534 @@ -163,6 +163,13 @@ extern unsigned int vdso_enabled;
9535 (STACK_TOP / 3 * 2) : \
9536 (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
9538 +#ifdef CONFIG_PAX_ASLR
9539 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9541 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9542 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9545 /* This yields a mask that user programs can use to figure out what
9546 instruction set this CPU supports. */
9548 diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9549 index c4a93d6..4d2a9b4 100644
9550 --- a/arch/s390/include/asm/exec.h
9551 +++ b/arch/s390/include/asm/exec.h
9553 #ifndef __ASM_EXEC_H
9554 #define __ASM_EXEC_H
9556 -extern unsigned long arch_align_stack(unsigned long sp);
9557 +#define arch_align_stack(x) ((x) & ~0xfUL)
9559 #endif /* __ASM_EXEC_H */
9560 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9561 index d64a7a6..0830329 100644
9562 --- a/arch/s390/include/asm/uaccess.h
9563 +++ b/arch/s390/include/asm/uaccess.h
9564 @@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9565 __range_ok((unsigned long)(addr), (size)); \
9568 +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9569 #define access_ok(type, addr, size) __access_ok(addr, size)
9572 @@ -275,6 +276,10 @@ static inline unsigned long __must_check
9573 copy_to_user(void __user *to, const void *from, unsigned long n)
9580 return __copy_to_user(to, from, n);
9583 @@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9584 static inline unsigned long __must_check
9585 copy_from_user(void *to, const void __user *from, unsigned long n)
9587 - unsigned int sz = __compiletime_object_size(to);
9588 + size_t sz = __compiletime_object_size(to);
9591 - if (unlikely(sz != -1 && sz < n)) {
9596 + if (unlikely(sz != (size_t)-1 && sz < n)) {
9597 copy_from_user_overflow();
9600 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9601 index 0c1a679..e1df357 100644
9602 --- a/arch/s390/kernel/module.c
9603 +++ b/arch/s390/kernel/module.c
9604 @@ -159,11 +159,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9606 /* Increase core size by size of got & plt and set start
9607 offsets for got and plt. */
9608 - me->core_size = ALIGN(me->core_size, 4);
9609 - me->arch.got_offset = me->core_size;
9610 - me->core_size += me->arch.got_size;
9611 - me->arch.plt_offset = me->core_size;
9612 - me->core_size += me->arch.plt_size;
9613 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
9614 + me->arch.got_offset = me->core_size_rw;
9615 + me->core_size_rw += me->arch.got_size;
9616 + me->arch.plt_offset = me->core_size_rx;
9617 + me->core_size_rx += me->arch.plt_size;
9621 @@ -279,7 +279,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9622 if (info->got_initialized == 0) {
9625 - gotent = me->module_core + me->arch.got_offset +
9626 + gotent = me->module_core_rw + me->arch.got_offset +
9629 info->got_initialized = 1;
9630 @@ -302,7 +302,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9631 rc = apply_rela_bits(loc, val, 0, 64, 0);
9632 else if (r_type == R_390_GOTENT ||
9633 r_type == R_390_GOTPLTENT) {
9634 - val += (Elf_Addr) me->module_core - loc;
9635 + val += (Elf_Addr) me->module_core_rw - loc;
9636 rc = apply_rela_bits(loc, val, 1, 32, 1);
9639 @@ -315,7 +315,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9640 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9641 if (info->plt_initialized == 0) {
9643 - ip = me->module_core + me->arch.plt_offset +
9644 + ip = me->module_core_rx + me->arch.plt_offset +
9646 ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
9648 @@ -334,7 +334,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9649 val - loc + 0xffffUL < 0x1ffffeUL) ||
9650 (r_type == R_390_PLT32DBL &&
9651 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9652 - val = (Elf_Addr) me->module_core +
9653 + val = (Elf_Addr) me->module_core_rx +
9654 me->arch.plt_offset +
9656 val += rela->r_addend - loc;
9657 @@ -356,7 +356,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9658 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9659 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9660 val = val + rela->r_addend -
9661 - ((Elf_Addr) me->module_core + me->arch.got_offset);
9662 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9663 if (r_type == R_390_GOTOFF16)
9664 rc = apply_rela_bits(loc, val, 0, 16, 0);
9665 else if (r_type == R_390_GOTOFF32)
9666 @@ -366,7 +366,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9668 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9669 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9670 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
9671 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9672 rela->r_addend - loc;
9673 if (r_type == R_390_GOTPC)
9674 rc = apply_rela_bits(loc, val, 1, 32, 0);
9675 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9676 index dc5edc2..7d34ae3 100644
9677 --- a/arch/s390/kernel/process.c
9678 +++ b/arch/s390/kernel/process.c
9679 @@ -200,27 +200,3 @@ unsigned long get_wchan(struct task_struct *p)
9684 -unsigned long arch_align_stack(unsigned long sp)
9686 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9687 - sp -= get_random_int() & ~PAGE_MASK;
9691 -static inline unsigned long brk_rnd(void)
9693 - /* 8MB for 32bit, 1GB for 64bit */
9694 - if (is_32bit_task())
9695 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9697 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9700 -unsigned long arch_randomize_brk(struct mm_struct *mm)
9702 - unsigned long ret;
9704 - ret = PAGE_ALIGN(mm->brk + brk_rnd());
9705 - return (ret > mm->brk) ? ret : mm->brk;
9707 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9708 index 6e552af..3e608a1 100644
9709 --- a/arch/s390/mm/mmap.c
9710 +++ b/arch/s390/mm/mmap.c
9711 @@ -239,6 +239,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9713 unsigned long random_factor = 0UL;
9715 +#ifdef CONFIG_PAX_RANDMMAP
9716 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9719 if (current->flags & PF_RANDOMIZE)
9720 random_factor = arch_mmap_rnd();
9722 @@ -248,9 +252,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9724 if (mmap_is_legacy()) {
9725 mm->mmap_base = mmap_base_legacy(random_factor);
9727 +#ifdef CONFIG_PAX_RANDMMAP
9728 + if (mm->pax_flags & MF_PAX_RANDMMAP)
9729 + mm->mmap_base += mm->delta_mmap;
9732 mm->get_unmapped_area = s390_get_unmapped_area;
9734 mm->mmap_base = mmap_base(random_factor);
9736 +#ifdef CONFIG_PAX_RANDMMAP
9737 + if (mm->pax_flags & MF_PAX_RANDMMAP)
9738 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9741 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9744 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9745 index ae3d59f..f65f075 100644
9746 --- a/arch/score/include/asm/cache.h
9747 +++ b/arch/score/include/asm/cache.h
9749 #ifndef _ASM_SCORE_CACHE_H
9750 #define _ASM_SCORE_CACHE_H
9752 +#include <linux/const.h>
9754 #define L1_CACHE_SHIFT 4
9755 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9756 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9758 #endif /* _ASM_SCORE_CACHE_H */
9759 diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9760 index f9f3cd5..58ff438 100644
9761 --- a/arch/score/include/asm/exec.h
9762 +++ b/arch/score/include/asm/exec.h
9764 #ifndef _ASM_SCORE_EXEC_H
9765 #define _ASM_SCORE_EXEC_H
9767 -extern unsigned long arch_align_stack(unsigned long sp);
9768 +#define arch_align_stack(x) (x)
9770 #endif /* _ASM_SCORE_EXEC_H */
9771 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9772 index a1519ad3..e8ac1ff 100644
9773 --- a/arch/score/kernel/process.c
9774 +++ b/arch/score/kernel/process.c
9775 @@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9777 return task_pt_regs(task)->cp0_epc;
9780 -unsigned long arch_align_stack(unsigned long sp)
9784 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9785 index ef9e555..331bd29 100644
9786 --- a/arch/sh/include/asm/cache.h
9787 +++ b/arch/sh/include/asm/cache.h
9789 #define __ASM_SH_CACHE_H
9792 +#include <linux/const.h>
9793 #include <linux/init.h>
9794 #include <cpu/cache.h>
9796 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9797 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9799 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9801 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9802 index 6777177..cb5e44f 100644
9803 --- a/arch/sh/mm/mmap.c
9804 +++ b/arch/sh/mm/mmap.c
9805 @@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9806 struct mm_struct *mm = current->mm;
9807 struct vm_area_struct *vma;
9808 int do_colour_align;
9809 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9810 struct vm_unmapped_area_info info;
9812 if (flags & MAP_FIXED) {
9813 @@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9814 if (filp || (flags & MAP_SHARED))
9815 do_colour_align = 1;
9817 +#ifdef CONFIG_PAX_RANDMMAP
9818 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9822 if (do_colour_align)
9823 addr = COLOUR_ALIGN(addr, pgoff);
9824 @@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9825 addr = PAGE_ALIGN(addr);
9827 vma = find_vma(mm, addr);
9828 - if (TASK_SIZE - len >= addr &&
9829 - (!vma || addr + len <= vma->vm_start))
9830 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9836 - info.low_limit = TASK_UNMAPPED_BASE;
9837 + info.low_limit = mm->mmap_base;
9838 info.high_limit = TASK_SIZE;
9839 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
9840 info.align_offset = pgoff << PAGE_SHIFT;
9841 @@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9842 struct mm_struct *mm = current->mm;
9843 unsigned long addr = addr0;
9844 int do_colour_align;
9845 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9846 struct vm_unmapped_area_info info;
9848 if (flags & MAP_FIXED) {
9849 @@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9850 if (filp || (flags & MAP_SHARED))
9851 do_colour_align = 1;
9853 +#ifdef CONFIG_PAX_RANDMMAP
9854 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9857 /* requesting a specific address */
9859 if (do_colour_align)
9860 @@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9861 addr = PAGE_ALIGN(addr);
9863 vma = find_vma(mm, addr);
9864 - if (TASK_SIZE - len >= addr &&
9865 - (!vma || addr + len <= vma->vm_start))
9866 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9870 @@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9871 VM_BUG_ON(addr != -ENOMEM);
9873 info.low_limit = TASK_UNMAPPED_BASE;
9875 +#ifdef CONFIG_PAX_RANDMMAP
9876 + if (mm->pax_flags & MF_PAX_RANDMMAP)
9877 + info.low_limit += mm->delta_mmap;
9880 info.high_limit = TASK_SIZE;
9881 addr = vm_unmapped_area(&info);
9883 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
9884 index 4082749..fd97781 100644
9885 --- a/arch/sparc/include/asm/atomic_64.h
9886 +++ b/arch/sparc/include/asm/atomic_64.h
9888 #define ATOMIC64_INIT(i) { (i) }
9890 #define atomic_read(v) ACCESS_ONCE((v)->counter)
9891 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9893 + return ACCESS_ONCE(v->counter);
9895 #define atomic64_read(v) ACCESS_ONCE((v)->counter)
9896 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9898 + return ACCESS_ONCE(v->counter);
9901 #define atomic_set(v, i) (((v)->counter) = i)
9902 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9906 #define atomic64_set(v, i) (((v)->counter) = i)
9907 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9912 -#define ATOMIC_OP(op) \
9913 -void atomic_##op(int, atomic_t *); \
9914 -void atomic64_##op(long, atomic64_t *);
9915 +#define __ATOMIC_OP(op, suffix) \
9916 +void atomic_##op##suffix(int, atomic##suffix##_t *); \
9917 +void atomic64_##op##suffix(long, atomic64##suffix##_t *);
9919 -#define ATOMIC_OP_RETURN(op) \
9920 -int atomic_##op##_return(int, atomic_t *); \
9921 -long atomic64_##op##_return(long, atomic64_t *);
9922 +#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
9924 +#define __ATOMIC_OP_RETURN(op, suffix) \
9925 +int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \
9926 +long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
9928 +#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
9930 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
9932 @@ -35,13 +55,23 @@ ATOMIC_OPS(sub)
9935 #undef ATOMIC_OP_RETURN
9936 +#undef __ATOMIC_OP_RETURN
9940 #define atomic_dec_return(v) atomic_sub_return(1, v)
9941 #define atomic64_dec_return(v) atomic64_sub_return(1, v)
9943 #define atomic_inc_return(v) atomic_add_return(1, v)
9944 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9946 + return atomic_add_return_unchecked(1, v);
9948 #define atomic64_inc_return(v) atomic64_add_return(1, v)
9949 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9951 + return atomic64_add_return_unchecked(1, v);
9955 * atomic_inc_and_test - increment and test
9956 @@ -52,6 +82,10 @@ ATOMIC_OPS(sub)
9959 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
9960 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9962 + return atomic_inc_return_unchecked(v) == 0;
9964 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
9966 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
9967 @@ -61,25 +95,60 @@ ATOMIC_OPS(sub)
9968 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
9970 #define atomic_inc(v) atomic_add(1, v)
9971 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9973 + atomic_add_unchecked(1, v);
9975 #define atomic64_inc(v) atomic64_add(1, v)
9976 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9978 + atomic64_add_unchecked(1, v);
9981 #define atomic_dec(v) atomic_sub(1, v)
9982 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9984 + atomic_sub_unchecked(1, v);
9986 #define atomic64_dec(v) atomic64_sub(1, v)
9987 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9989 + atomic64_sub_unchecked(1, v);
9992 #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
9993 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
9995 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
9996 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9998 + return cmpxchg(&v->counter, old, new);
10000 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
10001 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10003 + return xchg(&v->counter, new);
10006 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10010 c = atomic_read(v);
10012 - if (unlikely(c == (u)))
10013 + if (unlikely(c == u))
10015 - old = atomic_cmpxchg((v), c, c + (a));
10017 + asm volatile("addcc %2, %0, %0\n"
10019 +#ifdef CONFIG_PAX_REFCOUNT
10024 + : "0" (c), "ir" (a)
10027 + old = atomic_cmpxchg(v, c, new);
10028 if (likely(old == c))
10031 @@ -90,20 +159,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10032 #define atomic64_cmpxchg(v, o, n) \
10033 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
10034 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
10035 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10037 + return xchg(&v->counter, new);
10040 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10043 + long c, old, new;
10044 c = atomic64_read(v);
10046 - if (unlikely(c == (u)))
10047 + if (unlikely(c == u))
10049 - old = atomic64_cmpxchg((v), c, c + (a));
10051 + asm volatile("addcc %2, %0, %0\n"
10053 +#ifdef CONFIG_PAX_REFCOUNT
10058 + : "0" (c), "ir" (a)
10061 + old = atomic64_cmpxchg(v, c, new);
10062 if (likely(old == c))
10070 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10071 diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10072 index 7664894..45a974b 100644
10073 --- a/arch/sparc/include/asm/barrier_64.h
10074 +++ b/arch/sparc/include/asm/barrier_64.h
10075 @@ -60,7 +60,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10077 compiletime_assert_atomic_type(*p); \
10079 - ACCESS_ONCE(*p) = (v); \
10080 + ACCESS_ONCE_RW(*p) = (v); \
10083 #define smp_load_acquire(p) \
10084 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10085 index 5bb6991..5c2132e 100644
10086 --- a/arch/sparc/include/asm/cache.h
10087 +++ b/arch/sparc/include/asm/cache.h
10089 #ifndef _SPARC_CACHE_H
10090 #define _SPARC_CACHE_H
10092 +#include <linux/const.h>
10094 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10096 #define L1_CACHE_SHIFT 5
10097 -#define L1_CACHE_BYTES 32
10098 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10100 #ifdef CONFIG_SPARC32
10101 #define SMP_CACHE_BYTES_SHIFT 5
10102 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10103 index a24e41f..47677ff 100644
10104 --- a/arch/sparc/include/asm/elf_32.h
10105 +++ b/arch/sparc/include/asm/elf_32.h
10106 @@ -114,6 +114,13 @@ typedef struct {
10108 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10110 +#ifdef CONFIG_PAX_ASLR
10111 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
10113 +#define PAX_DELTA_MMAP_LEN 16
10114 +#define PAX_DELTA_STACK_LEN 16
10117 /* This yields a mask that user programs can use to figure out what
10118 instruction set this cpu supports. This can NOT be done in userspace
10120 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10121 index 370ca1e..d4f4a98 100644
10122 --- a/arch/sparc/include/asm/elf_64.h
10123 +++ b/arch/sparc/include/asm/elf_64.h
10124 @@ -189,6 +189,13 @@ typedef struct {
10125 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10126 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10128 +#ifdef CONFIG_PAX_ASLR
10129 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10131 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10132 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10135 extern unsigned long sparc64_elf_hwcap;
10136 #define ELF_HWCAP sparc64_elf_hwcap
10138 diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10139 index a3890da..f6a408e 100644
10140 --- a/arch/sparc/include/asm/pgalloc_32.h
10141 +++ b/arch/sparc/include/asm/pgalloc_32.h
10142 @@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10145 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10146 +#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10148 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10149 unsigned long address)
10150 diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10151 index 5e31871..13469c6 100644
10152 --- a/arch/sparc/include/asm/pgalloc_64.h
10153 +++ b/arch/sparc/include/asm/pgalloc_64.h
10154 @@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
10157 #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
10158 +#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10160 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
10162 @@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
10165 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
10166 +#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10168 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
10170 diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10171 index 59ba6f6..4518128 100644
10172 --- a/arch/sparc/include/asm/pgtable.h
10173 +++ b/arch/sparc/include/asm/pgtable.h
10176 #include <asm/pgtable_32.h>
10179 +#define ktla_ktva(addr) (addr)
10180 +#define ktva_ktla(addr) (addr)
10183 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10184 index f06b36a..bca3189 100644
10185 --- a/arch/sparc/include/asm/pgtable_32.h
10186 +++ b/arch/sparc/include/asm/pgtable_32.h
10187 @@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10188 #define PAGE_SHARED SRMMU_PAGE_SHARED
10189 #define PAGE_COPY SRMMU_PAGE_COPY
10190 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10191 +#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10192 +#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10193 +#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10194 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10196 /* Top-level page directory - dummy used by init-mm.
10197 @@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10200 #define __P000 PAGE_NONE
10201 -#define __P001 PAGE_READONLY
10202 -#define __P010 PAGE_COPY
10203 -#define __P011 PAGE_COPY
10204 +#define __P001 PAGE_READONLY_NOEXEC
10205 +#define __P010 PAGE_COPY_NOEXEC
10206 +#define __P011 PAGE_COPY_NOEXEC
10207 #define __P100 PAGE_READONLY
10208 #define __P101 PAGE_READONLY
10209 #define __P110 PAGE_COPY
10210 #define __P111 PAGE_COPY
10212 #define __S000 PAGE_NONE
10213 -#define __S001 PAGE_READONLY
10214 -#define __S010 PAGE_SHARED
10215 -#define __S011 PAGE_SHARED
10216 +#define __S001 PAGE_READONLY_NOEXEC
10217 +#define __S010 PAGE_SHARED_NOEXEC
10218 +#define __S011 PAGE_SHARED_NOEXEC
10219 #define __S100 PAGE_READONLY
10220 #define __S101 PAGE_READONLY
10221 #define __S110 PAGE_SHARED
10222 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10223 index ae51a11..eadfd03 100644
10224 --- a/arch/sparc/include/asm/pgtsrmmu.h
10225 +++ b/arch/sparc/include/asm/pgtsrmmu.h
10226 @@ -111,6 +111,11 @@
10227 SRMMU_EXEC | SRMMU_REF)
10228 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10229 SRMMU_EXEC | SRMMU_REF)
10231 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10232 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10233 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10235 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10236 SRMMU_DIRTY | SRMMU_REF)
10238 diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10239 index 29d64b1..4272fe8 100644
10240 --- a/arch/sparc/include/asm/setup.h
10241 +++ b/arch/sparc/include/asm/setup.h
10242 @@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10243 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10246 -extern atomic_t dcpage_flushes;
10247 -extern atomic_t dcpage_flushes_xcall;
10248 +extern atomic_unchecked_t dcpage_flushes;
10249 +extern atomic_unchecked_t dcpage_flushes_xcall;
10251 extern int sysctl_tsb_ratio;
10253 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10254 index 9689176..63c18ea 100644
10255 --- a/arch/sparc/include/asm/spinlock_64.h
10256 +++ b/arch/sparc/include/asm/spinlock_64.h
10257 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10259 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10261 -static void inline arch_read_lock(arch_rwlock_t *lock)
10262 +static inline void arch_read_lock(arch_rwlock_t *lock)
10264 unsigned long tmp1, tmp2;
10266 __asm__ __volatile__ (
10267 "1: ldsw [%2], %0\n"
10268 " brlz,pn %0, 2f\n"
10269 -"4: add %0, 1, %1\n"
10270 +"4: addcc %0, 1, %1\n"
10272 +#ifdef CONFIG_PAX_REFCOUNT
10276 " cas [%2], %0, %1\n"
10278 " bne,pn %%icc, 1b\n"
10279 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10281 : "=&r" (tmp1), "=&r" (tmp2)
10284 + : "memory", "cc");
10287 -static int inline arch_read_trylock(arch_rwlock_t *lock)
10288 +static inline int arch_read_trylock(arch_rwlock_t *lock)
10292 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10293 "1: ldsw [%2], %0\n"
10294 " brlz,a,pn %0, 2f\n"
10296 -" add %0, 1, %1\n"
10297 +" addcc %0, 1, %1\n"
10299 +#ifdef CONFIG_PAX_REFCOUNT
10303 " cas [%2], %0, %1\n"
10305 " bne,pn %%icc, 1b\n"
10306 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10310 -static void inline arch_read_unlock(arch_rwlock_t *lock)
10311 +static inline void arch_read_unlock(arch_rwlock_t *lock)
10313 unsigned long tmp1, tmp2;
10315 __asm__ __volatile__(
10316 "1: lduw [%2], %0\n"
10317 -" sub %0, 1, %1\n"
10318 +" subcc %0, 1, %1\n"
10320 +#ifdef CONFIG_PAX_REFCOUNT
10324 " cas [%2], %0, %1\n"
10326 " bne,pn %%xcc, 1b\n"
10327 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10331 -static void inline arch_write_lock(arch_rwlock_t *lock)
10332 +static inline void arch_write_lock(arch_rwlock_t *lock)
10334 unsigned long mask, tmp1, tmp2;
10336 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10340 -static void inline arch_write_unlock(arch_rwlock_t *lock)
10341 +static inline void arch_write_unlock(arch_rwlock_t *lock)
10343 __asm__ __volatile__(
10345 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10349 -static int inline arch_write_trylock(arch_rwlock_t *lock)
10350 +static inline int arch_write_trylock(arch_rwlock_t *lock)
10352 unsigned long mask, tmp1, tmp2, result;
10354 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10355 index 229475f..2fca9163 100644
10356 --- a/arch/sparc/include/asm/thread_info_32.h
10357 +++ b/arch/sparc/include/asm/thread_info_32.h
10358 @@ -48,6 +48,7 @@ struct thread_info {
10359 struct reg_window32 reg_window[NSWINS]; /* align for ldd! */
10360 unsigned long rwbuf_stkptrs[NSWINS];
10361 unsigned long w_saved;
10362 + unsigned long lowest_stack;
10366 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10367 index bde5982..9cbb56d 100644
10368 --- a/arch/sparc/include/asm/thread_info_64.h
10369 +++ b/arch/sparc/include/asm/thread_info_64.h
10370 @@ -59,6 +59,8 @@ struct thread_info {
10371 struct pt_regs *kern_una_regs;
10372 unsigned int kern_una_insn;
10374 + unsigned long lowest_stack;
10376 unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
10377 __attribute__ ((aligned(64)));
10379 @@ -180,12 +182,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10380 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10381 /* flag bit 4 is available */
10382 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10383 -/* flag bit 6 is available */
10384 +#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10385 #define TIF_32BIT 7 /* 32-bit binary */
10386 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10387 #define TIF_SECCOMP 9 /* secure computing */
10388 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10389 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10391 /* NOTE: Thread flags >= 12 should be ones we have no interest
10392 * in using in assembly, else we can't use the mask as
10393 * an immediate value in instructions such as andcc.
10394 @@ -205,12 +208,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
10395 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10396 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10397 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10398 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10400 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10401 _TIF_DO_NOTIFY_RESUME_MASK | \
10403 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10405 +#define _TIF_WORK_SYSCALL \
10406 + (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10407 + _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10409 #define is_32bit_task() (test_thread_flag(TIF_32BIT))
10412 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10413 index bd56c28..4b63d83 100644
10414 --- a/arch/sparc/include/asm/uaccess.h
10415 +++ b/arch/sparc/include/asm/uaccess.h
10417 #ifndef ___ASM_SPARC_UACCESS_H
10418 #define ___ASM_SPARC_UACCESS_H
10420 #if defined(__sparc__) && defined(__arch64__)
10421 #include <asm/uaccess_64.h>
10423 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10424 index 64ee103..388aef0 100644
10425 --- a/arch/sparc/include/asm/uaccess_32.h
10426 +++ b/arch/sparc/include/asm/uaccess_32.h
10428 #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
10429 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
10430 #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
10431 +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
10432 #define access_ok(type, addr, size) \
10433 ({ (void)(type); __access_ok((unsigned long)(addr), size); })
10435 @@ -313,27 +314,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
10437 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10439 - if (n && __access_ok((unsigned long) to, n))
10443 + if (n && __access_ok((unsigned long) to, n)) {
10444 + if (!__builtin_constant_p(n))
10445 + check_object_size(from, n, true);
10446 return __copy_user(to, (__force void __user *) from, n);
10452 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10457 + if (!__builtin_constant_p(n))
10458 + check_object_size(from, n, true);
10460 return __copy_user(to, (__force void __user *) from, n);
10463 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10465 - if (n && __access_ok((unsigned long) from, n))
10469 + if (n && __access_ok((unsigned long) from, n)) {
10470 + if (!__builtin_constant_p(n))
10471 + check_object_size(to, n, false);
10472 return __copy_user((__force void __user *) to, from, n);
10478 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10483 return __copy_user((__force void __user *) to, from, n);
10486 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10487 index a35194b..47dabc0d 100644
10488 --- a/arch/sparc/include/asm/uaccess_64.h
10489 +++ b/arch/sparc/include/asm/uaccess_64.h
10491 #include <linux/compiler.h>
10492 #include <linux/string.h>
10493 #include <linux/thread_info.h>
10494 +#include <linux/kernel.h>
10495 #include <asm/asi.h>
10496 #include <asm/spitfire.h>
10497 #include <asm-generic/uaccess-unaligned.h>
10498 @@ -54,6 +55,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
10502 +static inline int access_ok_noprefault(int type, const void __user * addr, unsigned long size)
10507 static inline int access_ok(int type, const void __user * addr, unsigned long size)
10510 @@ -228,8 +234,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
10511 static inline unsigned long __must_check
10512 copy_from_user(void *to, const void __user *from, unsigned long size)
10514 - unsigned long ret = ___copy_from_user(to, from, size);
10515 + unsigned long ret;
10517 + if ((long)size < 0 || size > INT_MAX)
10520 + if (!__builtin_constant_p(size))
10521 + check_object_size(to, size, false);
10523 + ret = ___copy_from_user(to, from, size);
10525 ret = copy_from_user_fixup(to, from, size);
10527 @@ -245,8 +258,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
10528 static inline unsigned long __must_check
10529 copy_to_user(void __user *to, const void *from, unsigned long size)
10531 - unsigned long ret = ___copy_to_user(to, from, size);
10532 + unsigned long ret;
10534 + if ((long)size < 0 || size > INT_MAX)
10537 + if (!__builtin_constant_p(size))
10538 + check_object_size(from, size, true);
10540 + ret = ___copy_to_user(to, from, size);
10542 ret = copy_to_user_fixup(to, from, size);
10544 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10545 index 7cf9c6e..6206648 100644
10546 --- a/arch/sparc/kernel/Makefile
10547 +++ b/arch/sparc/kernel/Makefile
10552 -ccflags-y := -Werror
10553 +#ccflags-y := -Werror
10555 extra-y := head_$(BITS).o
10557 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10558 index 50e7b62..79fae35 100644
10559 --- a/arch/sparc/kernel/process_32.c
10560 +++ b/arch/sparc/kernel/process_32.c
10561 @@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
10563 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10564 r->psr, r->pc, r->npc, r->y, print_tainted());
10565 - printk("PC: <%pS>\n", (void *) r->pc);
10566 + printk("PC: <%pA>\n", (void *) r->pc);
10567 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10568 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10569 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10570 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10571 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10572 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10573 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10574 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10576 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10577 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10578 @@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10579 rw = (struct reg_window32 *) fp;
10581 printk("[%08lx : ", pc);
10582 - printk("%pS ] ", (void *) pc);
10583 + printk("%pA ] ", (void *) pc);
10585 } while (++count < 16);
10587 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10588 index 46a5964..a35c62c 100644
10589 --- a/arch/sparc/kernel/process_64.c
10590 +++ b/arch/sparc/kernel/process_64.c
10591 @@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10592 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10593 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10594 if (regs->tstate & TSTATE_PRIV)
10595 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10596 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10599 void show_regs(struct pt_regs *regs)
10600 @@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10602 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10603 regs->tpc, regs->tnpc, regs->y, print_tainted());
10604 - printk("TPC: <%pS>\n", (void *) regs->tpc);
10605 + printk("TPC: <%pA>\n", (void *) regs->tpc);
10606 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10607 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10609 @@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10610 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10611 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10613 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10614 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10615 show_regwindow(regs);
10616 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10618 @@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
10619 ((tp && tp->task) ? tp->task->pid : -1));
10621 if (gp->tstate & TSTATE_PRIV) {
10622 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10623 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10627 diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10628 index 79cc0d1..ec62734 100644
10629 --- a/arch/sparc/kernel/prom_common.c
10630 +++ b/arch/sparc/kernel/prom_common.c
10631 @@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10633 unsigned int prom_early_allocated __initdata;
10635 -static struct of_pdt_ops prom_sparc_ops __initdata = {
10636 +static struct of_pdt_ops prom_sparc_ops __initconst = {
10637 .nextprop = prom_common_nextprop,
10638 .getproplen = prom_getproplen,
10639 .getproperty = prom_getproperty,
10640 diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10641 index 9ddc492..27a5619 100644
10642 --- a/arch/sparc/kernel/ptrace_64.c
10643 +++ b/arch/sparc/kernel/ptrace_64.c
10644 @@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10648 +#ifdef CONFIG_GRKERNSEC_SETXID
10649 +extern void gr_delayed_cred_worker(void);
10652 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10655 @@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10656 if (test_thread_flag(TIF_NOHZ))
10659 +#ifdef CONFIG_GRKERNSEC_SETXID
10660 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10661 + gr_delayed_cred_worker();
10664 if (test_thread_flag(TIF_SYSCALL_TRACE))
10665 ret = tracehook_report_syscall_entry(regs);
10667 @@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10668 if (test_thread_flag(TIF_NOHZ))
10671 +#ifdef CONFIG_GRKERNSEC_SETXID
10672 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10673 + gr_delayed_cred_worker();
10676 audit_syscall_exit(regs);
10678 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
10679 diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10680 index 19cd08d..ff21e99 100644
10681 --- a/arch/sparc/kernel/smp_64.c
10682 +++ b/arch/sparc/kernel/smp_64.c
10683 @@ -891,7 +891,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10686 #ifdef CONFIG_DEBUG_DCFLUSH
10687 - atomic_inc(&dcpage_flushes);
10688 + atomic_inc_unchecked(&dcpage_flushes);
10691 this_cpu = get_cpu();
10692 @@ -915,7 +915,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10693 xcall_deliver(data0, __pa(pg_addr),
10694 (u64) pg_addr, cpumask_of(cpu));
10695 #ifdef CONFIG_DEBUG_DCFLUSH
10696 - atomic_inc(&dcpage_flushes_xcall);
10697 + atomic_inc_unchecked(&dcpage_flushes_xcall);
10701 @@ -934,7 +934,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10704 #ifdef CONFIG_DEBUG_DCFLUSH
10705 - atomic_inc(&dcpage_flushes);
10706 + atomic_inc_unchecked(&dcpage_flushes);
10709 pg_addr = page_address(page);
10710 @@ -951,7 +951,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10711 xcall_deliver(data0, __pa(pg_addr),
10712 (u64) pg_addr, cpu_online_mask);
10713 #ifdef CONFIG_DEBUG_DCFLUSH
10714 - atomic_inc(&dcpage_flushes_xcall);
10715 + atomic_inc_unchecked(&dcpage_flushes_xcall);
10718 __local_flush_dcache_page(page);
10719 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10720 index 646988d..b88905f 100644
10721 --- a/arch/sparc/kernel/sys_sparc_32.c
10722 +++ b/arch/sparc/kernel/sys_sparc_32.c
10723 @@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10724 if (len > TASK_SIZE - PAGE_SIZE)
10727 - addr = TASK_UNMAPPED_BASE;
10728 + addr = current->mm->mmap_base;
10732 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10733 index 30e7ddb..266a3b0 100644
10734 --- a/arch/sparc/kernel/sys_sparc_64.c
10735 +++ b/arch/sparc/kernel/sys_sparc_64.c
10736 @@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10737 struct vm_area_struct * vma;
10738 unsigned long task_size = TASK_SIZE;
10739 int do_color_align;
10740 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10741 struct vm_unmapped_area_info info;
10743 if (flags & MAP_FIXED) {
10744 /* We do not accept a shared mapping if it would violate
10745 * cache aliasing constraints.
10747 - if ((flags & MAP_SHARED) &&
10748 + if ((filp || (flags & MAP_SHARED)) &&
10749 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10752 @@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10753 if (filp || (flags & MAP_SHARED))
10754 do_color_align = 1;
10756 +#ifdef CONFIG_PAX_RANDMMAP
10757 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10761 if (do_color_align)
10762 addr = COLOR_ALIGN(addr, pgoff);
10763 @@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10764 addr = PAGE_ALIGN(addr);
10766 vma = find_vma(mm, addr);
10767 - if (task_size - len >= addr &&
10768 - (!vma || addr + len <= vma->vm_start))
10769 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10775 - info.low_limit = TASK_UNMAPPED_BASE;
10776 + info.low_limit = mm->mmap_base;
10777 info.high_limit = min(task_size, VA_EXCLUDE_START);
10778 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10779 info.align_offset = pgoff << PAGE_SHIFT;
10780 + info.threadstack_offset = offset;
10781 addr = vm_unmapped_area(&info);
10783 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10784 VM_BUG_ON(addr != -ENOMEM);
10785 info.low_limit = VA_EXCLUDE_END;
10787 +#ifdef CONFIG_PAX_RANDMMAP
10788 + if (mm->pax_flags & MF_PAX_RANDMMAP)
10789 + info.low_limit += mm->delta_mmap;
10792 info.high_limit = task_size;
10793 addr = vm_unmapped_area(&info);
10795 @@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10796 unsigned long task_size = STACK_TOP32;
10797 unsigned long addr = addr0;
10798 int do_color_align;
10799 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10800 struct vm_unmapped_area_info info;
10802 /* This should only ever run for 32-bit processes. */
10803 @@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10804 /* We do not accept a shared mapping if it would violate
10805 * cache aliasing constraints.
10807 - if ((flags & MAP_SHARED) &&
10808 + if ((filp || (flags & MAP_SHARED)) &&
10809 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10812 @@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10813 if (filp || (flags & MAP_SHARED))
10814 do_color_align = 1;
10816 +#ifdef CONFIG_PAX_RANDMMAP
10817 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10820 /* requesting a specific address */
10822 if (do_color_align)
10823 @@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10824 addr = PAGE_ALIGN(addr);
10826 vma = find_vma(mm, addr);
10827 - if (task_size - len >= addr &&
10828 - (!vma || addr + len <= vma->vm_start))
10829 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10833 @@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10834 info.high_limit = mm->mmap_base;
10835 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10836 info.align_offset = pgoff << PAGE_SHIFT;
10837 + info.threadstack_offset = offset;
10838 addr = vm_unmapped_area(&info);
10841 @@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10842 VM_BUG_ON(addr != -ENOMEM);
10844 info.low_limit = TASK_UNMAPPED_BASE;
10846 +#ifdef CONFIG_PAX_RANDMMAP
10847 + if (mm->pax_flags & MF_PAX_RANDMMAP)
10848 + info.low_limit += mm->delta_mmap;
10851 info.high_limit = STACK_TOP32;
10852 addr = vm_unmapped_area(&info);
10854 @@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10855 EXPORT_SYMBOL(get_fb_unmapped_area);
10857 /* Essentially the same as PowerPC. */
10858 -static unsigned long mmap_rnd(void)
10859 +static unsigned long mmap_rnd(struct mm_struct *mm)
10861 unsigned long rnd = 0UL;
10863 +#ifdef CONFIG_PAX_RANDMMAP
10864 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10867 if (current->flags & PF_RANDOMIZE) {
10868 unsigned long val = get_random_int();
10869 if (test_thread_flag(TIF_32BIT))
10870 @@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
10872 void arch_pick_mmap_layout(struct mm_struct *mm)
10874 - unsigned long random_factor = mmap_rnd();
10875 + unsigned long random_factor = mmap_rnd(mm);
10879 @@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10880 gap == RLIM_INFINITY ||
10881 sysctl_legacy_va_layout) {
10882 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
10884 +#ifdef CONFIG_PAX_RANDMMAP
10885 + if (mm->pax_flags & MF_PAX_RANDMMAP)
10886 + mm->mmap_base += mm->delta_mmap;
10889 mm->get_unmapped_area = arch_get_unmapped_area;
10891 /* We know it's 32-bit */
10892 @@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10893 gap = (task_size / 6 * 5);
10895 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
10897 +#ifdef CONFIG_PAX_RANDMMAP
10898 + if (mm->pax_flags & MF_PAX_RANDMMAP)
10899 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10902 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10905 diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
10906 index bb00089..e0ea580 100644
10907 --- a/arch/sparc/kernel/syscalls.S
10908 +++ b/arch/sparc/kernel/syscalls.S
10909 @@ -62,7 +62,7 @@ sys32_rt_sigreturn:
10912 1: ldx [%g6 + TI_FLAGS], %l5
10913 - andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10914 + andcc %l5, _TIF_WORK_SYSCALL, %g0
10917 call syscall_trace_leave
10918 @@ -194,7 +194,7 @@ linux_sparc_syscall32:
10920 srl %i3, 0, %o3 ! IEU0
10921 srl %i2, 0, %o2 ! IEU0 Group
10922 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10923 + andcc %l0, _TIF_WORK_SYSCALL, %g0
10924 bne,pn %icc, linux_syscall_trace32 ! CTI
10925 mov %i0, %l5 ! IEU1
10926 5: call %l7 ! CTI Group brk forced
10927 @@ -218,7 +218,7 @@ linux_sparc_syscall:
10929 mov %i3, %o3 ! IEU1
10930 mov %i4, %o4 ! IEU0 Group
10931 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10932 + andcc %l0, _TIF_WORK_SYSCALL, %g0
10933 bne,pn %icc, linux_syscall_trace ! CTI Group
10934 mov %i0, %l5 ! IEU0
10935 2: call %l7 ! CTI Group brk forced
10936 @@ -233,7 +233,7 @@ ret_sys_call:
10938 cmp %o0, -ERESTART_RESTARTBLOCK
10940 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10941 + andcc %l0, _TIF_WORK_SYSCALL, %g0
10942 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
10945 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
10946 index 4f21df7..0a374da 100644
10947 --- a/arch/sparc/kernel/traps_32.c
10948 +++ b/arch/sparc/kernel/traps_32.c
10949 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
10950 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
10951 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
10953 +extern void gr_handle_kernel_exploit(void);
10955 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10957 static int die_counter;
10958 @@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10960 (((unsigned long) rw) >= PAGE_OFFSET) &&
10961 !(((unsigned long) rw) & 0x7)) {
10962 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
10963 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
10964 (void *) rw->ins[7]);
10965 rw = (struct reg_window32 *)rw->ins[6];
10968 printk("Instruction DUMP:");
10969 instruction_dump ((unsigned long *) regs->pc);
10970 - if(regs->psr & PSR_PS)
10971 + if(regs->psr & PSR_PS) {
10972 + gr_handle_kernel_exploit();
10978 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
10979 index d21cd62..00a4a17 100644
10980 --- a/arch/sparc/kernel/traps_64.c
10981 +++ b/arch/sparc/kernel/traps_64.c
10982 @@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
10984 p->trapstack[i].tstate, p->trapstack[i].tpc,
10985 p->trapstack[i].tnpc, p->trapstack[i].tt);
10986 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
10987 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
10991 @@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
10994 if (regs->tstate & TSTATE_PRIV) {
10996 +#ifdef CONFIG_PAX_REFCOUNT
10998 + pax_report_refcount_overflow(regs);
11001 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
11002 die_if_kernel(buffer, regs);
11004 @@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
11005 void bad_trap_tl1(struct pt_regs *regs, long lvl)
11010 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
11011 0, lvl, SIGTRAP) == NOTIFY_STOP)
11014 +#ifdef CONFIG_PAX_REFCOUNT
11016 + pax_report_refcount_overflow(regs);
11019 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
11021 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
11022 @@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
11023 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
11024 printk("%s" "ERROR(%d): ",
11025 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
11026 - printk("TPC<%pS>\n", (void *) regs->tpc);
11027 + printk("TPC<%pA>\n", (void *) regs->tpc);
11028 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
11029 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
11030 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
11031 @@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11032 smp_processor_id(),
11033 (type & 0x1) ? 'I' : 'D',
11035 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11036 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11037 panic("Irrecoverable Cheetah+ parity error.");
11040 @@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11041 smp_processor_id(),
11042 (type & 0x1) ? 'I' : 'D',
11044 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11045 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11048 struct sun4v_error_entry {
11049 @@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11050 /*0x38*/u64 reserved_5;
11053 -static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11054 -static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11055 +static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11056 +static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11058 static const char *sun4v_err_type_to_str(u8 type)
11060 @@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11063 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11064 - int cpu, const char *pfx, atomic_t *ocnt)
11065 + int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11067 u64 *raw_ptr = (u64 *) ent;
11069 @@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11073 - if ((cnt = atomic_read(ocnt)) != 0) {
11074 - atomic_set(ocnt, 0);
11075 + if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11076 + atomic_set_unchecked(ocnt, 0);
11078 printk("%s: Queue overflowed %d times.\n",
11080 @@ -2048,7 +2059,7 @@ out:
11082 void sun4v_resum_overflow(struct pt_regs *regs)
11084 - atomic_inc(&sun4v_resum_oflow_cnt);
11085 + atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11088 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11089 @@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11090 /* XXX Actually even this can make not that much sense. Perhaps
11091 * XXX we should just pull the plug and panic directly from here?
11093 - atomic_inc(&sun4v_nonresum_oflow_cnt);
11094 + atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11097 static void sun4v_tlb_error(struct pt_regs *regs)
11098 @@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11100 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11102 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11103 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11104 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11105 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11106 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11107 (void *) regs->u_regs[UREG_I7]);
11108 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11109 "pte[%lx] error[%lx]\n",
11110 @@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11112 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11114 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11115 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11116 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11117 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11118 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11119 (void *) regs->u_regs[UREG_I7]);
11120 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11121 "pte[%lx] error[%lx]\n",
11122 @@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11123 fp = (unsigned long)sf->fp + STACK_BIAS;
11126 - printk(" [%016lx] %pS\n", pc, (void *) pc);
11127 + printk(" [%016lx] %pA\n", pc, (void *) pc);
11128 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11129 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11130 int index = tsk->curr_ret_stack;
11131 if (tsk->ret_stack && index >= graph) {
11132 pc = tsk->ret_stack[index - graph].ret;
11133 - printk(" [%016lx] %pS\n", pc, (void *) pc);
11134 + printk(" [%016lx] %pA\n", pc, (void *) pc);
11138 @@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11139 return (struct reg_window *) (fp + STACK_BIAS);
11142 +extern void gr_handle_kernel_exploit(void);
11144 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11146 static int die_counter;
11147 @@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11150 kstack_valid(tp, (unsigned long) rw)) {
11151 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
11152 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
11153 (void *) rw->ins[7]);
11155 rw = kernel_stack_up(rw);
11156 @@ -2429,8 +2442,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11159 panic("Fatal exception");
11160 - if (regs->tstate & TSTATE_PRIV)
11161 + if (regs->tstate & TSTATE_PRIV) {
11162 + gr_handle_kernel_exploit();
11167 EXPORT_SYMBOL(die_if_kernel);
11168 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11169 index 62098a8..547ab2c 100644
11170 --- a/arch/sparc/kernel/unaligned_64.c
11171 +++ b/arch/sparc/kernel/unaligned_64.c
11172 @@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11173 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11175 if (__ratelimit(&ratelimit)) {
11176 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
11177 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
11178 regs->tpc, (void *) regs->tpc);
11181 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11182 index 3269b02..64f5231 100644
11183 --- a/arch/sparc/lib/Makefile
11184 +++ b/arch/sparc/lib/Makefile
11188 asflags-y := -ansi -DST_DIV0=0x02
11189 -ccflags-y := -Werror
11190 +#ccflags-y := -Werror
11192 lib-$(CONFIG_SPARC32) += ashrdi3.o
11193 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11194 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11195 index 05dac43..76f8ed4 100644
11196 --- a/arch/sparc/lib/atomic_64.S
11197 +++ b/arch/sparc/lib/atomic_64.S
11198 @@ -15,11 +15,22 @@
11199 * a value and does the barriers.
11202 -#define ATOMIC_OP(op) \
11203 -ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11204 +#ifdef CONFIG_PAX_REFCOUNT
11205 +#define __REFCOUNT_OP(op) op##cc
11206 +#define __OVERFLOW_IOP tvs %icc, 6;
11207 +#define __OVERFLOW_XOP tvs %xcc, 6;
11209 +#define __REFCOUNT_OP(op) op
11210 +#define __OVERFLOW_IOP
11211 +#define __OVERFLOW_XOP
11214 +#define __ATOMIC_OP(op, suffix, asm_op, post_op) \
11215 +ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11216 BACKOFF_SETUP(%o2); \
11217 1: lduw [%o1], %g1; \
11218 - op %g1, %o0, %g7; \
11219 + asm_op %g1, %o0, %g7; \
11221 cas [%o1], %g1, %g7; \
11223 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11224 @@ -29,11 +40,15 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11225 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11226 ENDPROC(atomic_##op); \
11228 -#define ATOMIC_OP_RETURN(op) \
11229 -ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11230 +#define ATOMIC_OP(op) __ATOMIC_OP(op, , op, ) \
11231 + __ATOMIC_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11233 +#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \
11234 +ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11235 BACKOFF_SETUP(%o2); \
11236 1: lduw [%o1], %g1; \
11237 - op %g1, %o0, %g7; \
11238 + asm_op %g1, %o0, %g7; \
11240 cas [%o1], %g1, %g7; \
11242 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11243 @@ -43,6 +58,9 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11244 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11245 ENDPROC(atomic_##op##_return);
11247 +#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , op, ) \
11248 + __ATOMIC_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11250 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11253 @@ -50,13 +68,16 @@ ATOMIC_OPS(sub)
11256 #undef ATOMIC_OP_RETURN
11257 +#undef __ATOMIC_OP_RETURN
11259 +#undef __ATOMIC_OP
11261 -#define ATOMIC64_OP(op) \
11262 -ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11263 +#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \
11264 +ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11265 BACKOFF_SETUP(%o2); \
11266 1: ldx [%o1], %g1; \
11267 - op %g1, %o0, %g7; \
11268 + asm_op %g1, %o0, %g7; \
11270 casx [%o1], %g1, %g7; \
11272 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11273 @@ -66,11 +87,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11274 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11275 ENDPROC(atomic64_##op); \
11277 -#define ATOMIC64_OP_RETURN(op) \
11278 -ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11279 +#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , op, ) \
11280 + __ATOMIC64_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11282 +#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \
11283 +ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11284 BACKOFF_SETUP(%o2); \
11285 1: ldx [%o1], %g1; \
11286 - op %g1, %o0, %g7; \
11287 + asm_op %g1, %o0, %g7; \
11289 casx [%o1], %g1, %g7; \
11291 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11292 @@ -80,6 +105,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11293 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11294 ENDPROC(atomic64_##op##_return);
11296 +#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , op, ) \
11297 + __ATOMIC64_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11299 #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
11302 @@ -87,7 +115,12 @@ ATOMIC64_OPS(sub)
11304 #undef ATOMIC64_OPS
11305 #undef ATOMIC64_OP_RETURN
11306 +#undef __ATOMIC64_OP_RETURN
11308 +#undef __ATOMIC64_OP
11309 +#undef __OVERFLOW_XOP
11310 +#undef __OVERFLOW_IOP
11311 +#undef __REFCOUNT_OP
11313 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
11315 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11316 index 1d649a9..fbc5bfc 100644
11317 --- a/arch/sparc/lib/ksyms.c
11318 +++ b/arch/sparc/lib/ksyms.c
11319 @@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
11320 /* Atomic counter implementation. */
11321 #define ATOMIC_OP(op) \
11322 EXPORT_SYMBOL(atomic_##op); \
11323 -EXPORT_SYMBOL(atomic64_##op);
11324 +EXPORT_SYMBOL(atomic_##op##_unchecked); \
11325 +EXPORT_SYMBOL(atomic64_##op); \
11326 +EXPORT_SYMBOL(atomic64_##op##_unchecked);
11328 #define ATOMIC_OP_RETURN(op) \
11329 EXPORT_SYMBOL(atomic_##op##_return); \
11330 @@ -110,6 +112,8 @@ EXPORT_SYMBOL(atomic64_##op##_return);
11331 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11334 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
11335 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
11339 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11340 index 30c3ecc..736f015 100644
11341 --- a/arch/sparc/mm/Makefile
11342 +++ b/arch/sparc/mm/Makefile
11347 -ccflags-y := -Werror
11348 +#ccflags-y := -Werror
11350 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11351 obj-y += fault_$(BITS).o
11352 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11353 index 70d8171..274c6c0 100644
11354 --- a/arch/sparc/mm/fault_32.c
11355 +++ b/arch/sparc/mm/fault_32.c
11357 #include <linux/perf_event.h>
11358 #include <linux/interrupt.h>
11359 #include <linux/kdebug.h>
11360 +#include <linux/slab.h>
11361 +#include <linux/pagemap.h>
11362 +#include <linux/compiler.h>
11364 #include <asm/page.h>
11365 #include <asm/pgtable.h>
11366 @@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11367 return safe_compute_effective_address(regs, insn);
11370 +#ifdef CONFIG_PAX_PAGEEXEC
11371 +#ifdef CONFIG_PAX_DLRESOLVE
11372 +static void pax_emuplt_close(struct vm_area_struct *vma)
11374 + vma->vm_mm->call_dl_resolve = 0UL;
11377 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11379 + unsigned int *kaddr;
11381 + vmf->page = alloc_page(GFP_HIGHUSER);
11383 + return VM_FAULT_OOM;
11385 + kaddr = kmap(vmf->page);
11386 + memset(kaddr, 0, PAGE_SIZE);
11387 + kaddr[0] = 0x9DE3BFA8U; /* save */
11388 + flush_dcache_page(vmf->page);
11389 + kunmap(vmf->page);
11390 + return VM_FAULT_MAJOR;
11393 +static const struct vm_operations_struct pax_vm_ops = {
11394 + .close = pax_emuplt_close,
11395 + .fault = pax_emuplt_fault
11398 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11402 + INIT_LIST_HEAD(&vma->anon_vma_chain);
11403 + vma->vm_mm = current->mm;
11404 + vma->vm_start = addr;
11405 + vma->vm_end = addr + PAGE_SIZE;
11406 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11407 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11408 + vma->vm_ops = &pax_vm_ops;
11410 + ret = insert_vm_struct(current->mm, vma);
11414 + ++current->mm->total_vm;
11420 + * PaX: decide what to do with offenders (regs->pc = fault address)
11422 + * returns 1 when task should be killed
11423 + * 2 when patched PLT trampoline was detected
11424 + * 3 when unpatched PLT trampoline was detected
11426 +static int pax_handle_fetch_fault(struct pt_regs *regs)
11429 +#ifdef CONFIG_PAX_EMUPLT
11432 + do { /* PaX: patched PLT emulation #1 */
11433 + unsigned int sethi1, sethi2, jmpl;
11435 + err = get_user(sethi1, (unsigned int *)regs->pc);
11436 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11437 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11442 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11443 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
11444 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
11446 + unsigned int addr;
11448 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11449 + addr = regs->u_regs[UREG_G1];
11450 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11452 + regs->npc = addr+4;
11457 + do { /* PaX: patched PLT emulation #2 */
11460 + err = get_user(ba, (unsigned int *)regs->pc);
11465 + if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11466 + unsigned int addr;
11468 + if ((ba & 0xFFC00000U) == 0x30800000U)
11469 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11471 + addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11473 + regs->npc = addr+4;
11478 + do { /* PaX: patched PLT emulation #3 */
11479 + unsigned int sethi, bajmpl, nop;
11481 + err = get_user(sethi, (unsigned int *)regs->pc);
11482 + err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11483 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
11488 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
11489 + ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11490 + nop == 0x01000000U)
11492 + unsigned int addr;
11494 + addr = (sethi & 0x003FFFFFU) << 10;
11495 + regs->u_regs[UREG_G1] = addr;
11496 + if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11497 + addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11499 + addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11501 + regs->npc = addr+4;
11506 + do { /* PaX: unpatched PLT emulation step 1 */
11507 + unsigned int sethi, ba, nop;
11509 + err = get_user(sethi, (unsigned int *)regs->pc);
11510 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
11511 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
11516 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
11517 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11518 + nop == 0x01000000U)
11520 + unsigned int addr, save, call;
11522 + if ((ba & 0xFFC00000U) == 0x30800000U)
11523 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11525 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11527 + err = get_user(save, (unsigned int *)addr);
11528 + err |= get_user(call, (unsigned int *)(addr+4));
11529 + err |= get_user(nop, (unsigned int *)(addr+8));
11533 +#ifdef CONFIG_PAX_DLRESOLVE
11534 + if (save == 0x9DE3BFA8U &&
11535 + (call & 0xC0000000U) == 0x40000000U &&
11536 + nop == 0x01000000U)
11538 + struct vm_area_struct *vma;
11539 + unsigned long call_dl_resolve;
11541 + down_read(&current->mm->mmap_sem);
11542 + call_dl_resolve = current->mm->call_dl_resolve;
11543 + up_read(&current->mm->mmap_sem);
11544 + if (likely(call_dl_resolve))
11547 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11549 + down_write(&current->mm->mmap_sem);
11550 + if (current->mm->call_dl_resolve) {
11551 + call_dl_resolve = current->mm->call_dl_resolve;
11552 + up_write(&current->mm->mmap_sem);
11554 + kmem_cache_free(vm_area_cachep, vma);
11558 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11559 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11560 + up_write(&current->mm->mmap_sem);
11562 + kmem_cache_free(vm_area_cachep, vma);
11566 + if (pax_insert_vma(vma, call_dl_resolve)) {
11567 + up_write(&current->mm->mmap_sem);
11568 + kmem_cache_free(vm_area_cachep, vma);
11572 + current->mm->call_dl_resolve = call_dl_resolve;
11573 + up_write(&current->mm->mmap_sem);
11576 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11577 + regs->pc = call_dl_resolve;
11578 + regs->npc = addr+4;
11583 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11584 + if ((save & 0xFFC00000U) == 0x05000000U &&
11585 + (call & 0xFFFFE000U) == 0x85C0A000U &&
11586 + nop == 0x01000000U)
11588 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11589 + regs->u_regs[UREG_G2] = addr + 4;
11590 + addr = (save & 0x003FFFFFU) << 10;
11591 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11593 + regs->npc = addr+4;
11599 + do { /* PaX: unpatched PLT emulation step 2 */
11600 + unsigned int save, call, nop;
11602 + err = get_user(save, (unsigned int *)(regs->pc-4));
11603 + err |= get_user(call, (unsigned int *)regs->pc);
11604 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
11608 + if (save == 0x9DE3BFA8U &&
11609 + (call & 0xC0000000U) == 0x40000000U &&
11610 + nop == 0x01000000U)
11612 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11614 + regs->u_regs[UREG_RETPC] = regs->pc;
11615 + regs->pc = dl_resolve;
11616 + regs->npc = dl_resolve+4;
11625 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11629 + printk(KERN_ERR "PAX: bytes at PC: ");
11630 + for (i = 0; i < 8; i++) {
11632 + if (get_user(c, (unsigned int *)pc+i))
11633 + printk(KERN_CONT "???????? ");
11635 + printk(KERN_CONT "%08x ", c);
11641 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11644 @@ -226,6 +500,24 @@ good_area:
11645 if (!(vma->vm_flags & VM_WRITE))
11649 +#ifdef CONFIG_PAX_PAGEEXEC
11650 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11651 + up_read(&mm->mmap_sem);
11652 + switch (pax_handle_fetch_fault(regs)) {
11654 +#ifdef CONFIG_PAX_EMUPLT
11661 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11662 + do_group_exit(SIGKILL);
11666 /* Allow reads even for write-only mappings */
11667 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11669 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11670 index 4798232..f76e3aa 100644
11671 --- a/arch/sparc/mm/fault_64.c
11672 +++ b/arch/sparc/mm/fault_64.c
11674 #include <linux/kdebug.h>
11675 #include <linux/percpu.h>
11676 #include <linux/context_tracking.h>
11677 +#include <linux/slab.h>
11678 +#include <linux/pagemap.h>
11679 +#include <linux/compiler.h>
11681 #include <asm/page.h>
11682 #include <asm/pgtable.h>
11683 @@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11684 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11686 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11687 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11688 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11689 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11691 unhandled_fault(regs->tpc, current, regs);
11692 @@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
11696 +#ifdef CONFIG_PAX_PAGEEXEC
11697 +#ifdef CONFIG_PAX_DLRESOLVE
11698 +static void pax_emuplt_close(struct vm_area_struct *vma)
11700 + vma->vm_mm->call_dl_resolve = 0UL;
11703 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11705 + unsigned int *kaddr;
11707 + vmf->page = alloc_page(GFP_HIGHUSER);
11709 + return VM_FAULT_OOM;
11711 + kaddr = kmap(vmf->page);
11712 + memset(kaddr, 0, PAGE_SIZE);
11713 + kaddr[0] = 0x9DE3BFA8U; /* save */
11714 + flush_dcache_page(vmf->page);
11715 + kunmap(vmf->page);
11716 + return VM_FAULT_MAJOR;
11719 +static const struct vm_operations_struct pax_vm_ops = {
11720 + .close = pax_emuplt_close,
11721 + .fault = pax_emuplt_fault
11724 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11728 + INIT_LIST_HEAD(&vma->anon_vma_chain);
11729 + vma->vm_mm = current->mm;
11730 + vma->vm_start = addr;
11731 + vma->vm_end = addr + PAGE_SIZE;
11732 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11733 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11734 + vma->vm_ops = &pax_vm_ops;
11736 + ret = insert_vm_struct(current->mm, vma);
11740 + ++current->mm->total_vm;
11746 + * PaX: decide what to do with offenders (regs->tpc = fault address)
11748 + * returns 1 when task should be killed
11749 + * 2 when patched PLT trampoline was detected
11750 + * 3 when unpatched PLT trampoline was detected
11752 +static int pax_handle_fetch_fault(struct pt_regs *regs)
11755 +#ifdef CONFIG_PAX_EMUPLT
11758 + do { /* PaX: patched PLT emulation #1 */
11759 + unsigned int sethi1, sethi2, jmpl;
11761 + err = get_user(sethi1, (unsigned int *)regs->tpc);
11762 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11763 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11768 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11769 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
11770 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
11772 + unsigned long addr;
11774 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11775 + addr = regs->u_regs[UREG_G1];
11776 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11778 + if (test_thread_flag(TIF_32BIT))
11779 + addr &= 0xFFFFFFFFUL;
11781 + regs->tpc = addr;
11782 + regs->tnpc = addr+4;
11787 + do { /* PaX: patched PLT emulation #2 */
11790 + err = get_user(ba, (unsigned int *)regs->tpc);
11795 + if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11796 + unsigned long addr;
11798 + if ((ba & 0xFFC00000U) == 0x30800000U)
11799 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11801 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11803 + if (test_thread_flag(TIF_32BIT))
11804 + addr &= 0xFFFFFFFFUL;
11806 + regs->tpc = addr;
11807 + regs->tnpc = addr+4;
11812 + do { /* PaX: patched PLT emulation #3 */
11813 + unsigned int sethi, bajmpl, nop;
11815 + err = get_user(sethi, (unsigned int *)regs->tpc);
11816 + err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
11817 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11822 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
11823 + ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11824 + nop == 0x01000000U)
11826 + unsigned long addr;
11828 + addr = (sethi & 0x003FFFFFU) << 10;
11829 + regs->u_regs[UREG_G1] = addr;
11830 + if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11831 + addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11833 + addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11835 + if (test_thread_flag(TIF_32BIT))
11836 + addr &= 0xFFFFFFFFUL;
11838 + regs->tpc = addr;
11839 + regs->tnpc = addr+4;
11844 + do { /* PaX: patched PLT emulation #4 */
11845 + unsigned int sethi, mov1, call, mov2;
11847 + err = get_user(sethi, (unsigned int *)regs->tpc);
11848 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
11849 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
11850 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
11855 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
11856 + mov1 == 0x8210000FU &&
11857 + (call & 0xC0000000U) == 0x40000000U &&
11858 + mov2 == 0x9E100001U)
11860 + unsigned long addr;
11862 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
11863 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11865 + if (test_thread_flag(TIF_32BIT))
11866 + addr &= 0xFFFFFFFFUL;
11868 + regs->tpc = addr;
11869 + regs->tnpc = addr+4;
11874 + do { /* PaX: patched PLT emulation #5 */
11875 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11877 + err = get_user(sethi, (unsigned int *)regs->tpc);
11878 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11879 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11880 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11881 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11882 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11883 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11884 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11889 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
11890 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
11891 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11892 + (or1 & 0xFFFFE000U) == 0x82106000U &&
11893 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
11894 + sllx == 0x83287020U &&
11895 + jmpl == 0x81C04005U &&
11896 + nop == 0x01000000U)
11898 + unsigned long addr;
11900 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11901 + regs->u_regs[UREG_G1] <<= 32;
11902 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11903 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11904 + regs->tpc = addr;
11905 + regs->tnpc = addr+4;
11910 + do { /* PaX: patched PLT emulation #6 */
11911 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11913 + err = get_user(sethi, (unsigned int *)regs->tpc);
11914 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11915 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11916 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11917 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
11918 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11919 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11924 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
11925 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
11926 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11927 + sllx == 0x83287020U &&
11928 + (or & 0xFFFFE000U) == 0x8A116000U &&
11929 + jmpl == 0x81C04005U &&
11930 + nop == 0x01000000U)
11932 + unsigned long addr;
11934 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11935 + regs->u_regs[UREG_G1] <<= 32;
11936 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11937 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11938 + regs->tpc = addr;
11939 + regs->tnpc = addr+4;
11944 + do { /* PaX: unpatched PLT emulation step 1 */
11945 + unsigned int sethi, ba, nop;
11947 + err = get_user(sethi, (unsigned int *)regs->tpc);
11948 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11949 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11954 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
11955 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11956 + nop == 0x01000000U)
11958 + unsigned long addr;
11959 + unsigned int save, call;
11960 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
11962 + if ((ba & 0xFFC00000U) == 0x30800000U)
11963 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11965 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11967 + if (test_thread_flag(TIF_32BIT))
11968 + addr &= 0xFFFFFFFFUL;
11970 + err = get_user(save, (unsigned int *)addr);
11971 + err |= get_user(call, (unsigned int *)(addr+4));
11972 + err |= get_user(nop, (unsigned int *)(addr+8));
11976 +#ifdef CONFIG_PAX_DLRESOLVE
11977 + if (save == 0x9DE3BFA8U &&
11978 + (call & 0xC0000000U) == 0x40000000U &&
11979 + nop == 0x01000000U)
11981 + struct vm_area_struct *vma;
11982 + unsigned long call_dl_resolve;
11984 + down_read(&current->mm->mmap_sem);
11985 + call_dl_resolve = current->mm->call_dl_resolve;
11986 + up_read(&current->mm->mmap_sem);
11987 + if (likely(call_dl_resolve))
11990 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11992 + down_write(&current->mm->mmap_sem);
11993 + if (current->mm->call_dl_resolve) {
11994 + call_dl_resolve = current->mm->call_dl_resolve;
11995 + up_write(&current->mm->mmap_sem);
11997 + kmem_cache_free(vm_area_cachep, vma);
12001 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
12002 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
12003 + up_write(&current->mm->mmap_sem);
12005 + kmem_cache_free(vm_area_cachep, vma);
12009 + if (pax_insert_vma(vma, call_dl_resolve)) {
12010 + up_write(&current->mm->mmap_sem);
12011 + kmem_cache_free(vm_area_cachep, vma);
12015 + current->mm->call_dl_resolve = call_dl_resolve;
12016 + up_write(&current->mm->mmap_sem);
12019 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12020 + regs->tpc = call_dl_resolve;
12021 + regs->tnpc = addr+4;
12026 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12027 + if ((save & 0xFFC00000U) == 0x05000000U &&
12028 + (call & 0xFFFFE000U) == 0x85C0A000U &&
12029 + nop == 0x01000000U)
12031 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12032 + regs->u_regs[UREG_G2] = addr + 4;
12033 + addr = (save & 0x003FFFFFU) << 10;
12034 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12036 + if (test_thread_flag(TIF_32BIT))
12037 + addr &= 0xFFFFFFFFUL;
12039 + regs->tpc = addr;
12040 + regs->tnpc = addr+4;
12044 + /* PaX: 64-bit PLT stub */
12045 + err = get_user(sethi1, (unsigned int *)addr);
12046 + err |= get_user(sethi2, (unsigned int *)(addr+4));
12047 + err |= get_user(or1, (unsigned int *)(addr+8));
12048 + err |= get_user(or2, (unsigned int *)(addr+12));
12049 + err |= get_user(sllx, (unsigned int *)(addr+16));
12050 + err |= get_user(add, (unsigned int *)(addr+20));
12051 + err |= get_user(jmpl, (unsigned int *)(addr+24));
12052 + err |= get_user(nop, (unsigned int *)(addr+28));
12056 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12057 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12058 + (or1 & 0xFFFFE000U) == 0x88112000U &&
12059 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
12060 + sllx == 0x89293020U &&
12061 + add == 0x8A010005U &&
12062 + jmpl == 0x89C14000U &&
12063 + nop == 0x01000000U)
12065 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12066 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12067 + regs->u_regs[UREG_G4] <<= 32;
12068 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12069 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12070 + regs->u_regs[UREG_G4] = addr + 24;
12071 + addr = regs->u_regs[UREG_G5];
12072 + regs->tpc = addr;
12073 + regs->tnpc = addr+4;
12079 +#ifdef CONFIG_PAX_DLRESOLVE
12080 + do { /* PaX: unpatched PLT emulation step 2 */
12081 + unsigned int save, call, nop;
12083 + err = get_user(save, (unsigned int *)(regs->tpc-4));
12084 + err |= get_user(call, (unsigned int *)regs->tpc);
12085 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12089 + if (save == 0x9DE3BFA8U &&
12090 + (call & 0xC0000000U) == 0x40000000U &&
12091 + nop == 0x01000000U)
12093 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12095 + if (test_thread_flag(TIF_32BIT))
12096 + dl_resolve &= 0xFFFFFFFFUL;
12098 + regs->u_regs[UREG_RETPC] = regs->tpc;
12099 + regs->tpc = dl_resolve;
12100 + regs->tnpc = dl_resolve+4;
12106 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12107 + unsigned int sethi, ba, nop;
12109 + err = get_user(sethi, (unsigned int *)regs->tpc);
12110 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12111 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12116 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
12117 + (ba & 0xFFF00000U) == 0x30600000U &&
12118 + nop == 0x01000000U)
12120 + unsigned long addr;
12122 + addr = (sethi & 0x003FFFFFU) << 10;
12123 + regs->u_regs[UREG_G1] = addr;
12124 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12126 + if (test_thread_flag(TIF_32BIT))
12127 + addr &= 0xFFFFFFFFUL;
12129 + regs->tpc = addr;
12130 + regs->tnpc = addr+4;
12140 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12144 + printk(KERN_ERR "PAX: bytes at PC: ");
12145 + for (i = 0; i < 8; i++) {
12147 + if (get_user(c, (unsigned int *)pc+i))
12148 + printk(KERN_CONT "???????? ");
12150 + printk(KERN_CONT "%08x ", c);
12156 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12158 enum ctx_state prev_state = exception_enter();
12159 @@ -353,6 +816,29 @@ retry:
12163 +#ifdef CONFIG_PAX_PAGEEXEC
12164 + /* PaX: detect ITLB misses on non-exec pages */
12165 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12166 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12168 + if (address != regs->tpc)
12171 + up_read(&mm->mmap_sem);
12172 + switch (pax_handle_fetch_fault(regs)) {
12174 +#ifdef CONFIG_PAX_EMUPLT
12181 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12182 + do_group_exit(SIGKILL);
12186 /* Pure DTLB misses do not tell us whether the fault causing
12187 * load/store/atomic was a write or not, it only says that there
12188 * was no match. So in such a case we (carefully) read the
12189 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12190 index 4242eab..9ae6360 100644
12191 --- a/arch/sparc/mm/hugetlbpage.c
12192 +++ b/arch/sparc/mm/hugetlbpage.c
12193 @@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12194 unsigned long addr,
12196 unsigned long pgoff,
12197 - unsigned long flags)
12198 + unsigned long flags,
12199 + unsigned long offset)
12201 + struct mm_struct *mm = current->mm;
12202 unsigned long task_size = TASK_SIZE;
12203 struct vm_unmapped_area_info info;
12205 @@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12209 - info.low_limit = TASK_UNMAPPED_BASE;
12210 + info.low_limit = mm->mmap_base;
12211 info.high_limit = min(task_size, VA_EXCLUDE_START);
12212 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12213 info.align_offset = 0;
12214 + info.threadstack_offset = offset;
12215 addr = vm_unmapped_area(&info);
12217 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12218 VM_BUG_ON(addr != -ENOMEM);
12219 info.low_limit = VA_EXCLUDE_END;
12221 +#ifdef CONFIG_PAX_RANDMMAP
12222 + if (mm->pax_flags & MF_PAX_RANDMMAP)
12223 + info.low_limit += mm->delta_mmap;
12226 info.high_limit = task_size;
12227 addr = vm_unmapped_area(&info);
12229 @@ -55,7 +64,8 @@ static unsigned long
12230 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12231 const unsigned long len,
12232 const unsigned long pgoff,
12233 - const unsigned long flags)
12234 + const unsigned long flags,
12235 + const unsigned long offset)
12237 struct mm_struct *mm = current->mm;
12238 unsigned long addr = addr0;
12239 @@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12240 info.high_limit = mm->mmap_base;
12241 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12242 info.align_offset = 0;
12243 + info.threadstack_offset = offset;
12244 addr = vm_unmapped_area(&info);
12247 @@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12248 VM_BUG_ON(addr != -ENOMEM);
12250 info.low_limit = TASK_UNMAPPED_BASE;
12252 +#ifdef CONFIG_PAX_RANDMMAP
12253 + if (mm->pax_flags & MF_PAX_RANDMMAP)
12254 + info.low_limit += mm->delta_mmap;
12257 info.high_limit = STACK_TOP32;
12258 addr = vm_unmapped_area(&info);
12260 @@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12261 struct mm_struct *mm = current->mm;
12262 struct vm_area_struct *vma;
12263 unsigned long task_size = TASK_SIZE;
12264 + unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12266 if (test_thread_flag(TIF_32BIT))
12267 task_size = STACK_TOP32;
12268 @@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12272 +#ifdef CONFIG_PAX_RANDMMAP
12273 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12277 addr = ALIGN(addr, HPAGE_SIZE);
12278 vma = find_vma(mm, addr);
12279 - if (task_size - len >= addr &&
12280 - (!vma || addr + len <= vma->vm_start))
12281 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12284 if (mm->get_unmapped_area == arch_get_unmapped_area)
12285 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12287 + pgoff, flags, offset);
12289 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12291 + pgoff, flags, offset);
12294 pte_t *huge_pte_alloc(struct mm_struct *mm,
12295 diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12296 index 559cb74..9e5f097 100644
12297 --- a/arch/sparc/mm/init_64.c
12298 +++ b/arch/sparc/mm/init_64.c
12299 @@ -187,9 +187,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12300 int num_kernel_image_mappings;
12302 #ifdef CONFIG_DEBUG_DCFLUSH
12303 -atomic_t dcpage_flushes = ATOMIC_INIT(0);
12304 +atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12306 -atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12307 +atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12311 @@ -197,7 +197,7 @@ inline void flush_dcache_page_impl(struct page *page)
12313 BUG_ON(tlb_type == hypervisor);
12314 #ifdef CONFIG_DEBUG_DCFLUSH
12315 - atomic_inc(&dcpage_flushes);
12316 + atomic_inc_unchecked(&dcpage_flushes);
12319 #ifdef DCACHE_ALIASING_POSSIBLE
12320 @@ -469,10 +469,10 @@ void mmu_info(struct seq_file *m)
12322 #ifdef CONFIG_DEBUG_DCFLUSH
12323 seq_printf(m, "DCPageFlushes\t: %d\n",
12324 - atomic_read(&dcpage_flushes));
12325 + atomic_read_unchecked(&dcpage_flushes));
12327 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12328 - atomic_read(&dcpage_flushes_xcall));
12329 + atomic_read_unchecked(&dcpage_flushes_xcall));
12330 #endif /* CONFIG_SMP */
12331 #endif /* CONFIG_DEBUG_DCFLUSH */
12333 diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12334 index a07e31b..85c9003 100644
12335 --- a/arch/tile/Kconfig
12336 +++ b/arch/tile/Kconfig
12337 @@ -198,6 +198,7 @@ source "kernel/Kconfig.hz"
12340 bool "kexec system call"
12341 + depends on !GRKERNSEC_KMEM
12343 kexec is a system call that implements the ability to shutdown your
12344 current kernel, and to start another kernel. It is like a reboot
12345 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12346 index 7b11c5f..755a026 100644
12347 --- a/arch/tile/include/asm/atomic_64.h
12348 +++ b/arch/tile/include/asm/atomic_64.h
12349 @@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12351 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12353 +#define atomic64_read_unchecked(v) atomic64_read(v)
12354 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12355 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12356 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12357 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12358 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
12359 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12360 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
12361 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12363 /* Define this to indicate that cmpxchg is an efficient operation. */
12364 #define __HAVE_ARCH_CMPXCHG
12366 diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12367 index 6160761..00cac88 100644
12368 --- a/arch/tile/include/asm/cache.h
12369 +++ b/arch/tile/include/asm/cache.h
12370 @@ -15,11 +15,12 @@
12371 #ifndef _ASM_TILE_CACHE_H
12372 #define _ASM_TILE_CACHE_H
12374 +#include <linux/const.h>
12375 #include <arch/chip.h>
12377 /* bytes per L1 data cache line */
12378 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12379 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12380 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12382 /* bytes per L2 cache line */
12383 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
12384 diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12385 index f41cb53..31d3ab4 100644
12386 --- a/arch/tile/include/asm/uaccess.h
12387 +++ b/arch/tile/include/asm/uaccess.h
12388 @@ -417,9 +417,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12389 const void __user *from,
12392 - int sz = __compiletime_object_size(to);
12393 + size_t sz = __compiletime_object_size(to);
12395 - if (likely(sz == -1 || sz >= n))
12396 + if (likely(sz == (size_t)-1 || sz >= n))
12397 n = _copy_from_user(to, from, n);
12399 copy_from_user_overflow();
12400 diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12401 index 8416240..a012fb7 100644
12402 --- a/arch/tile/mm/hugetlbpage.c
12403 +++ b/arch/tile/mm/hugetlbpage.c
12404 @@ -179,6 +179,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12405 info.high_limit = TASK_SIZE;
12406 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12407 info.align_offset = 0;
12408 + info.threadstack_offset = 0;
12409 return vm_unmapped_area(&info);
12412 @@ -196,6 +197,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12413 info.high_limit = current->mm->mmap_base;
12414 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12415 info.align_offset = 0;
12416 + info.threadstack_offset = 0;
12417 addr = vm_unmapped_area(&info);
12420 diff --git a/arch/um/Makefile b/arch/um/Makefile
12421 index 17d4460..9d74338e3de4 100644
12422 --- a/arch/um/Makefile
12423 +++ b/arch/um/Makefile
12424 @@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12425 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12426 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12428 +ifdef CONSTIFY_PLUGIN
12429 +USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12432 #This will adjust *FLAGS accordingly to the platform.
12433 include $(ARCH_DIR)/Makefile-os-$(OS)
12435 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12436 index 19e1bdd..3665b77 100644
12437 --- a/arch/um/include/asm/cache.h
12438 +++ b/arch/um/include/asm/cache.h
12440 #ifndef __UM_CACHE_H
12441 #define __UM_CACHE_H
12443 +#include <linux/const.h>
12445 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12446 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12448 # define L1_CACHE_SHIFT 5
12451 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12452 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12455 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12456 index 2e0a6b1..a64d0f5 100644
12457 --- a/arch/um/include/asm/kmap_types.h
12458 +++ b/arch/um/include/asm/kmap_types.h
12461 /* No more #include "asm/arch/kmap_types.h" ! */
12463 -#define KM_TYPE_NR 14
12464 +#define KM_TYPE_NR 15
12467 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12468 index 71c5d13..4c7b9f1 100644
12469 --- a/arch/um/include/asm/page.h
12470 +++ b/arch/um/include/asm/page.h
12472 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12473 #define PAGE_MASK (~(PAGE_SIZE-1))
12475 +#define ktla_ktva(addr) (addr)
12476 +#define ktva_ktla(addr) (addr)
12478 #ifndef __ASSEMBLY__
12481 diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12482 index 2b4274e..754fe06 100644
12483 --- a/arch/um/include/asm/pgtable-3level.h
12484 +++ b/arch/um/include/asm/pgtable-3level.h
12486 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12487 #define pud_populate(mm, pud, pmd) \
12488 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12489 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12491 #ifdef CONFIG_64BIT
12492 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12493 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12494 index 68b9119..f72353c 100644
12495 --- a/arch/um/kernel/process.c
12496 +++ b/arch/um/kernel/process.c
12497 @@ -345,22 +345,6 @@ int singlestepping(void * t)
12502 - * Only x86 and x86_64 have an arch_align_stack().
12503 - * All other arches have "#define arch_align_stack(x) (x)"
12504 - * in their asm/exec.h
12505 - * As this is included in UML from asm-um/system-generic.h,
12506 - * we can use it to behave as the subarch does.
12508 -#ifndef arch_align_stack
12509 -unsigned long arch_align_stack(unsigned long sp)
12511 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12512 - sp -= get_random_int() % 8192;
12513 - return sp & ~0xf;
12517 unsigned long get_wchan(struct task_struct *p)
12519 unsigned long stack_page, sp, ip;
12520 diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12521 index ad8f795..2c7eec6 100644
12522 --- a/arch/unicore32/include/asm/cache.h
12523 +++ b/arch/unicore32/include/asm/cache.h
12525 #ifndef __UNICORE_CACHE_H__
12526 #define __UNICORE_CACHE_H__
12528 -#define L1_CACHE_SHIFT (5)
12529 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12530 +#include <linux/const.h>
12532 +#define L1_CACHE_SHIFT 5
12533 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12536 * Memory returned by kmalloc() may be used for DMA, so we must make
12537 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12538 index 226d569..d420edc 100644
12539 --- a/arch/x86/Kconfig
12540 +++ b/arch/x86/Kconfig
12541 @@ -32,7 +32,7 @@ config X86
12542 select HAVE_AOUT if X86_32
12543 select HAVE_UNSTABLE_SCHED_CLOCK
12544 select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
12545 - select ARCH_SUPPORTS_INT128 if X86_64
12546 + select ARCH_SUPPORTS_INT128 if X86_64 && !PAX_SIZE_OVERFLOW
12548 select HAVE_OPROFILE
12549 select HAVE_PCSPKR_PLATFORM
12550 @@ -134,7 +134,7 @@ config X86
12552 select HAVE_DEBUG_STACKOVERFLOW
12553 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12554 - select HAVE_CC_STACKPROTECTOR
12555 + select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12556 select GENERIC_CPU_AUTOPROBE
12557 select HAVE_ARCH_AUDITSYSCALL
12558 select ARCH_SUPPORTS_ATOMIC_RMW
12559 @@ -266,7 +266,7 @@ config X86_HT
12561 config X86_32_LAZY_GS
12563 - depends on X86_32 && !CC_STACKPROTECTOR
12564 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12566 config ARCH_HWEIGHT_CFLAGS
12568 @@ -638,6 +638,7 @@ config SCHED_OMIT_FRAME_POINTER
12570 menuconfig HYPERVISOR_GUEST
12571 bool "Linux guest support"
12572 + depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12574 Say Y here to enable options for running Linux under various hyper-
12575 visors. This option enables basic hypervisor detection and platform
12576 @@ -1005,6 +1006,7 @@ config VM86
12579 bool "Enable support for 16-bit segments" if EXPERT
12580 + depends on !GRKERNSEC
12583 This option is required by programs like Wine to run 16-bit
12584 @@ -1178,6 +1180,7 @@ choice
12588 + depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12590 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12591 However, the address space of 32-bit x86 processors is only 4
12592 @@ -1214,6 +1217,7 @@ config NOHIGHMEM
12596 + depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12598 Select this if you have a 32-bit processor and between 1 and 4
12599 gigabytes of physical RAM.
12600 @@ -1266,7 +1270,7 @@ config PAGE_OFFSET
12602 default 0xB0000000 if VMSPLIT_3G_OPT
12603 default 0x80000000 if VMSPLIT_2G
12604 - default 0x78000000 if VMSPLIT_2G_OPT
12605 + default 0x70000000 if VMSPLIT_2G_OPT
12606 default 0x40000000 if VMSPLIT_1G
12609 @@ -1717,6 +1721,7 @@ source kernel/Kconfig.hz
12612 bool "kexec system call"
12613 + depends on !GRKERNSEC_KMEM
12615 kexec is a system call that implements the ability to shutdown your
12616 current kernel, and to start another kernel. It is like a reboot
12617 @@ -1899,7 +1904,9 @@ config X86_NEED_RELOCS
12619 config PHYSICAL_ALIGN
12620 hex "Alignment value to which kernel should be aligned"
12621 - default "0x200000"
12622 + default "0x1000000"
12623 + range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12624 + range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12625 range 0x2000 0x1000000 if X86_32
12626 range 0x200000 0x1000000 if X86_64
12628 @@ -1982,6 +1989,7 @@ config COMPAT_VDSO
12630 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
12631 depends on X86_32 || IA32_EMULATION
12632 + depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12634 Certain buggy versions of glibc will crash if they are
12635 presented with a 32-bit vDSO that is not mapped at the address
12636 @@ -2046,6 +2054,22 @@ config CMDLINE_OVERRIDE
12637 This is used to work around broken boot loaders. This should
12638 be set to 'N' under normal conditions.
12640 +config DEFAULT_MODIFY_LDT_SYSCALL
12641 + bool "Allow userspace to modify the LDT by default"
12645 + Modifying the LDT (Local Descriptor Table) may be needed to run a
12646 + 16-bit or segmented code such as Dosemu or Wine. This is done via
12647 + a system call which is not needed to run portable applications,
12648 + and which can sometimes be abused to exploit some weaknesses of
12649 + the architecture, opening new vulnerabilities.
12651 + For this reason this option allows one to enable or disable the
12652 + feature at runtime. It is recommended to say 'N' here to leave
12653 + the system protected, and to enable it at runtime only if needed
12654 + by setting the sys.kernel.modify_ldt sysctl.
12656 source "kernel/livepatch/Kconfig"
12659 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12660 index 6983314..54ad7e8 100644
12661 --- a/arch/x86/Kconfig.cpu
12662 +++ b/arch/x86/Kconfig.cpu
12663 @@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12665 config X86_F00F_BUG
12667 - depends on M586MMX || M586TSC || M586 || M486
12668 + depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12670 config X86_INVD_BUG
12672 @@ -327,7 +327,7 @@ config X86_INVD_BUG
12674 config X86_ALIGNMENT_16
12676 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12677 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12679 config X86_INTEL_USERCOPY
12681 @@ -369,7 +369,7 @@ config X86_CMPXCHG64
12685 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12686 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12688 config X86_MINIMUM_CPU_FAMILY
12690 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12691 index 72484a6..83a4411 100644
12692 --- a/arch/x86/Kconfig.debug
12693 +++ b/arch/x86/Kconfig.debug
12694 @@ -89,7 +89,7 @@ config EFI_PGT_DUMP
12695 config DEBUG_RODATA
12696 bool "Write protect kernel read-only data structures"
12698 - depends on DEBUG_KERNEL
12699 + depends on DEBUG_KERNEL && BROKEN
12701 Mark the kernel read-only data as write-protected in the pagetables,
12702 in order to catch accidental (and incorrect) writes to such const
12703 @@ -107,7 +107,7 @@ config DEBUG_RODATA_TEST
12705 config DEBUG_SET_MODULE_RONX
12706 bool "Set loadable kernel module data as NX and text as RO"
12707 - depends on MODULES
12708 + depends on MODULES && BROKEN
12710 This option helps catch unintended modifications to loadable
12711 kernel module's text and read-only data. It also prevents execution
12712 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12713 index 2fda005..2c72d40 100644
12714 --- a/arch/x86/Makefile
12715 +++ b/arch/x86/Makefile
12716 @@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
12717 # CPU-specific tuning. Anything which can be shared with UML should go here.
12718 include arch/x86/Makefile_32.cpu
12719 KBUILD_CFLAGS += $(cflags-y)
12721 - # temporary until string.h is fixed
12722 - KBUILD_CFLAGS += -ffreestanding
12725 UTS_MACHINE := x86_64
12726 @@ -107,6 +104,9 @@ else
12727 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
12730 +# temporary until string.h is fixed
12731 +KBUILD_CFLAGS += -ffreestanding
12733 # Make sure compiler does not have buggy stack-protector support.
12734 ifdef CONFIG_CC_STACKPROTECTOR
12735 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12736 @@ -181,6 +181,7 @@ archheaders:
12737 $(Q)$(MAKE) $(build)=arch/x86/syscalls all
12740 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12741 ifeq ($(CONFIG_KEXEC_FILE),y)
12742 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
12744 @@ -264,3 +265,9 @@ define archhelp
12745 echo ' FDARGS="..." arguments for the booted kernel'
12746 echo ' FDINITRD=file initrd for the booted kernel'
12751 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12752 +*** Please upgrade your binutils to 2.18 or newer
12754 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12755 index 57bbf2f..b100fce 100644
12756 --- a/arch/x86/boot/Makefile
12757 +++ b/arch/x86/boot/Makefile
12758 @@ -58,6 +58,9 @@ clean-files += cpustr.h
12759 # ---------------------------------------------------------------------------
12761 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12762 +ifdef CONSTIFY_PLUGIN
12763 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12765 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12768 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12769 index 878e4b9..20537ab 100644
12770 --- a/arch/x86/boot/bitops.h
12771 +++ b/arch/x86/boot/bitops.h
12772 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12774 const u32 *p = (const u32 *)addr;
12776 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12777 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12781 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12783 static inline void set_bit(int nr, void *addr)
12785 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12786 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12789 #endif /* BOOT_BITOPS_H */
12790 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12791 index bd49ec6..94c7f58 100644
12792 --- a/arch/x86/boot/boot.h
12793 +++ b/arch/x86/boot/boot.h
12794 @@ -84,7 +84,7 @@ static inline void io_delay(void)
12795 static inline u16 ds(void)
12798 - asm("movw %%ds,%0" : "=rm" (seg));
12799 + asm volatile("movw %%ds,%0" : "=rm" (seg));
12803 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12804 index 0a291cd..9686efc 100644
12805 --- a/arch/x86/boot/compressed/Makefile
12806 +++ b/arch/x86/boot/compressed/Makefile
12807 @@ -30,6 +30,9 @@ KBUILD_CFLAGS += $(cflags-y)
12808 KBUILD_CFLAGS += -mno-mmx -mno-sse
12809 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12810 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12811 +ifdef CONSTIFY_PLUGIN
12812 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12815 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12817 diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
12818 index a53440e..c3dbf1e 100644
12819 --- a/arch/x86/boot/compressed/efi_stub_32.S
12820 +++ b/arch/x86/boot/compressed/efi_stub_32.S
12821 @@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
12822 * parameter 2, ..., param n. To make things easy, we save the return
12823 * address of efi_call_phys in a global variable.
12826 - movl %ecx, saved_return_addr(%edx)
12827 - /* get the function pointer into ECX*/
12829 - movl %ecx, efi_rt_function_ptr(%edx)
12830 + popl saved_return_addr(%edx)
12831 + popl efi_rt_function_ptr(%edx)
12834 * 3. Call the physical function.
12837 + call *efi_rt_function_ptr(%edx)
12840 * 4. Balance the stack. And because EAX contain the return value,
12841 @@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
12845 - movl efi_rt_function_ptr(%edx), %ecx
12847 + pushl efi_rt_function_ptr(%edx)
12850 * 10. Push the saved return address onto the stack and return.
12852 - movl saved_return_addr(%edx), %ecx
12855 + jmpl *saved_return_addr(%edx)
12856 ENDPROC(efi_call_phys)
12859 diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
12860 index 630384a..278e788 100644
12861 --- a/arch/x86/boot/compressed/efi_thunk_64.S
12862 +++ b/arch/x86/boot/compressed/efi_thunk_64.S
12863 @@ -189,8 +189,8 @@ efi_gdt64:
12864 .long 0 /* Filled out by user */
12866 .quad 0x0000000000000000 /* NULL descriptor */
12867 - .quad 0x00af9a000000ffff /* __KERNEL_CS */
12868 - .quad 0x00cf92000000ffff /* __KERNEL_DS */
12869 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
12870 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
12871 .quad 0x0080890000000000 /* TS descriptor */
12872 .quad 0x0000000000000000 /* TS continued */
12874 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
12875 index 8ef964d..fcfb8aa 100644
12876 --- a/arch/x86/boot/compressed/head_32.S
12877 +++ b/arch/x86/boot/compressed/head_32.S
12878 @@ -141,10 +141,10 @@ preferred_addr:
12882 - cmpl $LOAD_PHYSICAL_ADDR, %ebx
12883 + cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12886 - movl $LOAD_PHYSICAL_ADDR, %ebx
12887 + movl $____LOAD_PHYSICAL_ADDR, %ebx
12890 /* Target address to relocate to for decompression */
12891 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
12892 index b0c0d16..3b44ff8 100644
12893 --- a/arch/x86/boot/compressed/head_64.S
12894 +++ b/arch/x86/boot/compressed/head_64.S
12895 @@ -95,10 +95,10 @@ ENTRY(startup_32)
12899 - cmpl $LOAD_PHYSICAL_ADDR, %ebx
12900 + cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12903 - movl $LOAD_PHYSICAL_ADDR, %ebx
12904 + movl $____LOAD_PHYSICAL_ADDR, %ebx
12907 /* Target address to relocate to for decompression */
12908 @@ -323,10 +323,10 @@ preferred_addr:
12912 - cmpq $LOAD_PHYSICAL_ADDR, %rbp
12913 + cmpq $____LOAD_PHYSICAL_ADDR, %rbp
12916 - movq $LOAD_PHYSICAL_ADDR, %rbp
12917 + movq $____LOAD_PHYSICAL_ADDR, %rbp
12920 /* Target address to relocate to for decompression */
12921 @@ -435,8 +435,8 @@ gdt:
12924 .quad 0x0000000000000000 /* NULL descriptor */
12925 - .quad 0x00af9a000000ffff /* __KERNEL_CS */
12926 - .quad 0x00cf92000000ffff /* __KERNEL_DS */
12927 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
12928 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
12929 .quad 0x0080890000000000 /* TS descriptor */
12930 .quad 0x0000000000000000 /* TS continued */
12932 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
12933 index a107b93..55602de 100644
12934 --- a/arch/x86/boot/compressed/misc.c
12935 +++ b/arch/x86/boot/compressed/misc.c
12936 @@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12937 * Calculate the delta between where vmlinux was linked to load
12938 * and where it was actually loaded.
12940 - delta = min_addr - LOAD_PHYSICAL_ADDR;
12941 + delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12943 debug_putstr("No relocation needed... ");
12945 @@ -324,7 +324,7 @@ static void parse_elf(void *output)
12947 Elf32_Phdr *phdrs, *phdr;
12950 + void *dest, *prev;
12953 memcpy(&ehdr, output, sizeof(ehdr));
12954 @@ -351,13 +351,16 @@ static void parse_elf(void *output)
12956 #ifdef CONFIG_RELOCATABLE
12958 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
12959 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
12961 dest = (void *)(phdr->p_paddr);
12964 output + phdr->p_offset,
12967 + memset(prev, 0xff, dest - prev);
12968 + prev = dest + phdr->p_filesz;
12970 default: /* Ignore other PT_* */ break;
12972 @@ -419,7 +422,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
12973 error("Destination address too large");
12975 #ifndef CONFIG_RELOCATABLE
12976 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12977 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12978 error("Wrong destination address");
12981 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
12982 index 1fd7d57..0f7d096 100644
12983 --- a/arch/x86/boot/cpucheck.c
12984 +++ b/arch/x86/boot/cpucheck.c
12985 @@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12986 u32 ecx = MSR_K7_HWCR;
12989 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12990 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12992 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12993 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12995 get_cpuflags(); /* Make sure it really did something */
12996 err = check_cpuflags();
12997 @@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12998 u32 ecx = MSR_VIA_FCR;
13001 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13002 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13003 eax |= (1<<1)|(1<<7);
13004 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13005 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13007 set_bit(X86_FEATURE_CX8, cpu.flags);
13008 err = check_cpuflags();
13009 @@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13013 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13014 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13016 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13017 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13018 + asm volatile("cpuid"
13019 : "+a" (level), "=d" (cpu.flags[0])
13021 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13022 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13024 err = check_cpuflags();
13025 } else if (err == 0x01 &&
13026 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
13027 index 16ef025..91e033b 100644
13028 --- a/arch/x86/boot/header.S
13029 +++ b/arch/x86/boot/header.S
13030 @@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
13031 # single linked list of
13032 # struct setup_data
13034 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
13035 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
13037 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
13038 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13039 +#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
13041 #define VO_INIT_SIZE (VO__end - VO__text)
13043 #if ZO_INIT_SIZE > VO_INIT_SIZE
13044 #define INIT_SIZE ZO_INIT_SIZE
13046 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
13047 index db75d07..8e6d0af 100644
13048 --- a/arch/x86/boot/memory.c
13049 +++ b/arch/x86/boot/memory.c
13052 static int detect_memory_e820(void)
13055 + unsigned int count = 0;
13056 struct biosregs ireg, oreg;
13057 struct e820entry *desc = boot_params.e820_map;
13058 static struct e820entry buf; /* static so it is zeroed */
13059 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
13060 index ba3e100..6501b8f 100644
13061 --- a/arch/x86/boot/video-vesa.c
13062 +++ b/arch/x86/boot/video-vesa.c
13063 @@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
13065 boot_params.screen_info.vesapm_seg = oreg.es;
13066 boot_params.screen_info.vesapm_off = oreg.di;
13067 + boot_params.screen_info.vesapm_size = oreg.cx;
13071 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13072 index 05111bb..a1ae1f0 100644
13073 --- a/arch/x86/boot/video.c
13074 +++ b/arch/x86/boot/video.c
13075 @@ -98,7 +98,7 @@ static void store_mode_params(void)
13076 static unsigned int get_entry(void)
13080 + unsigned int i, len = 0;
13084 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13085 index 9105655..41779c1 100644
13086 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
13087 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13089 * including this sentence is retained in full.
13092 +#include <asm/alternative-asm.h>
13094 .extern crypto_ft_tab
13095 .extern crypto_it_tab
13096 .extern crypto_fl_tab
13101 +#define ret pax_force_retaddr; ret
13103 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13106 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13107 index 6bd2c6c..368c93e 100644
13108 --- a/arch/x86/crypto/aesni-intel_asm.S
13109 +++ b/arch/x86/crypto/aesni-intel_asm.S
13112 #include <linux/linkage.h>
13113 #include <asm/inst.h>
13114 +#include <asm/alternative-asm.h>
13117 * The following macros are used to move an (un)aligned 16 byte value to/from
13118 @@ -217,7 +218,7 @@ enc: .octa 0x2
13119 * num_initial_blocks = b mod 4
13120 * encrypt the initial num_initial_blocks blocks and apply ghash on
13122 -* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13123 +* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13125 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13127 @@ -227,8 +228,8 @@ enc: .octa 0x2
13128 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13129 MOVADQ SHUF_MASK(%rip), %xmm14
13130 mov arg7, %r10 # %r10 = AAD
13131 - mov arg8, %r12 # %r12 = aadLen
13133 + mov arg8, %r15 # %r15 = aadLen
13135 pxor %xmm\i, %xmm\i
13137 _get_AAD_loop\num_initial_blocks\operation:
13138 @@ -237,17 +238,17 @@ _get_AAD_loop\num_initial_blocks\operation:
13144 jne _get_AAD_loop\num_initial_blocks\operation
13147 je _get_AAD_loop2_done\num_initial_blocks\operation
13151 _get_AAD_loop2\num_initial_blocks\operation:
13157 jne _get_AAD_loop2\num_initial_blocks\operation
13159 _get_AAD_loop2_done\num_initial_blocks\operation:
13160 @@ -442,7 +443,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13161 * num_initial_blocks = b mod 4
13162 * encrypt the initial num_initial_blocks blocks and apply ghash on
13164 -* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13165 +* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13167 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13169 @@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13170 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13171 MOVADQ SHUF_MASK(%rip), %xmm14
13172 mov arg7, %r10 # %r10 = AAD
13173 - mov arg8, %r12 # %r12 = aadLen
13175 + mov arg8, %r15 # %r15 = aadLen
13177 pxor %xmm\i, %xmm\i
13178 _get_AAD_loop\num_initial_blocks\operation:
13180 @@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13186 jne _get_AAD_loop\num_initial_blocks\operation
13188 je _get_AAD_loop2_done\num_initial_blocks\operation
13191 _get_AAD_loop2\num_initial_blocks\operation:
13197 jne _get_AAD_loop2\num_initial_blocks\operation
13198 _get_AAD_loop2_done\num_initial_blocks\operation:
13199 PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
13200 @@ -1280,7 +1281,7 @@ _esb_loop_\@:
13202 *****************************************************************************/
13203 ENTRY(aesni_gcm_dec)
13209 @@ -1290,8 +1291,8 @@ ENTRY(aesni_gcm_dec)
13211 sub $VARIABLE_OFFSET, %rsp
13212 and $~63, %rsp # align rsp to 64 bytes
13214 - movdqu (%r12), %xmm13 # %xmm13 = HashKey
13216 + movdqu (%r15), %xmm13 # %xmm13 = HashKey
13217 movdqa SHUF_MASK(%rip), %xmm2
13218 PSHUFB_XMM %xmm2, %xmm13
13220 @@ -1319,10 +1320,10 @@ ENTRY(aesni_gcm_dec)
13221 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13222 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13223 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13225 - and $(3<<4), %r12
13227 + and $(3<<4), %r15
13228 jz _initial_num_blocks_is_0_decrypt
13229 - cmp $(2<<4), %r12
13230 + cmp $(2<<4), %r15
13231 jb _initial_num_blocks_is_1_decrypt
13232 je _initial_num_blocks_is_2_decrypt
13233 _initial_num_blocks_is_3_decrypt:
13234 @@ -1372,16 +1373,16 @@ _zero_cipher_left_decrypt:
13237 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13238 - lea SHIFT_MASK+16(%rip), %r12
13240 + lea SHIFT_MASK+16(%rip), %r15
13242 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13243 # (%r13 is the number of bytes in plaintext mod 16)
13244 - movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13245 + movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13246 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
13248 movdqa %xmm1, %xmm2
13249 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13250 - movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13251 + movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13252 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13253 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13255 @@ -1410,9 +1411,9 @@ _less_than_8_bytes_left_decrypt:
13257 jne _less_than_8_bytes_left_decrypt
13258 _multiple_of_16_bytes_decrypt:
13259 - mov arg8, %r12 # %r13 = aadLen (number of bytes)
13260 - shl $3, %r12 # convert into number of bits
13261 - movd %r12d, %xmm15 # len(A) in %xmm15
13262 + mov arg8, %r15 # %r13 = aadLen (number of bytes)
13263 + shl $3, %r15 # convert into number of bits
13264 + movd %r15d, %xmm15 # len(A) in %xmm15
13265 shl $3, %arg4 # len(C) in bits (*128)
13266 MOVQ_R64_XMM %arg4, %xmm1
13267 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13268 @@ -1451,7 +1452,8 @@ _return_T_done_decrypt:
13274 + pax_force_retaddr
13276 ENDPROC(aesni_gcm_dec)
13278 @@ -1540,7 +1542,7 @@ ENDPROC(aesni_gcm_dec)
13279 * poly = x^128 + x^127 + x^126 + x^121 + 1
13280 ***************************************************************************/
13281 ENTRY(aesni_gcm_enc)
13287 @@ -1550,8 +1552,8 @@ ENTRY(aesni_gcm_enc)
13289 sub $VARIABLE_OFFSET, %rsp
13292 - movdqu (%r12), %xmm13
13294 + movdqu (%r15), %xmm13
13295 movdqa SHUF_MASK(%rip), %xmm2
13296 PSHUFB_XMM %xmm2, %xmm13
13298 @@ -1575,13 +1577,13 @@ ENTRY(aesni_gcm_enc)
13299 movdqa %xmm13, HashKey(%rsp)
13300 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13305 # Encrypt first few blocks
13307 - and $(3<<4), %r12
13308 + and $(3<<4), %r15
13309 jz _initial_num_blocks_is_0_encrypt
13310 - cmp $(2<<4), %r12
13311 + cmp $(2<<4), %r15
13312 jb _initial_num_blocks_is_1_encrypt
13313 je _initial_num_blocks_is_2_encrypt
13314 _initial_num_blocks_is_3_encrypt:
13315 @@ -1634,14 +1636,14 @@ _zero_cipher_left_encrypt:
13318 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13319 - lea SHIFT_MASK+16(%rip), %r12
13321 + lea SHIFT_MASK+16(%rip), %r15
13323 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13324 # (%r13 is the number of bytes in plaintext mod 16)
13325 - movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13326 + movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13327 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
13328 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13329 - movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13330 + movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13331 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13332 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13333 movdqa SHUF_MASK(%rip), %xmm10
13334 @@ -1674,9 +1676,9 @@ _less_than_8_bytes_left_encrypt:
13336 jne _less_than_8_bytes_left_encrypt
13337 _multiple_of_16_bytes_encrypt:
13338 - mov arg8, %r12 # %r12 = addLen (number of bytes)
13340 - movd %r12d, %xmm15 # len(A) in %xmm15
13341 + mov arg8, %r15 # %r15 = addLen (number of bytes)
13343 + movd %r15d, %xmm15 # len(A) in %xmm15
13344 shl $3, %arg4 # len(C) in bits (*128)
13345 MOVQ_R64_XMM %arg4, %xmm1
13346 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13347 @@ -1715,7 +1717,8 @@ _return_T_done_encrypt:
13353 + pax_force_retaddr
13355 ENDPROC(aesni_gcm_enc)
13357 @@ -1733,6 +1736,7 @@ _key_expansion_256a:
13359 movaps %xmm0, (TKEYP)
13361 + pax_force_retaddr
13363 ENDPROC(_key_expansion_128)
13364 ENDPROC(_key_expansion_256a)
13365 @@ -1759,6 +1763,7 @@ _key_expansion_192a:
13366 shufps $0b01001110, %xmm2, %xmm1
13367 movaps %xmm1, 0x10(TKEYP)
13369 + pax_force_retaddr
13371 ENDPROC(_key_expansion_192a)
13373 @@ -1779,6 +1784,7 @@ _key_expansion_192b:
13375 movaps %xmm0, (TKEYP)
13377 + pax_force_retaddr
13379 ENDPROC(_key_expansion_192b)
13381 @@ -1792,6 +1798,7 @@ _key_expansion_256b:
13383 movaps %xmm2, (TKEYP)
13385 + pax_force_retaddr
13387 ENDPROC(_key_expansion_256b)
13389 @@ -1905,6 +1912,7 @@ ENTRY(aesni_set_key)
13393 + pax_force_retaddr
13395 ENDPROC(aesni_set_key)
13397 @@ -1927,6 +1935,7 @@ ENTRY(aesni_enc)
13401 + pax_force_retaddr
13405 @@ -1985,6 +1994,7 @@ _aesni_enc1:
13407 movaps 0x70(TKEYP), KEY
13408 AESENCLAST KEY STATE
13409 + pax_force_retaddr
13411 ENDPROC(_aesni_enc1)
13413 @@ -2094,6 +2104,7 @@ _aesni_enc4:
13414 AESENCLAST KEY STATE2
13415 AESENCLAST KEY STATE3
13416 AESENCLAST KEY STATE4
13417 + pax_force_retaddr
13419 ENDPROC(_aesni_enc4)
13421 @@ -2117,6 +2128,7 @@ ENTRY(aesni_dec)
13425 + pax_force_retaddr
13429 @@ -2175,6 +2187,7 @@ _aesni_dec1:
13431 movaps 0x70(TKEYP), KEY
13432 AESDECLAST KEY STATE
13433 + pax_force_retaddr
13435 ENDPROC(_aesni_dec1)
13437 @@ -2284,6 +2297,7 @@ _aesni_dec4:
13438 AESDECLAST KEY STATE2
13439 AESDECLAST KEY STATE3
13440 AESDECLAST KEY STATE4
13441 + pax_force_retaddr
13443 ENDPROC(_aesni_dec4)
13445 @@ -2342,6 +2356,7 @@ ENTRY(aesni_ecb_enc)
13449 + pax_force_retaddr
13451 ENDPROC(aesni_ecb_enc)
13453 @@ -2401,6 +2416,7 @@ ENTRY(aesni_ecb_dec)
13457 + pax_force_retaddr
13459 ENDPROC(aesni_ecb_dec)
13461 @@ -2443,6 +2459,7 @@ ENTRY(aesni_cbc_enc)
13465 + pax_force_retaddr
13467 ENDPROC(aesni_cbc_enc)
13469 @@ -2534,6 +2551,7 @@ ENTRY(aesni_cbc_dec)
13473 + pax_force_retaddr
13475 ENDPROC(aesni_cbc_dec)
13477 @@ -2561,6 +2579,7 @@ _aesni_inc_init:
13479 MOVQ_R64_XMM TCTR_LOW INC
13480 MOVQ_R64_XMM CTR TCTR_LOW
13481 + pax_force_retaddr
13483 ENDPROC(_aesni_inc_init)
13485 @@ -2590,6 +2609,7 @@ _aesni_inc:
13488 PSHUFB_XMM BSWAP_MASK IV
13489 + pax_force_retaddr
13491 ENDPROC(_aesni_inc)
13493 @@ -2651,6 +2671,7 @@ ENTRY(aesni_ctr_enc)
13496 .Lctr_enc_just_ret:
13497 + pax_force_retaddr
13499 ENDPROC(aesni_ctr_enc)
13501 @@ -2777,6 +2798,7 @@ ENTRY(aesni_xts_crypt8)
13503 movdqu STATE4, 0x70(OUTP)
13505 + pax_force_retaddr
13507 ENDPROC(aesni_xts_crypt8)
13509 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13510 index 246c670..466e2d6 100644
13511 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13512 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13516 #include <linux/linkage.h>
13517 +#include <asm/alternative-asm.h>
13519 .file "blowfish-x86_64-asm.S"
13521 @@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13525 + pax_force_retaddr
13529 + pax_force_retaddr
13531 ENDPROC(__blowfish_enc_blk)
13533 @@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13537 + pax_force_retaddr
13539 ENDPROC(blowfish_dec_blk)
13541 @@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13545 + pax_force_retaddr
13549 @@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13553 + pax_force_retaddr
13555 ENDPROC(__blowfish_enc_blk_4way)
13557 @@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13561 + pax_force_retaddr
13563 ENDPROC(blowfish_dec_blk_4way)
13564 diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13565 index ce71f92..1dce7ec 100644
13566 --- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13567 +++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13571 #include <linux/linkage.h>
13572 +#include <asm/alternative-asm.h>
13574 #define CAMELLIA_TABLE_BYTE_LEN 272
13576 @@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13577 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13578 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13580 + pax_force_retaddr
13582 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13584 @@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13585 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13586 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13588 + pax_force_retaddr
13590 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13592 @@ -780,6 +783,7 @@ __camellia_enc_blk16:
13593 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13594 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13596 + pax_force_retaddr
13600 @@ -865,6 +869,7 @@ __camellia_dec_blk16:
13601 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13602 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13604 + pax_force_retaddr
13608 @@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13609 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13612 + pax_force_retaddr
13614 ENDPROC(camellia_ecb_enc_16way)
13616 @@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13617 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13620 + pax_force_retaddr
13622 ENDPROC(camellia_ecb_dec_16way)
13624 @@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13625 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13628 + pax_force_retaddr
13630 ENDPROC(camellia_cbc_dec_16way)
13632 @@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13633 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13636 + pax_force_retaddr
13638 ENDPROC(camellia_ctr_16way)
13640 @@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13641 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13644 + pax_force_retaddr
13646 ENDPROC(camellia_xts_crypt_16way)
13648 diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13649 index 0e0b886..5a3123c 100644
13650 --- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13651 +++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13655 #include <linux/linkage.h>
13656 +#include <asm/alternative-asm.h>
13658 #define CAMELLIA_TABLE_BYTE_LEN 272
13660 @@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13661 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13662 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13664 + pax_force_retaddr
13666 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13668 @@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13669 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13670 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13672 + pax_force_retaddr
13674 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13676 @@ -820,6 +823,7 @@ __camellia_enc_blk32:
13677 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13678 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13680 + pax_force_retaddr
13684 @@ -905,6 +909,7 @@ __camellia_dec_blk32:
13685 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13686 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13688 + pax_force_retaddr
13692 @@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13696 + pax_force_retaddr
13698 ENDPROC(camellia_ecb_enc_32way)
13700 @@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13704 + pax_force_retaddr
13706 ENDPROC(camellia_ecb_dec_32way)
13708 @@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13712 + pax_force_retaddr
13714 ENDPROC(camellia_cbc_dec_32way)
13716 @@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13720 + pax_force_retaddr
13722 ENDPROC(camellia_ctr_32way)
13724 @@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13728 + pax_force_retaddr
13730 ENDPROC(camellia_xts_crypt_32way)
13732 diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13733 index 310319c..db3d7b5 100644
13734 --- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13735 +++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13739 #include <linux/linkage.h>
13740 +#include <asm/alternative-asm.h>
13742 .file "camellia-x86_64-asm_64.S"
13744 @@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13745 enc_outunpack(mov, RT1);
13748 + pax_force_retaddr
13752 enc_outunpack(xor, RT1);
13755 + pax_force_retaddr
13757 ENDPROC(__camellia_enc_blk)
13759 @@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13763 + pax_force_retaddr
13765 ENDPROC(camellia_dec_blk)
13767 @@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13771 + pax_force_retaddr
13775 @@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13779 + pax_force_retaddr
13781 ENDPROC(__camellia_enc_blk_2way)
13783 @@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13787 + pax_force_retaddr
13789 ENDPROC(camellia_dec_blk_2way)
13790 diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13791 index c35fd5d..2d8c7db 100644
13792 --- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13793 +++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13797 #include <linux/linkage.h>
13798 +#include <asm/alternative-asm.h>
13800 .file "cast5-avx-x86_64-asm_64.S"
13802 @@ -281,6 +282,7 @@ __cast5_enc_blk16:
13803 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13804 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13806 + pax_force_retaddr
13808 ENDPROC(__cast5_enc_blk16)
13810 @@ -352,6 +354,7 @@ __cast5_dec_blk16:
13811 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13812 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13814 + pax_force_retaddr
13818 @@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13819 vmovdqu RR4, (6*4*4)(%r11);
13820 vmovdqu RL4, (7*4*4)(%r11);
13822 + pax_force_retaddr
13824 ENDPROC(cast5_ecb_enc_16way)
13826 @@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13827 vmovdqu RR4, (6*4*4)(%r11);
13828 vmovdqu RL4, (7*4*4)(%r11);
13830 + pax_force_retaddr
13832 ENDPROC(cast5_ecb_dec_16way)
13834 @@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
13845 vmovdqu (0*16)(%rdx), RL1;
13846 vmovdqu (1*16)(%rdx), RR1;
13847 @@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
13848 call __cast5_dec_blk16;
13851 - vmovq (%r12), RX;
13852 + vmovq (%r14), RX;
13853 vpshufd $0x4f, RX, RX;
13854 vpxor RX, RR1, RR1;
13855 - vpxor 0*16+8(%r12), RL1, RL1;
13856 - vpxor 1*16+8(%r12), RR2, RR2;
13857 - vpxor 2*16+8(%r12), RL2, RL2;
13858 - vpxor 3*16+8(%r12), RR3, RR3;
13859 - vpxor 4*16+8(%r12), RL3, RL3;
13860 - vpxor 5*16+8(%r12), RR4, RR4;
13861 - vpxor 6*16+8(%r12), RL4, RL4;
13862 + vpxor 0*16+8(%r14), RL1, RL1;
13863 + vpxor 1*16+8(%r14), RR2, RR2;
13864 + vpxor 2*16+8(%r14), RL2, RL2;
13865 + vpxor 3*16+8(%r14), RR3, RR3;
13866 + vpxor 4*16+8(%r14), RL3, RL3;
13867 + vpxor 5*16+8(%r14), RR4, RR4;
13868 + vpxor 6*16+8(%r14), RL4, RL4;
13870 vmovdqu RR1, (0*16)(%r11);
13871 vmovdqu RL1, (1*16)(%r11);
13872 @@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
13873 vmovdqu RR4, (6*16)(%r11);
13874 vmovdqu RL4, (7*16)(%r11);
13879 + pax_force_retaddr
13881 ENDPROC(cast5_cbc_dec_16way)
13883 @@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
13884 * %rcx: iv (big endian, 64bit)
13894 vpcmpeqd RTMP, RTMP, RTMP;
13895 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
13896 @@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
13897 call __cast5_enc_blk16;
13899 /* dst = src ^ iv */
13900 - vpxor (0*16)(%r12), RR1, RR1;
13901 - vpxor (1*16)(%r12), RL1, RL1;
13902 - vpxor (2*16)(%r12), RR2, RR2;
13903 - vpxor (3*16)(%r12), RL2, RL2;
13904 - vpxor (4*16)(%r12), RR3, RR3;
13905 - vpxor (5*16)(%r12), RL3, RL3;
13906 - vpxor (6*16)(%r12), RR4, RR4;
13907 - vpxor (7*16)(%r12), RL4, RL4;
13908 + vpxor (0*16)(%r14), RR1, RR1;
13909 + vpxor (1*16)(%r14), RL1, RL1;
13910 + vpxor (2*16)(%r14), RR2, RR2;
13911 + vpxor (3*16)(%r14), RL2, RL2;
13912 + vpxor (4*16)(%r14), RR3, RR3;
13913 + vpxor (5*16)(%r14), RL3, RL3;
13914 + vpxor (6*16)(%r14), RR4, RR4;
13915 + vpxor (7*16)(%r14), RL4, RL4;
13916 vmovdqu RR1, (0*16)(%r11);
13917 vmovdqu RL1, (1*16)(%r11);
13918 vmovdqu RR2, (2*16)(%r11);
13919 @@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13920 vmovdqu RR4, (6*16)(%r11);
13921 vmovdqu RL4, (7*16)(%r11);
13926 + pax_force_retaddr
13928 ENDPROC(cast5_ctr_16way)
13929 diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13930 index e3531f8..e123f35 100644
13931 --- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13932 +++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13936 #include <linux/linkage.h>
13937 +#include <asm/alternative-asm.h>
13938 #include "glue_helper-asm-avx.S"
13940 .file "cast6-avx-x86_64-asm_64.S"
13941 @@ -295,6 +296,7 @@ __cast6_enc_blk8:
13942 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13943 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13945 + pax_force_retaddr
13947 ENDPROC(__cast6_enc_blk8)
13949 @@ -340,6 +342,7 @@ __cast6_dec_blk8:
13950 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13951 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13953 + pax_force_retaddr
13955 ENDPROC(__cast6_dec_blk8)
13957 @@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13959 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13961 + pax_force_retaddr
13963 ENDPROC(cast6_ecb_enc_8way)
13965 @@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13967 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13969 + pax_force_retaddr
13971 ENDPROC(cast6_ecb_dec_8way)
13973 @@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13984 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13986 call __cast6_dec_blk8;
13988 - store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13989 + store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13994 + pax_force_retaddr
13996 ENDPROC(cast6_cbc_dec_8way)
13998 @@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
13999 * %rcx: iv (little endian, 128bit)
14009 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14010 RD2, RX, RKR, RKM);
14012 call __cast6_enc_blk8;
14014 - store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14015 + store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14020 + pax_force_retaddr
14022 ENDPROC(cast6_ctr_8way)
14024 @@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
14025 /* dst <= regs xor IVs(in dst) */
14026 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14028 + pax_force_retaddr
14030 ENDPROC(cast6_xts_enc_8way)
14032 @@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
14033 /* dst <= regs xor IVs(in dst) */
14034 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14036 + pax_force_retaddr
14038 ENDPROC(cast6_xts_dec_8way)
14039 diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14040 index 225be06..2885e731 100644
14041 --- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14042 +++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14045 #include <asm/inst.h>
14046 #include <linux/linkage.h>
14047 +#include <asm/alternative-asm.h>
14049 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
14051 @@ -309,6 +310,7 @@ do_return:
14055 + pax_force_retaddr
14058 ################################################################
14059 @@ -330,7 +332,7 @@ ENDPROC(crc_pcl)
14060 ## PCLMULQDQ tables
14061 ## Table is 128 entries x 2 words (8 bytes) each
14062 ################################################################
14063 -.section .rotata, "a", %progbits
14064 +.section .rodata, "a", %progbits
14067 .long 0x493c7d27, 0x00000001
14068 diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14069 index 5d1e007..098cb4f 100644
14070 --- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
14071 +++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14074 #include <linux/linkage.h>
14075 #include <asm/inst.h>
14076 +#include <asm/alternative-asm.h>
14080 @@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14084 + pax_force_retaddr
14086 ENDPROC(__clmul_gf128mul_ble)
14088 @@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14089 call __clmul_gf128mul_ble
14090 PSHUFB_XMM BSWAP DATA
14091 movups DATA, (%rdi)
14092 + pax_force_retaddr
14094 ENDPROC(clmul_ghash_mul)
14096 @@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14097 PSHUFB_XMM BSWAP DATA
14098 movups DATA, (%rdi)
14100 + pax_force_retaddr
14102 ENDPROC(clmul_ghash_update)
14103 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14104 index 9279e0b..c4b3d2c 100644
14105 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14106 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14108 #include <linux/linkage.h>
14109 +#include <asm/alternative-asm.h>
14111 # enter salsa20_encrypt_bytes
14112 ENTRY(salsa20_encrypt_bytes)
14113 @@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14117 + pax_force_retaddr
14121 @@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14125 + pax_force_retaddr
14127 ENDPROC(salsa20_keysetup)
14129 @@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14133 + pax_force_retaddr
14135 ENDPROC(salsa20_ivsetup)
14136 diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14137 index 2f202f4..d9164d6 100644
14138 --- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14139 +++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14143 #include <linux/linkage.h>
14144 +#include <asm/alternative-asm.h>
14145 #include "glue_helper-asm-avx.S"
14147 .file "serpent-avx-x86_64-asm_64.S"
14148 @@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14149 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14150 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14152 + pax_force_retaddr
14154 ENDPROC(__serpent_enc_blk8_avx)
14156 @@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14157 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14158 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14160 + pax_force_retaddr
14162 ENDPROC(__serpent_dec_blk8_avx)
14164 @@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14166 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14168 + pax_force_retaddr
14170 ENDPROC(serpent_ecb_enc_8way_avx)
14172 @@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14174 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14176 + pax_force_retaddr
14178 ENDPROC(serpent_ecb_dec_8way_avx)
14180 @@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14182 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14184 + pax_force_retaddr
14186 ENDPROC(serpent_cbc_dec_8way_avx)
14188 @@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14190 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14192 + pax_force_retaddr
14194 ENDPROC(serpent_ctr_8way_avx)
14196 @@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14197 /* dst <= regs xor IVs(in dst) */
14198 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14200 + pax_force_retaddr
14202 ENDPROC(serpent_xts_enc_8way_avx)
14204 @@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14205 /* dst <= regs xor IVs(in dst) */
14206 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14208 + pax_force_retaddr
14210 ENDPROC(serpent_xts_dec_8way_avx)
14211 diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14212 index b222085..abd483c 100644
14213 --- a/arch/x86/crypto/serpent-avx2-asm_64.S
14214 +++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14218 #include <linux/linkage.h>
14219 +#include <asm/alternative-asm.h>
14220 #include "glue_helper-asm-avx2.S"
14222 .file "serpent-avx2-asm_64.S"
14223 @@ -610,6 +611,7 @@ __serpent_enc_blk16:
14224 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14225 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14227 + pax_force_retaddr
14229 ENDPROC(__serpent_enc_blk16)
14231 @@ -664,6 +666,7 @@ __serpent_dec_blk16:
14232 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14233 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14235 + pax_force_retaddr
14237 ENDPROC(__serpent_dec_blk16)
14239 @@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14243 + pax_force_retaddr
14245 ENDPROC(serpent_ecb_enc_16way)
14247 @@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14251 + pax_force_retaddr
14253 ENDPROC(serpent_ecb_dec_16way)
14255 @@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14259 + pax_force_retaddr
14261 ENDPROC(serpent_cbc_dec_16way)
14263 @@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14267 + pax_force_retaddr
14269 ENDPROC(serpent_ctr_16way)
14271 @@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14275 + pax_force_retaddr
14277 ENDPROC(serpent_xts_enc_16way)
14279 @@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14283 + pax_force_retaddr
14285 ENDPROC(serpent_xts_dec_16way)
14286 diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14287 index acc066c..1559cc4 100644
14288 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14289 +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14293 #include <linux/linkage.h>
14294 +#include <asm/alternative-asm.h>
14296 .file "serpent-sse2-x86_64-asm_64.S"
14298 @@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14299 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14300 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14302 + pax_force_retaddr
14306 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14307 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14309 + pax_force_retaddr
14311 ENDPROC(__serpent_enc_blk_8way)
14313 @@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14314 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14315 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14317 + pax_force_retaddr
14319 ENDPROC(serpent_dec_blk_8way)
14320 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14321 index a410950..9dfe7ad 100644
14322 --- a/arch/x86/crypto/sha1_ssse3_asm.S
14323 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
14327 #include <linux/linkage.h>
14328 +#include <asm/alternative-asm.h>
14330 #define CTX %rdi // arg1
14331 #define BUF %rsi // arg2
14341 sub $64, %rsp # allocate workspace
14342 and $~15, %rsp # align stack
14344 @@ -99,11 +100,12 @@
14348 - mov %r12, %rsp # deallocate workspace
14349 + mov %r14, %rsp # deallocate workspace
14355 + pax_force_retaddr
14359 diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14360 index 92b3b5d..0dc1dcb 100644
14361 --- a/arch/x86/crypto/sha256-avx-asm.S
14362 +++ b/arch/x86/crypto/sha256-avx-asm.S
14365 #ifdef CONFIG_AS_AVX
14366 #include <linux/linkage.h>
14367 +#include <asm/alternative-asm.h>
14369 ## assume buffers not aligned
14370 #define VMOVDQ vmovdqu
14371 @@ -460,6 +461,7 @@ done_hash:
14375 + pax_force_retaddr
14377 ENDPROC(sha256_transform_avx)
14379 diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14380 index 570ec5e..cf2b625 100644
14381 --- a/arch/x86/crypto/sha256-avx2-asm.S
14382 +++ b/arch/x86/crypto/sha256-avx2-asm.S
14385 #ifdef CONFIG_AS_AVX2
14386 #include <linux/linkage.h>
14387 +#include <asm/alternative-asm.h>
14389 ## assume buffers not aligned
14390 #define VMOVDQ vmovdqu
14391 @@ -720,6 +721,7 @@ done_hash:
14395 + pax_force_retaddr
14397 ENDPROC(sha256_transform_rorx)
14399 diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14400 index 2cedc44..5144899 100644
14401 --- a/arch/x86/crypto/sha256-ssse3-asm.S
14402 +++ b/arch/x86/crypto/sha256-ssse3-asm.S
14404 ########################################################################
14406 #include <linux/linkage.h>
14407 +#include <asm/alternative-asm.h>
14409 ## assume buffers not aligned
14410 #define MOVDQ movdqu
14411 @@ -471,6 +472,7 @@ done_hash:
14415 + pax_force_retaddr
14417 ENDPROC(sha256_transform_ssse3)
14419 diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14420 index 565274d..af6bc08 100644
14421 --- a/arch/x86/crypto/sha512-avx-asm.S
14422 +++ b/arch/x86/crypto/sha512-avx-asm.S
14425 #ifdef CONFIG_AS_AVX
14426 #include <linux/linkage.h>
14427 +#include <asm/alternative-asm.h>
14431 @@ -364,6 +365,7 @@ updateblock:
14432 mov frame_RSPSAVE(%rsp), %rsp
14435 + pax_force_retaddr
14437 ENDPROC(sha512_transform_avx)
14439 diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14440 index 1f20b35..f25c8c1 100644
14441 --- a/arch/x86/crypto/sha512-avx2-asm.S
14442 +++ b/arch/x86/crypto/sha512-avx2-asm.S
14445 #ifdef CONFIG_AS_AVX2
14446 #include <linux/linkage.h>
14447 +#include <asm/alternative-asm.h>
14451 @@ -678,6 +679,7 @@ done_hash:
14453 # Restore Stack Pointer
14454 mov frame_RSPSAVE(%rsp), %rsp
14455 + pax_force_retaddr
14457 ENDPROC(sha512_transform_rorx)
14459 diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14460 index e610e29..ffcb5ed 100644
14461 --- a/arch/x86/crypto/sha512-ssse3-asm.S
14462 +++ b/arch/x86/crypto/sha512-ssse3-asm.S
14464 ########################################################################
14466 #include <linux/linkage.h>
14467 +#include <asm/alternative-asm.h>
14471 @@ -363,6 +364,7 @@ updateblock:
14472 mov frame_RSPSAVE(%rsp), %rsp
14475 + pax_force_retaddr
14477 ENDPROC(sha512_transform_ssse3)
14479 diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14480 index 0505813..b067311 100644
14481 --- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14482 +++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14486 #include <linux/linkage.h>
14487 +#include <asm/alternative-asm.h>
14488 #include "glue_helper-asm-avx.S"
14490 .file "twofish-avx-x86_64-asm_64.S"
14491 @@ -284,6 +285,7 @@ __twofish_enc_blk8:
14492 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14493 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14495 + pax_force_retaddr
14497 ENDPROC(__twofish_enc_blk8)
14499 @@ -324,6 +326,7 @@ __twofish_dec_blk8:
14500 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14501 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14503 + pax_force_retaddr
14505 ENDPROC(__twofish_dec_blk8)
14507 @@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14509 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14511 + pax_force_retaddr
14513 ENDPROC(twofish_ecb_enc_8way)
14515 @@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14517 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14519 + pax_force_retaddr
14521 ENDPROC(twofish_ecb_dec_8way)
14523 @@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14534 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14536 call __twofish_dec_blk8;
14538 - store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14539 + store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14544 + pax_force_retaddr
14546 ENDPROC(twofish_cbc_dec_8way)
14548 @@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14549 * %rcx: iv (little endian, 128bit)
14559 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14560 RD2, RX0, RX1, RY0);
14562 call __twofish_enc_blk8;
14564 - store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14565 + store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14570 + pax_force_retaddr
14572 ENDPROC(twofish_ctr_8way)
14574 @@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14575 /* dst <= regs xor IVs(in dst) */
14576 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14578 + pax_force_retaddr
14580 ENDPROC(twofish_xts_enc_8way)
14582 @@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14583 /* dst <= regs xor IVs(in dst) */
14584 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14586 + pax_force_retaddr
14588 ENDPROC(twofish_xts_dec_8way)
14589 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14590 index 1c3b7ce..02f578d 100644
14591 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14592 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14596 #include <linux/linkage.h>
14597 +#include <asm/alternative-asm.h>
14599 .file "twofish-x86_64-asm-3way.S"
14601 @@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14605 + pax_force_retaddr
14609 @@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14613 + pax_force_retaddr
14615 ENDPROC(__twofish_enc_blk_3way)
14617 @@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14621 + pax_force_retaddr
14623 ENDPROC(twofish_dec_blk_3way)
14624 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14625 index a350c99..c1bac24 100644
14626 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14627 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14630 #include <linux/linkage.h>
14631 #include <asm/asm-offsets.h>
14632 +#include <asm/alternative-asm.h>
14636 @@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14640 + pax_force_retaddr
14642 ENDPROC(twofish_enc_blk)
14644 @@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14648 + pax_force_retaddr
14650 ENDPROC(twofish_dec_blk)
14651 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14652 index ae6aad1..719d6d9 100644
14653 --- a/arch/x86/ia32/ia32_aout.c
14654 +++ b/arch/x86/ia32/ia32_aout.c
14655 @@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14656 unsigned long dump_start, dump_size;
14657 struct user32 dump;
14659 + memset(&dump, 0, sizeof(dump));
14664 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14665 index c81d35e6..3500144 100644
14666 --- a/arch/x86/ia32/ia32_signal.c
14667 +++ b/arch/x86/ia32/ia32_signal.c
14668 @@ -216,7 +216,7 @@ asmlinkage long sys32_sigreturn(void)
14669 if (__get_user(set.sig[0], &frame->sc.oldmask)
14670 || (_COMPAT_NSIG_WORDS > 1
14671 && __copy_from_user((((char *) &set.sig) + 4),
14672 - &frame->extramask,
14673 + frame->extramask,
14674 sizeof(frame->extramask))))
14677 @@ -335,7 +335,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14679 /* Align the stack pointer according to the i386 ABI,
14680 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14681 - sp = ((sp + 4) & -16ul) - 4;
14682 + sp = ((sp - 12) & -16ul) - 4;
14683 return (void __user *) sp;
14686 @@ -380,10 +380,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14688 /* Return stub is in 32bit vsyscall page */
14689 if (current->mm->context.vdso)
14690 - restorer = current->mm->context.vdso +
14691 - selected_vdso32->sym___kernel_sigreturn;
14692 + restorer = (void __force_user *)(current->mm->context.vdso +
14693 + selected_vdso32->sym___kernel_sigreturn);
14695 - restorer = &frame->retcode;
14696 + restorer = frame->retcode;
14700 @@ -393,7 +393,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14701 * These are actually not used anymore, but left because some
14702 * gdb versions depend on them as a marker.
14704 - put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14705 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14706 } put_user_catch(err);
14709 @@ -435,7 +435,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14711 __NR_ia32_rt_sigreturn,
14717 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14718 @@ -458,16 +458,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14720 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14721 restorer = ksig->ka.sa.sa_restorer;
14722 + else if (current->mm->context.vdso)
14723 + /* Return stub is in 32bit vsyscall page */
14724 + restorer = (void __force_user *)(current->mm->context.vdso +
14725 + selected_vdso32->sym___kernel_rt_sigreturn);
14727 - restorer = current->mm->context.vdso +
14728 - selected_vdso32->sym___kernel_rt_sigreturn;
14729 + restorer = frame->retcode;
14730 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14733 * Not actually used anymore, but left because some gdb
14734 * versions need it.
14736 - put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14737 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14738 } put_user_catch(err);
14740 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
14741 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14742 index 72bf268..127572a 100644
14743 --- a/arch/x86/ia32/ia32entry.S
14744 +++ b/arch/x86/ia32/ia32entry.S
14746 #include <asm/irqflags.h>
14747 #include <asm/asm.h>
14748 #include <asm/smap.h>
14749 +#include <asm/pgtable.h>
14750 #include <linux/linkage.h>
14751 #include <linux/err.h>
14752 +#include <asm/alternative-asm.h>
14754 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14755 #include <linux/elf-em.h>
14756 @@ -85,6 +87,32 @@ ENTRY(native_irq_enable_sysexit)
14757 ENDPROC(native_irq_enable_sysexit)
14760 + .macro pax_enter_kernel_user
14761 + pax_set_fptr_mask
14762 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14763 + call pax_enter_kernel_user
14767 + .macro pax_exit_kernel_user
14768 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14769 + call pax_exit_kernel_user
14771 +#ifdef CONFIG_PAX_RANDKSTACK
14774 + call pax_randomize_kstack
14780 + .macro pax_erase_kstack
14781 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14782 + call pax_erase_kstack
14787 * 32bit SYSENTER instruction entry.
14789 @@ -119,23 +147,24 @@ ENTRY(ia32_sysenter_target)
14790 * it is too small to ever cause noticeable irq latency.
14792 SWAPGS_UNSAFE_STACK
14793 - movq PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp
14794 - ENABLE_INTERRUPTS(CLBR_NONE)
14795 + movq PER_CPU_VAR(kernel_stack), %rsp
14797 /* Zero-extending 32-bit regs, do not remove */
14801 - movl ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
14802 - CFI_REGISTER rip,r10
14803 + GET_THREAD_INFO(%r11)
14804 + movl TI_sysenter_return(%r11), %r11d
14805 + CFI_REGISTER rip,r11
14807 /* Construct struct pt_regs on stack */
14808 pushq_cfi $__USER32_DS /* pt_regs->ss */
14809 pushq_cfi %rbp /* pt_regs->sp */
14810 CFI_REL_OFFSET rsp,0
14811 pushfq_cfi /* pt_regs->flags */
14812 + orl $X86_EFLAGS_IF,(%rsp)
14813 pushq_cfi $__USER32_CS /* pt_regs->cs */
14814 - pushq_cfi %r10 /* pt_regs->ip = thread_info->sysenter_return */
14815 + pushq_cfi %r11 /* pt_regs->ip = thread_info->sysenter_return */
14816 CFI_REL_OFFSET rip,0
14817 pushq_cfi_reg rax /* pt_regs->orig_ax */
14818 pushq_cfi_reg rdi /* pt_regs->di */
14819 @@ -147,15 +176,37 @@ ENTRY(ia32_sysenter_target)
14820 sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
14821 CFI_ADJUST_CFA_OFFSET 10*8
14823 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
14824 + movq_cfi r12, R12
14827 + pax_enter_kernel_user
14829 +#ifdef CONFIG_PAX_RANDKSTACK
14833 + ENABLE_INTERRUPTS(CLBR_NONE)
14836 * no need to do an access_ok check here because rbp has been
14837 * 32bit zero extended
14840 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14841 + addq pax_user_shadow_base,%rbp
14842 + ASM_PAX_OPEN_USERLAND
14846 1: movl (%rbp),%ebp
14847 _ASM_EXTABLE(1b,ia32_badarg)
14850 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14851 + ASM_PAX_CLOSE_USERLAND
14855 * Sysenter doesn't filter flags, so we need to clear NT
14856 * ourselves. To save a few cycles, we can check whether
14857 @@ -165,8 +216,9 @@ ENTRY(ia32_sysenter_target)
14858 jnz sysenter_fix_flags
14859 sysenter_flags_fixed:
14861 - orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
14862 - testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
14863 + GET_THREAD_INFO(%r11)
14864 + orl $TS_COMPAT,TI_status(%r11)
14865 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14867 jnz sysenter_tracesys
14868 cmpq $(IA32_NR_syscalls-1),%rax
14869 @@ -181,9 +233,10 @@ sysenter_do_call:
14871 call *ia32_sys_call_table(,%rax,8)
14872 movq %rax,RAX(%rsp)
14873 + GET_THREAD_INFO(%r11)
14874 DISABLE_INTERRUPTS(CLBR_NONE)
14876 - testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
14877 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14879 sysexit_from_sys_call:
14881 @@ -196,7 +249,9 @@ sysexit_from_sys_call:
14882 * This code path is still called 'sysexit' because it pairs
14883 * with 'sysenter' and it uses the SYSENTER calling convention.
14885 - andl $~TS_COMPAT,ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
14886 + pax_exit_kernel_user
14888 + andl $~TS_COMPAT,TI_status(%r11)
14889 movl RIP(%rsp),%ecx /* User %eip */
14890 CFI_REGISTER rip,rcx
14892 @@ -247,6 +302,9 @@ sysexit_from_sys_call:
14893 movl %ebx,%esi /* 2nd arg: 1st syscall arg */
14894 movl %eax,%edi /* 1st arg: syscall number */
14895 call __audit_syscall_entry
14899 movl RAX(%rsp),%eax /* reload syscall number */
14900 cmpq $(IA32_NR_syscalls-1),%rax
14902 @@ -258,7 +316,7 @@ sysexit_from_sys_call:
14905 .macro auditsys_exit exit
14906 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
14907 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14908 jnz ia32_ret_from_sys_call
14910 ENABLE_INTERRUPTS(CLBR_NONE)
14911 @@ -269,11 +327,12 @@ sysexit_from_sys_call:
14912 1: setbe %al /* 1 if error, 0 if not */
14913 movzbl %al,%edi /* zero-extend that into %edi */
14914 call __audit_syscall_exit
14915 + GET_THREAD_INFO(%r11)
14916 movq RAX(%rsp),%rax /* reload syscall return value */
14917 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
14918 DISABLE_INTERRUPTS(CLBR_NONE)
14920 - testl %edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
14921 + testl %edi,TI_flags(%r11)
14925 @@ -295,7 +354,7 @@ sysenter_fix_flags:
14928 #ifdef CONFIG_AUDITSYSCALL
14929 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
14930 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14931 jz sysenter_auditsys
14934 @@ -307,6 +366,9 @@ sysenter_tracesys:
14936 cmpq $(IA32_NR_syscalls-1),%rax
14937 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14941 jmp sysenter_do_call
14943 ENDPROC(ia32_sysenter_target)
14944 @@ -357,7 +419,6 @@ ENTRY(ia32_cstar_target)
14946 CFI_REGISTER rsp,r8
14947 movq PER_CPU_VAR(kernel_stack),%rsp
14948 - ENABLE_INTERRUPTS(CLBR_NONE)
14950 /* Zero-extending 32-bit regs, do not remove */
14952 @@ -380,16 +441,41 @@ ENTRY(ia32_cstar_target)
14953 sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
14954 CFI_ADJUST_CFA_OFFSET 10*8
14956 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
14957 + movq_cfi r12, R12
14960 + pax_enter_kernel_user
14962 +#ifdef CONFIG_PAX_RANDKSTACK
14966 + ENABLE_INTERRUPTS(CLBR_NONE)
14969 * no need to do an access_ok check here because r8 has been
14970 * 32bit zero extended
14973 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14974 + ASM_PAX_OPEN_USERLAND
14975 + movq pax_user_shadow_base,%r8
14976 + addq RSP(%rsp),%r8
14981 _ASM_EXTABLE(1b,ia32_badarg)
14983 - orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
14984 - testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
14986 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14987 + ASM_PAX_CLOSE_USERLAND
14990 + GET_THREAD_INFO(%r11)
14991 + orl $TS_COMPAT,TI_status(%r11)
14992 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14995 cmpq $IA32_NR_syscalls-1,%rax
14996 @@ -404,12 +490,15 @@ cstar_do_call:
14998 call *ia32_sys_call_table(,%rax,8)
14999 movq %rax,RAX(%rsp)
15000 + GET_THREAD_INFO(%r11)
15001 DISABLE_INTERRUPTS(CLBR_NONE)
15003 - testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
15004 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
15006 sysretl_from_sys_call:
15007 - andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
15008 + pax_exit_kernel_user
15010 + andl $~TS_COMPAT,TI_status(%r11)
15011 RESTORE_RSI_RDI_RDX
15012 movl RIP(%rsp),%ecx
15013 CFI_REGISTER rip,rcx
15014 @@ -451,7 +540,7 @@ sysretl_audit:
15017 #ifdef CONFIG_AUDITSYSCALL
15018 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
15019 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15023 @@ -465,11 +554,19 @@ cstar_tracesys:
15025 cmpq $(IA32_NR_syscalls-1),%rax
15026 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
15031 END(ia32_cstar_target)
15036 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15037 + ASM_PAX_CLOSE_USERLAND
15043 @@ -505,14 +602,8 @@ ENTRY(ia32_syscall)
15044 /*CFI_REL_OFFSET cs,1*8 */
15045 CFI_REL_OFFSET rip,0*8
15048 - * Interrupts are off on entry.
15049 - * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
15050 - * it is too small to ever cause noticeable irq latency.
15052 PARAVIRT_ADJUST_EXCEPTION_FRAME
15054 - ENABLE_INTERRUPTS(CLBR_NONE)
15056 /* Zero-extending 32-bit regs, do not remove */
15058 @@ -528,8 +619,26 @@ ENTRY(ia32_syscall)
15059 sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
15060 CFI_ADJUST_CFA_OFFSET 10*8
15062 - orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
15063 - testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
15064 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15065 + movq_cfi r12, R12
15068 + pax_enter_kernel_user
15070 +#ifdef CONFIG_PAX_RANDKSTACK
15075 + * Interrupts are off on entry.
15076 + * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
15077 + * it is too small to ever cause noticeable irq latency.
15079 + ENABLE_INTERRUPTS(CLBR_NONE)
15081 + GET_THREAD_INFO(%r11)
15082 + orl $TS_COMPAT,TI_status(%r11)
15083 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15085 cmpq $(IA32_NR_syscalls-1),%rax
15087 @@ -557,6 +666,9 @@ ia32_tracesys:
15089 cmpq $(IA32_NR_syscalls-1),%rax
15090 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15097 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15098 index 719cd70..69d576b 100644
15099 --- a/arch/x86/ia32/sys_ia32.c
15100 +++ b/arch/x86/ia32/sys_ia32.c
15101 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15103 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15105 - typeof(ubuf->st_uid) uid = 0;
15106 - typeof(ubuf->st_gid) gid = 0;
15107 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
15108 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
15109 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15110 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15111 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
15112 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15113 index bdf02ee..51a4656 100644
15114 --- a/arch/x86/include/asm/alternative-asm.h
15115 +++ b/arch/x86/include/asm/alternative-asm.h
15120 +#ifdef KERNEXEC_PLUGIN
15121 + .macro pax_force_retaddr_bts rip=0
15122 + btsq $63,\rip(%rsp)
15124 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15125 + .macro pax_force_retaddr rip=0, reload=0
15126 + btsq $63,\rip(%rsp)
15128 + .macro pax_force_fptr ptr
15131 + .macro pax_set_fptr_mask
15134 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15135 + .macro pax_force_retaddr rip=0, reload=0
15137 + pax_set_fptr_mask
15139 + orq %r12,\rip(%rsp)
15141 + .macro pax_force_fptr ptr
15144 + .macro pax_set_fptr_mask
15145 + movabs $0x8000000000000000,%r12
15149 + .macro pax_force_retaddr rip=0, reload=0
15151 + .macro pax_force_fptr ptr
15153 + .macro pax_force_retaddr_bts rip=0
15155 + .macro pax_set_fptr_mask
15159 .macro altinstruction_entry orig alt feature orig_len alt_len pad_len
15163 altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f,142b-141b
15166 - .pushsection .altinstr_replacement,"ax"
15167 + .pushsection .altinstr_replacement,"a"
15172 altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f,142b-141b
15175 - .pushsection .altinstr_replacement,"ax"
15176 + .pushsection .altinstr_replacement,"a"
15180 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15181 index ba32af0..ff42fc0 100644
15182 --- a/arch/x86/include/asm/alternative.h
15183 +++ b/arch/x86/include/asm/alternative.h
15184 @@ -130,7 +130,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15185 ".pushsection .altinstructions,\"a\"\n" \
15186 ALTINSTR_ENTRY(feature, 1) \
15188 - ".pushsection .altinstr_replacement, \"ax\"\n" \
15189 + ".pushsection .altinstr_replacement, \"a\"\n" \
15190 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15193 @@ -140,7 +140,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15194 ALTINSTR_ENTRY(feature1, 1) \
15195 ALTINSTR_ENTRY(feature2, 2) \
15197 - ".pushsection .altinstr_replacement, \"ax\"\n" \
15198 + ".pushsection .altinstr_replacement, \"a\"\n" \
15199 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15200 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15202 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15203 index 976b86a..f3bc83a 100644
15204 --- a/arch/x86/include/asm/apic.h
15205 +++ b/arch/x86/include/asm/apic.h
15206 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15208 #ifdef CONFIG_X86_LOCAL_APIC
15210 -extern unsigned int apic_verbosity;
15211 +extern int apic_verbosity;
15212 extern int local_apic_timer_c2_ok;
15214 extern int disable_apic;
15215 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15216 index 20370c6..a2eb9b0 100644
15217 --- a/arch/x86/include/asm/apm.h
15218 +++ b/arch/x86/include/asm/apm.h
15219 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15220 __asm__ __volatile__(APM_DO_ZERO_SEGS
15223 - "lcall *%%cs:apm_bios_entry\n\t"
15224 + "lcall *%%ss:apm_bios_entry\n\t"
15228 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15229 __asm__ __volatile__(APM_DO_ZERO_SEGS
15232 - "lcall *%%cs:apm_bios_entry\n\t"
15233 + "lcall *%%ss:apm_bios_entry\n\t"
15237 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15238 index 5e5cd12..51cdc93 100644
15239 --- a/arch/x86/include/asm/atomic.h
15240 +++ b/arch/x86/include/asm/atomic.h
15241 @@ -28,6 +28,17 @@ static inline int atomic_read(const atomic_t *v)
15245 + * atomic_read_unchecked - read atomic variable
15246 + * @v: pointer of type atomic_unchecked_t
15248 + * Atomically reads the value of @v.
15250 +static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15252 + return ACCESS_ONCE((v)->counter);
15256 * atomic_set - set atomic variable
15257 * @v: pointer of type atomic_t
15258 * @i: required value
15259 @@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15263 + * atomic_set_unchecked - set atomic variable
15264 + * @v: pointer of type atomic_unchecked_t
15265 + * @i: required value
15267 + * Atomically sets the value of @v to @i.
15269 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15275 * atomic_add - add integer to atomic variable
15276 * @i: integer value to add
15277 * @v: pointer of type atomic_t
15278 @@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15280 static inline void atomic_add(int i, atomic_t *v)
15282 - asm volatile(LOCK_PREFIX "addl %1,%0"
15283 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
15285 +#ifdef CONFIG_PAX_REFCOUNT
15287 + LOCK_PREFIX "subl %1,%0\n"
15289 + _ASM_EXTABLE(0b, 0b)
15292 + : "+m" (v->counter)
15297 + * atomic_add_unchecked - add integer to atomic variable
15298 + * @i: integer value to add
15299 + * @v: pointer of type atomic_unchecked_t
15301 + * Atomically adds @i to @v.
15303 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15305 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
15306 : "+m" (v->counter)
15309 @@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15311 static inline void atomic_sub(int i, atomic_t *v)
15313 - asm volatile(LOCK_PREFIX "subl %1,%0"
15314 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
15316 +#ifdef CONFIG_PAX_REFCOUNT
15318 + LOCK_PREFIX "addl %1,%0\n"
15320 + _ASM_EXTABLE(0b, 0b)
15323 + : "+m" (v->counter)
15328 + * atomic_sub_unchecked - subtract integer from atomic variable
15329 + * @i: integer value to subtract
15330 + * @v: pointer of type atomic_unchecked_t
15332 + * Atomically subtracts @i from @v.
15334 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15336 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
15337 : "+m" (v->counter)
15340 @@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15342 static inline int atomic_sub_and_test(int i, atomic_t *v)
15344 - GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15345 + GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15349 @@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15351 static inline void atomic_inc(atomic_t *v)
15353 - asm volatile(LOCK_PREFIX "incl %0"
15354 + asm volatile(LOCK_PREFIX "incl %0\n"
15356 +#ifdef CONFIG_PAX_REFCOUNT
15358 + LOCK_PREFIX "decl %0\n"
15360 + _ASM_EXTABLE(0b, 0b)
15363 + : "+m" (v->counter));
15367 + * atomic_inc_unchecked - increment atomic variable
15368 + * @v: pointer of type atomic_unchecked_t
15370 + * Atomically increments @v by 1.
15372 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15374 + asm volatile(LOCK_PREFIX "incl %0\n"
15375 : "+m" (v->counter));
15378 @@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15380 static inline void atomic_dec(atomic_t *v)
15382 - asm volatile(LOCK_PREFIX "decl %0"
15383 + asm volatile(LOCK_PREFIX "decl %0\n"
15385 +#ifdef CONFIG_PAX_REFCOUNT
15387 + LOCK_PREFIX "incl %0\n"
15389 + _ASM_EXTABLE(0b, 0b)
15392 + : "+m" (v->counter));
15396 + * atomic_dec_unchecked - decrement atomic variable
15397 + * @v: pointer of type atomic_unchecked_t
15399 + * Atomically decrements @v by 1.
15401 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15403 + asm volatile(LOCK_PREFIX "decl %0\n"
15404 : "+m" (v->counter));
15407 @@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
15409 static inline int atomic_dec_and_test(atomic_t *v)
15411 - GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15412 + GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15416 @@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15418 static inline int atomic_inc_and_test(atomic_t *v)
15420 - GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15421 + GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15425 + * atomic_inc_and_test_unchecked - increment and test
15426 + * @v: pointer of type atomic_unchecked_t
15428 + * Atomically increments @v by 1
15429 + * and returns true if the result is zero, or false for all
15432 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15434 + GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15438 @@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15440 static inline int atomic_add_negative(int i, atomic_t *v)
15442 - GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15443 + GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15447 @@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15449 * Atomically adds @i to @v and returns @i + @v
15451 -static inline int atomic_add_return(int i, atomic_t *v)
15452 +static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
15454 + return i + xadd_check_overflow(&v->counter, i);
15458 + * atomic_add_return_unchecked - add integer and return
15459 + * @i: integer value to add
15460 + * @v: pointer of type atomic_unchecked_t
15462 + * Atomically adds @i to @v and returns @i + @v
15464 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15466 return i + xadd(&v->counter, i);
15468 @@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
15470 * Atomically subtracts @i from @v and returns @v - @i
15472 -static inline int atomic_sub_return(int i, atomic_t *v)
15473 +static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
15475 return atomic_add_return(-i, v);
15478 #define atomic_inc_return(v) (atomic_add_return(1, v))
15479 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15481 + return atomic_add_return_unchecked(1, v);
15483 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15485 -static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15486 +static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15488 + return cmpxchg(&v->counter, old, new);
15491 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15493 return cmpxchg(&v->counter, old, new);
15495 @@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15496 return xchg(&v->counter, new);
15499 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15501 + return xchg(&v->counter, new);
15505 * __atomic_add_unless - add unless the number is already a given value
15506 * @v: pointer of type atomic_t
15507 @@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
15509 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15513 c = atomic_read(v);
15515 - if (unlikely(c == (u)))
15516 + if (unlikely(c == u))
15518 - old = atomic_cmpxchg((v), c, c + (a));
15520 + asm volatile("addl %2,%0\n"
15522 +#ifdef CONFIG_PAX_REFCOUNT
15526 + _ASM_EXTABLE(0b, 0b)
15530 + : "0" (c), "ir" (a));
15532 + old = atomic_cmpxchg(v, c, new);
15533 if (likely(old == c))
15536 @@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15540 + * atomic_inc_not_zero_hint - increment if not null
15541 + * @v: pointer of type atomic_t
15542 + * @hint: probable value of the atomic before the increment
15544 + * This version of atomic_inc_not_zero() gives a hint of probable
15545 + * value of the atomic. This helps processor to not read the memory
15546 + * before doing the atomic read/modify/write cycle, lowering
15547 + * number of bus transactions on some arches.
15549 + * Returns: 0 if increment was not done, 1 otherwise.
15551 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15552 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15554 + int val, c = hint, new;
15556 + /* sanity test, should be removed by compiler if hint is a constant */
15558 + return __atomic_add_unless(v, 1, 0);
15561 + asm volatile("incl %0\n"
15563 +#ifdef CONFIG_PAX_REFCOUNT
15567 + _ASM_EXTABLE(0b, 0b)
15573 + val = atomic_cmpxchg(v, c, new);
15583 * atomic_inc_short - increment of a short integer
15584 * @v: pointer to type int
15586 @@ -220,14 +422,37 @@ static inline short int atomic_inc_short(short int *v)
15589 /* These are x86-specific, used by some header files */
15590 -#define atomic_clear_mask(mask, addr) \
15591 - asm volatile(LOCK_PREFIX "andl %0,%1" \
15592 - : : "r" (~(mask)), "m" (*(addr)) : "memory")
15593 +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15595 + asm volatile(LOCK_PREFIX "andl %1,%0"
15596 + : "+m" (v->counter)
15601 -#define atomic_set_mask(mask, addr) \
15602 - asm volatile(LOCK_PREFIX "orl %0,%1" \
15603 - : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15605 +static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15607 + asm volatile(LOCK_PREFIX "andl %1,%0"
15608 + : "+m" (v->counter)
15613 +static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15615 + asm volatile(LOCK_PREFIX "orl %1,%0"
15616 + : "+m" (v->counter)
15621 +static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15623 + asm volatile(LOCK_PREFIX "orl %1,%0"
15624 + : "+m" (v->counter)
15629 #ifdef CONFIG_X86_32
15630 # include <asm/atomic64_32.h>
15631 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15632 index b154de7..bf18a5a 100644
15633 --- a/arch/x86/include/asm/atomic64_32.h
15634 +++ b/arch/x86/include/asm/atomic64_32.h
15635 @@ -12,6 +12,14 @@ typedef struct {
15636 u64 __aligned(8) counter;
15639 +#ifdef CONFIG_PAX_REFCOUNT
15641 + u64 __aligned(8) counter;
15642 +} atomic64_unchecked_t;
15644 +typedef atomic64_t atomic64_unchecked_t;
15647 #define ATOMIC64_INIT(val) { (val) }
15649 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15650 @@ -37,21 +45,31 @@ typedef struct {
15651 ATOMIC64_DECL_ONE(sym##_386)
15653 ATOMIC64_DECL_ONE(add_386);
15654 +ATOMIC64_DECL_ONE(add_unchecked_386);
15655 ATOMIC64_DECL_ONE(sub_386);
15656 +ATOMIC64_DECL_ONE(sub_unchecked_386);
15657 ATOMIC64_DECL_ONE(inc_386);
15658 +ATOMIC64_DECL_ONE(inc_unchecked_386);
15659 ATOMIC64_DECL_ONE(dec_386);
15660 +ATOMIC64_DECL_ONE(dec_unchecked_386);
15663 #define alternative_atomic64(f, out, in...) \
15664 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15666 ATOMIC64_DECL(read);
15667 +ATOMIC64_DECL(read_unchecked);
15668 ATOMIC64_DECL(set);
15669 +ATOMIC64_DECL(set_unchecked);
15670 ATOMIC64_DECL(xchg);
15671 ATOMIC64_DECL(add_return);
15672 +ATOMIC64_DECL(add_return_unchecked);
15673 ATOMIC64_DECL(sub_return);
15674 +ATOMIC64_DECL(sub_return_unchecked);
15675 ATOMIC64_DECL(inc_return);
15676 +ATOMIC64_DECL(inc_return_unchecked);
15677 ATOMIC64_DECL(dec_return);
15678 +ATOMIC64_DECL(dec_return_unchecked);
15679 ATOMIC64_DECL(dec_if_positive);
15680 ATOMIC64_DECL(inc_not_zero);
15681 ATOMIC64_DECL(add_unless);
15682 @@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15686 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15687 + * @p: pointer to type atomic64_unchecked_t
15688 + * @o: expected value
15691 + * Atomically sets @v to @n if it was equal to @o and returns
15695 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15697 + return cmpxchg64(&v->counter, o, n);
15701 * atomic64_xchg - xchg atomic64 variable
15702 * @v: pointer to type atomic64_t
15703 * @n: value to assign
15704 @@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15708 + * atomic64_set_unchecked - set atomic64 variable
15709 + * @v: pointer to type atomic64_unchecked_t
15710 + * @n: value to assign
15712 + * Atomically sets the value of @v to @n.
15714 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15716 + unsigned high = (unsigned)(i >> 32);
15717 + unsigned low = (unsigned)i;
15718 + alternative_atomic64(set, /* no output */,
15719 + "S" (v), "b" (low), "c" (high)
15720 + : "eax", "edx", "memory");
15724 * atomic64_read - read atomic64 variable
15725 * @v: pointer to type atomic64_t
15727 @@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15731 + * atomic64_read_unchecked - read atomic64 variable
15732 + * @v: pointer to type atomic64_unchecked_t
15734 + * Atomically reads the value of @v and returns it.
15736 +static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15739 + alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15744 * atomic64_add_return - add and return
15745 * @i: integer value to add
15746 * @v: pointer to type atomic64_t
15747 @@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15752 + * atomic64_add_return_unchecked - add and return
15753 + * @i: integer value to add
15754 + * @v: pointer to type atomic64_unchecked_t
15756 + * Atomically adds @i to @v and returns @i + *@v
15758 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15760 + alternative_atomic64(add_return_unchecked,
15761 + ASM_OUTPUT2("+A" (i), "+c" (v)),
15762 + ASM_NO_INPUT_CLOBBER("memory"));
15767 * Other variants with different arithmetic operators:
15769 @@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15773 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15776 + alternative_atomic64(inc_return_unchecked, "=&A" (a),
15777 + "S" (v) : "memory", "ecx");
15781 static inline long long atomic64_dec_return(atomic64_t *v)
15784 @@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15788 + * atomic64_add_unchecked - add integer to atomic64 variable
15789 + * @i: integer value to add
15790 + * @v: pointer to type atomic64_unchecked_t
15792 + * Atomically adds @i to @v.
15794 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15796 + __alternative_atomic64(add_unchecked, add_return_unchecked,
15797 + ASM_OUTPUT2("+A" (i), "+c" (v)),
15798 + ASM_NO_INPUT_CLOBBER("memory"));
15803 * atomic64_sub - subtract the atomic64 variable
15804 * @i: integer value to subtract
15805 * @v: pointer to type atomic64_t
15806 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15807 index f8d273e..02f39f3 100644
15808 --- a/arch/x86/include/asm/atomic64_64.h
15809 +++ b/arch/x86/include/asm/atomic64_64.h
15810 @@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
15814 + * atomic64_read_unchecked - read atomic64 variable
15815 + * @v: pointer of type atomic64_unchecked_t
15817 + * Atomically reads the value of @v.
15818 + * Doesn't imply a read memory barrier.
15820 +static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15822 + return ACCESS_ONCE((v)->counter);
15826 * atomic64_set - set atomic64 variable
15827 * @v: pointer to type atomic64_t
15828 * @i: required value
15829 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15833 + * atomic64_set_unchecked - set atomic64 variable
15834 + * @v: pointer to type atomic64_unchecked_t
15835 + * @i: required value
15837 + * Atomically sets the value of @v to @i.
15839 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15845 * atomic64_add - add integer to atomic64 variable
15846 * @i: integer value to add
15847 * @v: pointer to type atomic64_t
15848 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15850 static inline void atomic64_add(long i, atomic64_t *v)
15852 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
15854 +#ifdef CONFIG_PAX_REFCOUNT
15856 + LOCK_PREFIX "subq %1,%0\n"
15858 + _ASM_EXTABLE(0b, 0b)
15861 + : "=m" (v->counter)
15862 + : "er" (i), "m" (v->counter));
15866 + * atomic64_add_unchecked - add integer to atomic64 variable
15867 + * @i: integer value to add
15868 + * @v: pointer to type atomic64_unchecked_t
15870 + * Atomically adds @i to @v.
15872 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
15874 asm volatile(LOCK_PREFIX "addq %1,%0"
15875 : "=m" (v->counter)
15876 : "er" (i), "m" (v->counter));
15877 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
15879 static inline void atomic64_sub(long i, atomic64_t *v)
15881 - asm volatile(LOCK_PREFIX "subq %1,%0"
15882 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
15884 +#ifdef CONFIG_PAX_REFCOUNT
15886 + LOCK_PREFIX "addq %1,%0\n"
15888 + _ASM_EXTABLE(0b, 0b)
15891 + : "=m" (v->counter)
15892 + : "er" (i), "m" (v->counter));
15896 + * atomic64_sub_unchecked - subtract the atomic64 variable
15897 + * @i: integer value to subtract
15898 + * @v: pointer to type atomic64_unchecked_t
15900 + * Atomically subtracts @i from @v.
15902 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
15904 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
15905 : "=m" (v->counter)
15906 : "er" (i), "m" (v->counter));
15908 @@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
15910 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15912 - GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
15913 + GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
15917 @@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15919 static inline void atomic64_inc(atomic64_t *v)
15921 + asm volatile(LOCK_PREFIX "incq %0\n"
15923 +#ifdef CONFIG_PAX_REFCOUNT
15925 + LOCK_PREFIX "decq %0\n"
15927 + _ASM_EXTABLE(0b, 0b)
15930 + : "=m" (v->counter)
15931 + : "m" (v->counter));
15935 + * atomic64_inc_unchecked - increment atomic64 variable
15936 + * @v: pointer to type atomic64_unchecked_t
15938 + * Atomically increments @v by 1.
15940 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15942 asm volatile(LOCK_PREFIX "incq %0"
15943 : "=m" (v->counter)
15944 : "m" (v->counter));
15945 @@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15947 static inline void atomic64_dec(atomic64_t *v)
15949 - asm volatile(LOCK_PREFIX "decq %0"
15950 + asm volatile(LOCK_PREFIX "decq %0\n"
15952 +#ifdef CONFIG_PAX_REFCOUNT
15954 + LOCK_PREFIX "incq %0\n"
15956 + _ASM_EXTABLE(0b, 0b)
15959 + : "=m" (v->counter)
15960 + : "m" (v->counter));
15964 + * atomic64_dec_unchecked - decrement atomic64 variable
15965 + * @v: pointer to type atomic64_t
15967 + * Atomically decrements @v by 1.
15969 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15971 + asm volatile(LOCK_PREFIX "decq %0\n"
15972 : "=m" (v->counter)
15973 : "m" (v->counter));
15975 @@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15977 static inline int atomic64_dec_and_test(atomic64_t *v)
15979 - GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15980 + GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15984 @@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15986 static inline int atomic64_inc_and_test(atomic64_t *v)
15988 - GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
15989 + GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
15993 @@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15995 static inline int atomic64_add_negative(long i, atomic64_t *v)
15997 - GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
15998 + GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
16002 @@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
16004 static inline long atomic64_add_return(long i, atomic64_t *v)
16006 + return i + xadd_check_overflow(&v->counter, i);
16010 + * atomic64_add_return_unchecked - add and return
16011 + * @i: integer value to add
16012 + * @v: pointer to type atomic64_unchecked_t
16014 + * Atomically adds @i to @v and returns @i + @v
16016 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
16018 return i + xadd(&v->counter, i);
16021 @@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
16024 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
16025 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
16027 + return atomic64_add_return_unchecked(1, v);
16029 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
16031 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16032 @@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16033 return cmpxchg(&v->counter, old, new);
16036 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
16038 + return cmpxchg(&v->counter, old, new);
16041 static inline long atomic64_xchg(atomic64_t *v, long new)
16043 return xchg(&v->counter, new);
16044 @@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
16046 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
16049 + long c, old, new;
16050 c = atomic64_read(v);
16052 - if (unlikely(c == (u)))
16053 + if (unlikely(c == u))
16055 - old = atomic64_cmpxchg((v), c, c + (a));
16057 + asm volatile("add %2,%0\n"
16059 +#ifdef CONFIG_PAX_REFCOUNT
16063 + _ASM_EXTABLE(0b, 0b)
16067 + : "0" (c), "ir" (a));
16069 + old = atomic64_cmpxchg(v, c, new);
16070 if (likely(old == c))
16078 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
16079 diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
16080 index 959e45b..6ea9bf6 100644
16081 --- a/arch/x86/include/asm/barrier.h
16082 +++ b/arch/x86/include/asm/barrier.h
16085 compiletime_assert_atomic_type(*p); \
16087 - ACCESS_ONCE(*p) = (v); \
16088 + ACCESS_ONCE_RW(*p) = (v); \
16091 #define smp_load_acquire(p) \
16092 @@ -74,7 +74,7 @@ do { \
16094 compiletime_assert_atomic_type(*p); \
16096 - ACCESS_ONCE(*p) = (v); \
16097 + ACCESS_ONCE_RW(*p) = (v); \
16100 #define smp_load_acquire(p) \
16101 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16102 index cfe3b95..d01b118 100644
16103 --- a/arch/x86/include/asm/bitops.h
16104 +++ b/arch/x86/include/asm/bitops.h
16106 * a mask operation on a byte.
16108 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16109 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16110 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16111 #define CONST_MASK(nr) (1 << ((nr) & 7))
16114 @@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16116 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16118 - GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16119 + GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16123 @@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16125 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16127 - GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16128 + GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16132 @@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16134 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16136 - GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16137 + GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16140 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16141 @@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16143 * Undefined if no bit exists, so code should check against 0 first.
16145 -static inline unsigned long __ffs(unsigned long word)
16146 +static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16148 asm("rep; bsf %1,%0"
16150 @@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16152 * Undefined if no zero exists, so code should check against ~0UL first.
16154 -static inline unsigned long ffz(unsigned long word)
16155 +static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16157 asm("rep; bsf %1,%0"
16159 @@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16161 * Undefined if no set bit exists, so code should check against 0 first.
16163 -static inline unsigned long __fls(unsigned long word)
16164 +static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16168 @@ -434,7 +434,7 @@ static inline int ffs(int x)
16169 * set bit if value is nonzero. The last (most significant) bit is
16172 -static inline int fls(int x)
16173 +static inline int __intentional_overflow(-1) fls(int x)
16177 @@ -476,7 +476,7 @@ static inline int fls(int x)
16180 #ifdef CONFIG_X86_64
16181 -static __always_inline int fls64(__u64 x)
16182 +static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16186 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16187 index 4fa687a..60f2d39 100644
16188 --- a/arch/x86/include/asm/boot.h
16189 +++ b/arch/x86/include/asm/boot.h
16191 #include <uapi/asm/boot.h>
16193 /* Physical address where kernel should be loaded. */
16194 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16195 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16196 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16197 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16199 +#ifndef __ASSEMBLY__
16200 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
16201 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16204 /* Minimum kernel alignment, as a power of two */
16205 #ifdef CONFIG_X86_64
16206 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16207 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16208 index 48f99f1..d78ebf9 100644
16209 --- a/arch/x86/include/asm/cache.h
16210 +++ b/arch/x86/include/asm/cache.h
16213 /* L1 cache line size */
16214 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16215 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16216 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16218 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16219 +#define __read_only __attribute__((__section__(".data..read_only")))
16221 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16222 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16223 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16225 #ifdef CONFIG_X86_VSMP
16227 diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16228 index 1c8b50e..166bcaa 100644
16229 --- a/arch/x86/include/asm/calling.h
16230 +++ b/arch/x86/include/asm/calling.h
16231 @@ -96,23 +96,26 @@ For 32-bit we have the following conventions - kernel is built with
16234 .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
16235 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16236 + movq_cfi r12, R12+\offset
16239 - movq_cfi r11, 6*8+\offset
16240 + movq_cfi r11, R11+\offset
16243 - movq_cfi r10, 7*8+\offset
16244 - movq_cfi r9, 8*8+\offset
16245 - movq_cfi r8, 9*8+\offset
16246 + movq_cfi r10, R10+\offset
16247 + movq_cfi r9, R9+\offset
16248 + movq_cfi r8, R8+\offset
16251 - movq_cfi rax, 10*8+\offset
16252 + movq_cfi rax, RAX+\offset
16255 - movq_cfi rcx, 11*8+\offset
16256 + movq_cfi rcx, RCX+\offset
16258 - movq_cfi rdx, 12*8+\offset
16259 - movq_cfi rsi, 13*8+\offset
16260 - movq_cfi rdi, 14*8+\offset
16261 + movq_cfi rdx, RDX+\offset
16262 + movq_cfi rsi, RSI+\offset
16263 + movq_cfi rdi, RDI+\offset
16265 .macro SAVE_C_REGS offset=0
16266 SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
16267 @@ -131,76 +134,87 @@ For 32-bit we have the following conventions - kernel is built with
16270 .macro SAVE_EXTRA_REGS offset=0
16271 - movq_cfi r15, 0*8+\offset
16272 - movq_cfi r14, 1*8+\offset
16273 - movq_cfi r13, 2*8+\offset
16274 - movq_cfi r12, 3*8+\offset
16275 - movq_cfi rbp, 4*8+\offset
16276 - movq_cfi rbx, 5*8+\offset
16277 + movq_cfi r15, R15+\offset
16278 + movq_cfi r14, R14+\offset
16279 + movq_cfi r13, R13+\offset
16280 +#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16281 + movq_cfi r12, R12+\offset
16283 + movq_cfi rbp, RBP+\offset
16284 + movq_cfi rbx, RBX+\offset
16286 .macro SAVE_EXTRA_REGS_RBP offset=0
16287 - movq_cfi rbp, 4*8+\offset
16288 + movq_cfi rbp, RBP+\offset
16291 .macro RESTORE_EXTRA_REGS offset=0
16292 - movq_cfi_restore 0*8+\offset, r15
16293 - movq_cfi_restore 1*8+\offset, r14
16294 - movq_cfi_restore 2*8+\offset, r13
16295 - movq_cfi_restore 3*8+\offset, r12
16296 - movq_cfi_restore 4*8+\offset, rbp
16297 - movq_cfi_restore 5*8+\offset, rbx
16298 + movq_cfi_restore R15+\offset, r15
16299 + movq_cfi_restore R14+\offset, r14
16300 + movq_cfi_restore R13+\offset, r13
16301 +#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16302 + movq_cfi_restore R12+\offset, r12
16304 + movq_cfi_restore RBP+\offset, rbp
16305 + movq_cfi_restore RBX+\offset, rbx
16308 .macro ZERO_EXTRA_REGS
16312 +#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16319 - .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
16320 + .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1, rstor_r12=1
16321 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16323 + movq_cfi_restore R12, r12
16327 - movq_cfi_restore 6*8, r11
16328 + movq_cfi_restore R11, r11
16331 - movq_cfi_restore 7*8, r10
16332 - movq_cfi_restore 8*8, r9
16333 - movq_cfi_restore 9*8, r8
16334 + movq_cfi_restore R10, r10
16335 + movq_cfi_restore R9, r9
16336 + movq_cfi_restore R8, r8
16339 - movq_cfi_restore 10*8, rax
16340 + movq_cfi_restore RAX, rax
16343 - movq_cfi_restore 11*8, rcx
16344 + movq_cfi_restore RCX, rcx
16347 - movq_cfi_restore 12*8, rdx
16348 + movq_cfi_restore RDX, rdx
16350 - movq_cfi_restore 13*8, rsi
16351 - movq_cfi_restore 14*8, rdi
16352 + movq_cfi_restore RSI, rsi
16353 + movq_cfi_restore RDI, rdi
16355 .macro RESTORE_C_REGS
16356 - RESTORE_C_REGS_HELPER 1,1,1,1,1
16357 + RESTORE_C_REGS_HELPER 1,1,1,1,1,1
16359 .macro RESTORE_C_REGS_EXCEPT_RAX
16360 - RESTORE_C_REGS_HELPER 0,1,1,1,1
16361 + RESTORE_C_REGS_HELPER 0,1,1,1,1,0
16363 .macro RESTORE_C_REGS_EXCEPT_RCX
16364 - RESTORE_C_REGS_HELPER 1,0,1,1,1
16365 + RESTORE_C_REGS_HELPER 1,0,1,1,1,0
16367 .macro RESTORE_C_REGS_EXCEPT_R11
16368 - RESTORE_C_REGS_HELPER 1,1,0,1,1
16369 + RESTORE_C_REGS_HELPER 1,1,0,1,1,1
16371 .macro RESTORE_C_REGS_EXCEPT_RCX_R11
16372 - RESTORE_C_REGS_HELPER 1,0,0,1,1
16373 + RESTORE_C_REGS_HELPER 1,0,0,1,1,1
16375 .macro RESTORE_RSI_RDI
16376 - RESTORE_C_REGS_HELPER 0,0,0,0,0
16377 + RESTORE_C_REGS_HELPER 0,0,0,0,0,1
16379 .macro RESTORE_RSI_RDI_RDX
16380 - RESTORE_C_REGS_HELPER 0,0,0,0,1
16381 + RESTORE_C_REGS_HELPER 0,0,0,0,1,1
16384 .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
16385 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16386 index f50de69..2b0a458 100644
16387 --- a/arch/x86/include/asm/checksum_32.h
16388 +++ b/arch/x86/include/asm/checksum_32.h
16389 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16390 int len, __wsum sum,
16391 int *src_err_ptr, int *dst_err_ptr);
16393 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16394 + int len, __wsum sum,
16395 + int *src_err_ptr, int *dst_err_ptr);
16397 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16398 + int len, __wsum sum,
16399 + int *src_err_ptr, int *dst_err_ptr);
16402 * Note: when you get a NULL pointer exception here this means someone
16403 * passed in an incorrect kernel address to one of these functions.
16404 @@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16408 - ret = csum_partial_copy_generic((__force void *)src, dst,
16409 + ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16410 len, sum, err_ptr, NULL);
16413 @@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16415 if (access_ok(VERIFY_WRITE, dst, len)) {
16417 - ret = csum_partial_copy_generic(src, (__force void *)dst,
16418 + ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16419 len, sum, NULL, err_ptr);
16422 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16423 index 99c105d7..2f667ac 100644
16424 --- a/arch/x86/include/asm/cmpxchg.h
16425 +++ b/arch/x86/include/asm/cmpxchg.h
16426 @@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
16427 __compiletime_error("Bad argument size for cmpxchg");
16428 extern void __xadd_wrong_size(void)
16429 __compiletime_error("Bad argument size for xadd");
16430 +extern void __xadd_check_overflow_wrong_size(void)
16431 + __compiletime_error("Bad argument size for xadd_check_overflow");
16432 extern void __add_wrong_size(void)
16433 __compiletime_error("Bad argument size for add");
16434 +extern void __add_check_overflow_wrong_size(void)
16435 + __compiletime_error("Bad argument size for add_check_overflow");
16438 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16439 @@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
16443 +#ifdef CONFIG_PAX_REFCOUNT
16444 +#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16446 + __typeof__ (*(ptr)) __ret = (arg); \
16447 + switch (sizeof(*(ptr))) { \
16448 + case __X86_CASE_L: \
16449 + asm volatile (lock #op "l %0, %1\n" \
16453 + _ASM_EXTABLE(0b, 0b) \
16454 + : "+r" (__ret), "+m" (*(ptr)) \
16455 + : : "memory", "cc"); \
16457 + case __X86_CASE_Q: \
16458 + asm volatile (lock #op "q %q0, %1\n" \
16462 + _ASM_EXTABLE(0b, 0b) \
16463 + : "+r" (__ret), "+m" (*(ptr)) \
16464 + : : "memory", "cc"); \
16467 + __ ## op ## _check_overflow_wrong_size(); \
16472 +#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16476 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16477 * Since this is generally used to protect other memory information, we
16478 @@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16479 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16480 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16482 +#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16483 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16485 #define __add(ptr, inc, lock) \
16487 __typeof__ (*(ptr)) __ret = (inc); \
16488 diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16489 index acdee09..a553db3 100644
16490 --- a/arch/x86/include/asm/compat.h
16491 +++ b/arch/x86/include/asm/compat.h
16492 @@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16493 typedef u32 compat_uint_t;
16494 typedef u32 compat_ulong_t;
16495 typedef u64 __attribute__((aligned(4))) compat_u64;
16496 -typedef u32 compat_uptr_t;
16497 +typedef u32 __user compat_uptr_t;
16499 struct compat_timespec {
16500 compat_time_t tv_sec;
16501 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16502 index 3d6606f..91703f1 100644
16503 --- a/arch/x86/include/asm/cpufeature.h
16504 +++ b/arch/x86/include/asm/cpufeature.h
16505 @@ -214,7 +214,7 @@
16506 #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
16507 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
16508 #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
16510 +#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
16512 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16513 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16514 @@ -222,7 +222,7 @@
16515 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
16516 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
16517 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
16518 -#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
16519 +#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
16520 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
16521 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16522 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
16523 @@ -401,6 +401,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
16524 #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
16525 #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
16526 #define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
16527 +#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16530 extern void warn_pre_alternatives(void);
16531 @@ -454,7 +455,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16533 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16535 - warn_pre_alternatives();
16536 + if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16537 + warn_pre_alternatives();
16541 @@ -475,7 +477,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16542 ".section .discard,\"aw\",@progbits\n"
16543 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16545 - ".section .altinstr_replacement,\"ax\"\n"
16546 + ".section .altinstr_replacement,\"a\"\n"
16550 @@ -510,7 +512,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16551 " .byte 5f - 4f\n" /* repl len */
16552 " .byte 3b - 2b\n" /* pad len */
16554 - ".section .altinstr_replacement,\"ax\"\n"
16555 + ".section .altinstr_replacement,\"a\"\n"
16556 "4: jmp %l[t_no]\n"
16559 @@ -545,7 +547,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16560 ".section .discard,\"aw\",@progbits\n"
16561 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16563 - ".section .altinstr_replacement,\"ax\"\n"
16564 + ".section .altinstr_replacement,\"a\"\n"
16568 @@ -560,7 +562,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16569 ".section .discard,\"aw\",@progbits\n"
16570 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16572 - ".section .altinstr_replacement,\"ax\"\n"
16573 + ".section .altinstr_replacement,\"a\"\n"
16577 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16578 index a0bf89f..56f0b2a 100644
16579 --- a/arch/x86/include/asm/desc.h
16580 +++ b/arch/x86/include/asm/desc.h
16582 #include <asm/desc_defs.h>
16583 #include <asm/ldt.h>
16584 #include <asm/mmu.h>
16585 +#include <asm/pgtable.h>
16587 #include <linux/smp.h>
16588 #include <linux/percpu.h>
16589 @@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16591 desc->type = (info->read_exec_only ^ 1) << 1;
16592 desc->type |= info->contents << 2;
16593 + desc->type |= info->seg_not_present ^ 1;
16597 @@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16600 extern struct desc_ptr idt_descr;
16601 -extern gate_desc idt_table[];
16602 -extern struct desc_ptr debug_idt_descr;
16603 -extern gate_desc debug_idt_table[];
16606 - struct desc_struct gdt[GDT_ENTRIES];
16607 -} __attribute__((aligned(PAGE_SIZE)));
16609 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16610 +extern gate_desc idt_table[IDT_ENTRIES];
16611 +extern const struct desc_ptr debug_idt_descr;
16612 +extern gate_desc debug_idt_table[IDT_ENTRIES];
16614 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16615 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16617 - return per_cpu(gdt_page, cpu).gdt;
16618 + return cpu_gdt_table[cpu];
16621 #ifdef CONFIG_X86_64
16622 @@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16623 unsigned long base, unsigned dpl, unsigned flags,
16624 unsigned short seg)
16626 - gate->a = (seg << 16) | (base & 0xffff);
16627 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16628 + gate->gate.offset_low = base;
16629 + gate->gate.seg = seg;
16630 + gate->gate.reserved = 0;
16631 + gate->gate.type = type;
16632 + gate->gate.s = 0;
16633 + gate->gate.dpl = dpl;
16634 + gate->gate.p = 1;
16635 + gate->gate.offset_high = base >> 16;
16639 @@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16641 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16643 + pax_open_kernel();
16644 memcpy(&idt[entry], gate, sizeof(*gate));
16645 + pax_close_kernel();
16648 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16650 + pax_open_kernel();
16651 memcpy(&ldt[entry], desc, 8);
16652 + pax_close_kernel();
16656 @@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16657 default: size = sizeof(*gdt); break;
16660 + pax_open_kernel();
16661 memcpy(&gdt[entry], desc, size);
16662 + pax_close_kernel();
16665 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16666 @@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16668 static inline void native_load_tr_desc(void)
16670 + pax_open_kernel();
16671 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16672 + pax_close_kernel();
16675 static inline void native_load_gdt(const struct desc_ptr *dtr)
16676 @@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16677 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16680 + pax_open_kernel();
16681 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16682 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16683 + pax_close_kernel();
16686 /* This intentionally ignores lm, since 32-bit apps don't have that field. */
16687 @@ -295,7 +308,7 @@ static inline void load_LDT(mm_context_t *pc)
16691 -static inline unsigned long get_desc_base(const struct desc_struct *desc)
16692 +static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16694 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16696 @@ -319,7 +332,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16699 #ifdef CONFIG_X86_64
16700 -static inline void set_nmi_gate(int gate, void *addr)
16701 +static inline void set_nmi_gate(int gate, const void *addr)
16705 @@ -329,14 +342,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16708 #ifdef CONFIG_TRACING
16709 -extern struct desc_ptr trace_idt_descr;
16710 -extern gate_desc trace_idt_table[];
16711 +extern const struct desc_ptr trace_idt_descr;
16712 +extern gate_desc trace_idt_table[IDT_ENTRIES];
16713 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16715 write_idt_entry(trace_idt_table, entry, gate);
16718 -static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16719 +static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16720 unsigned dpl, unsigned ist, unsigned seg)
16723 @@ -356,7 +369,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16724 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16727 -static inline void _set_gate(int gate, unsigned type, void *addr,
16728 +static inline void _set_gate(int gate, unsigned type, const void *addr,
16729 unsigned dpl, unsigned ist, unsigned seg)
16732 @@ -379,14 +392,14 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16733 #define set_intr_gate_notrace(n, addr) \
16735 BUG_ON((unsigned)n > 0xFF); \
16736 - _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16737 + _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16741 #define set_intr_gate(n, addr) \
16743 set_intr_gate_notrace(n, addr); \
16744 - _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16745 + _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16746 0, 0, __KERNEL_CS); \
16749 @@ -414,19 +427,19 @@ static inline void alloc_system_vector(int vector)
16751 * This routine sets up an interrupt gate at directory privilege level 3.
16753 -static inline void set_system_intr_gate(unsigned int n, void *addr)
16754 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
16756 BUG_ON((unsigned)n > 0xFF);
16757 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16760 -static inline void set_system_trap_gate(unsigned int n, void *addr)
16761 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
16763 BUG_ON((unsigned)n > 0xFF);
16764 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16767 -static inline void set_trap_gate(unsigned int n, void *addr)
16768 +static inline void set_trap_gate(unsigned int n, const void *addr)
16770 BUG_ON((unsigned)n > 0xFF);
16771 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16772 @@ -435,16 +448,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16773 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16775 BUG_ON((unsigned)n > 0xFF);
16776 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16777 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16780 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16781 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16783 BUG_ON((unsigned)n > 0xFF);
16784 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16787 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16788 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16790 BUG_ON((unsigned)n > 0xFF);
16791 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16792 @@ -516,4 +529,17 @@ static inline void load_current_idt(void)
16794 load_idt((const struct desc_ptr *)&idt_descr);
16797 +#ifdef CONFIG_X86_32
16798 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16800 + struct desc_struct d;
16802 + if (likely(limit))
16803 + limit = (limit - 1UL) >> PAGE_SHIFT;
16804 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
16805 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16809 #endif /* _ASM_X86_DESC_H */
16810 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16811 index 278441f..b95a174 100644
16812 --- a/arch/x86/include/asm/desc_defs.h
16813 +++ b/arch/x86/include/asm/desc_defs.h
16814 @@ -31,6 +31,12 @@ struct desc_struct {
16815 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16816 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16821 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16822 + unsigned offset_high: 16;
16825 } __attribute__((packed));
16827 diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16828 index ced283a..ffe04cc 100644
16829 --- a/arch/x86/include/asm/div64.h
16830 +++ b/arch/x86/include/asm/div64.h
16835 -static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16836 +static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16840 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16841 index f161c18..97d43e8 100644
16842 --- a/arch/x86/include/asm/elf.h
16843 +++ b/arch/x86/include/asm/elf.h
16844 @@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
16846 #include <asm/vdso.h>
16848 -#ifdef CONFIG_X86_64
16849 -extern unsigned int vdso64_enabled;
16851 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
16852 extern unsigned int vdso32_enabled;
16854 @@ -250,7 +247,25 @@ extern int force_personality32;
16855 the loader. We need to make sure that it is out of the way of the program
16856 that it will "exec", and that there is sufficient room for the brk. */
16858 +#ifdef CONFIG_PAX_SEGMEXEC
16859 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
16861 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
16864 +#ifdef CONFIG_PAX_ASLR
16865 +#ifdef CONFIG_X86_32
16866 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
16868 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16869 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16871 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
16873 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16874 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16878 /* This yields a mask that user programs can use to figure out what
16879 instruction set this CPU supports. This could be done in user space,
16880 @@ -299,17 +314,13 @@ do { \
16882 #define ARCH_DLINFO \
16884 - if (vdso64_enabled) \
16885 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16886 - (unsigned long __force)current->mm->context.vdso); \
16887 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16890 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
16891 #define ARCH_DLINFO_X32 \
16893 - if (vdso64_enabled) \
16894 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16895 - (unsigned long __force)current->mm->context.vdso); \
16896 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16899 #define AT_SYSINFO 32
16900 @@ -324,10 +335,10 @@ else \
16902 #endif /* !CONFIG_X86_32 */
16904 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
16905 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
16907 #define VDSO_ENTRY \
16908 - ((unsigned long)current->mm->context.vdso + \
16909 + (current->mm->context.vdso + \
16910 selected_vdso32->sym___kernel_vsyscall)
16912 struct linux_binprm;
16913 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
16914 index 77a99ac..39ff7f5 100644
16915 --- a/arch/x86/include/asm/emergency-restart.h
16916 +++ b/arch/x86/include/asm/emergency-restart.h
16918 #ifndef _ASM_X86_EMERGENCY_RESTART_H
16919 #define _ASM_X86_EMERGENCY_RESTART_H
16921 -extern void machine_emergency_restart(void);
16922 +extern void machine_emergency_restart(void) __noreturn;
16924 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
16925 diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
16926 index 1c7eefe..d0e4702 100644
16927 --- a/arch/x86/include/asm/floppy.h
16928 +++ b/arch/x86/include/asm/floppy.h
16929 @@ -229,18 +229,18 @@ static struct fd_routine_l {
16930 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
16938 + ._request_dma = request_dma,
16939 + ._free_dma = free_dma,
16940 + ._get_dma_residue = get_dma_residue,
16941 + ._dma_mem_alloc = dma_mem_alloc,
16942 + ._dma_setup = hard_dma_setup
16945 - vdma_request_dma,
16947 - vdma_get_dma_residue,
16950 + ._request_dma = vdma_request_dma,
16951 + ._free_dma = vdma_nop,
16952 + ._get_dma_residue = vdma_get_dma_residue,
16953 + ._dma_mem_alloc = vdma_mem_alloc,
16954 + ._dma_setup = vdma_dma_setup
16958 diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16959 index da5e967..ab07eec 100644
16960 --- a/arch/x86/include/asm/fpu-internal.h
16961 +++ b/arch/x86/include/asm/fpu-internal.h
16962 @@ -151,8 +151,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16963 #define user_insn(insn, output, input...) \
16966 + pax_open_userland(); \
16967 asm volatile(ASM_STAC "\n" \
16968 - "1:" #insn "\n\t" \
16972 "2: " ASM_CLAC "\n" \
16973 ".section .fixup,\"ax\"\n" \
16974 "3: movl $-1,%[err]\n" \
16975 @@ -161,6 +164,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16976 _ASM_EXTABLE(1b, 3b) \
16977 : [err] "=r" (err), output \
16978 : "0"(0), input); \
16979 + pax_close_userland(); \
16983 @@ -327,7 +331,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
16986 "fildl %P[addr]" /* set F?P to defined value */
16987 - : : [addr] "m" (tsk->thread.fpu.has_fpu));
16988 + : : [addr] "m" (cpu_tss[raw_smp_processor_id()].x86_tss.sp0));
16991 return fpu_restore_checking(&tsk->thread.fpu);
16992 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
16993 index b4c1f54..e290c08 100644
16994 --- a/arch/x86/include/asm/futex.h
16995 +++ b/arch/x86/include/asm/futex.h
16997 #include <asm/smap.h>
16999 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
17000 + typecheck(u32 __user *, uaddr); \
17001 asm volatile("\t" ASM_STAC "\n" \
17003 "2:\t" ASM_CLAC "\n" \
17004 @@ -20,15 +21,16 @@
17007 _ASM_EXTABLE(1b, 3b) \
17008 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
17009 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
17010 : "i" (-EFAULT), "0" (oparg), "1" (0))
17012 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
17013 + typecheck(u32 __user *, uaddr); \
17014 asm volatile("\t" ASM_STAC "\n" \
17015 "1:\tmovl %2, %0\n" \
17016 "\tmovl\t%0, %3\n" \
17018 - "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
17019 + "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
17021 "3:\t" ASM_CLAC "\n" \
17022 "\t.section .fixup,\"ax\"\n" \
17024 _ASM_EXTABLE(1b, 4b) \
17025 _ASM_EXTABLE(2b, 4b) \
17026 : "=&a" (oldval), "=&r" (ret), \
17027 - "+m" (*uaddr), "=&r" (tem) \
17028 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
17029 : "r" (oparg), "i" (-EFAULT), "1" (0))
17031 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17032 @@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17034 pagefault_disable();
17036 + pax_open_userland();
17039 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
17040 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
17043 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
17044 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
17048 @@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17052 + pax_close_userland();
17054 pagefault_enable();
17056 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
17057 index e9571dd..df5f542 100644
17058 --- a/arch/x86/include/asm/hw_irq.h
17059 +++ b/arch/x86/include/asm/hw_irq.h
17060 @@ -160,8 +160,8 @@ static inline void unlock_vector_lock(void) {}
17061 #endif /* CONFIG_X86_LOCAL_APIC */
17064 -extern atomic_t irq_err_count;
17065 -extern atomic_t irq_mis_count;
17066 +extern atomic_unchecked_t irq_err_count;
17067 +extern atomic_unchecked_t irq_mis_count;
17070 extern void eisa_set_level_irq(unsigned int irq);
17071 diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17072 index ccffa53..3c90c87 100644
17073 --- a/arch/x86/include/asm/i8259.h
17074 +++ b/arch/x86/include/asm/i8259.h
17075 @@ -62,7 +62,7 @@ struct legacy_pic {
17076 void (*init)(int auto_eoi);
17077 int (*irq_pending)(unsigned int irq);
17078 void (*make_irq)(unsigned int irq);
17082 extern struct legacy_pic *legacy_pic;
17083 extern struct legacy_pic null_legacy_pic;
17084 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17085 index 34a5b93..27e40a6 100644
17086 --- a/arch/x86/include/asm/io.h
17087 +++ b/arch/x86/include/asm/io.h
17088 @@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17089 "m" (*(volatile type __force *)addr) barrier); }
17091 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17092 -build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17093 -build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17094 +build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17095 +build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17097 build_mmio_read(__readb, "b", unsigned char, "=q", )
17098 -build_mmio_read(__readw, "w", unsigned short, "=r", )
17099 -build_mmio_read(__readl, "l", unsigned int, "=r", )
17100 +build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17101 +build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17103 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17104 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17105 @@ -113,7 +113,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17109 -static inline phys_addr_t virt_to_phys(volatile void *address)
17110 +static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17112 return __pa(address);
17114 @@ -189,7 +189,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17115 return ioremap_nocache(offset, size);
17118 -extern void iounmap(volatile void __iomem *addr);
17119 +extern void iounmap(const volatile void __iomem *addr);
17121 extern void set_iounmap_nonlazy(void);
17123 @@ -199,6 +199,17 @@ extern void set_iounmap_nonlazy(void);
17125 #include <linux/vmalloc.h>
17127 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17128 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17130 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17133 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17135 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17139 * Convert a virtual cached pointer to an uncached pointer
17141 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17142 index b77f5ed..a2f791e 100644
17143 --- a/arch/x86/include/asm/irqflags.h
17144 +++ b/arch/x86/include/asm/irqflags.h
17145 @@ -137,6 +137,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17149 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
17150 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17151 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
17152 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17155 #define INTERRUPT_RETURN iret
17156 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17157 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17158 index 4421b5d..8543006 100644
17159 --- a/arch/x86/include/asm/kprobes.h
17160 +++ b/arch/x86/include/asm/kprobes.h
17161 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
17162 #define RELATIVEJUMP_SIZE 5
17163 #define RELATIVECALL_OPCODE 0xe8
17164 #define RELATIVE_ADDR_SIZE 4
17165 -#define MAX_STACK_SIZE 64
17166 -#define MIN_STACK_SIZE(ADDR) \
17167 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17168 - THREAD_SIZE - (unsigned long)(ADDR))) \
17169 - ? (MAX_STACK_SIZE) \
17170 - : (((unsigned long)current_thread_info()) + \
17171 - THREAD_SIZE - (unsigned long)(ADDR)))
17172 +#define MAX_STACK_SIZE 64UL
17173 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17175 #define flush_insn_slot(p) do { } while (0)
17177 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17178 index 4ad6560..75c7bdd 100644
17179 --- a/arch/x86/include/asm/local.h
17180 +++ b/arch/x86/include/asm/local.h
17181 @@ -10,33 +10,97 @@ typedef struct {
17186 + atomic_long_unchecked_t a;
17187 +} local_unchecked_t;
17189 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17191 #define local_read(l) atomic_long_read(&(l)->a)
17192 +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17193 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17194 +#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17196 static inline void local_inc(local_t *l)
17198 - asm volatile(_ASM_INC "%0"
17199 + asm volatile(_ASM_INC "%0\n"
17201 +#ifdef CONFIG_PAX_REFCOUNT
17205 + _ASM_EXTABLE(0b, 0b)
17208 + : "+m" (l->a.counter));
17211 +static inline void local_inc_unchecked(local_unchecked_t *l)
17213 + asm volatile(_ASM_INC "%0\n"
17214 : "+m" (l->a.counter));
17217 static inline void local_dec(local_t *l)
17219 - asm volatile(_ASM_DEC "%0"
17220 + asm volatile(_ASM_DEC "%0\n"
17222 +#ifdef CONFIG_PAX_REFCOUNT
17226 + _ASM_EXTABLE(0b, 0b)
17229 + : "+m" (l->a.counter));
17232 +static inline void local_dec_unchecked(local_unchecked_t *l)
17234 + asm volatile(_ASM_DEC "%0\n"
17235 : "+m" (l->a.counter));
17238 static inline void local_add(long i, local_t *l)
17240 - asm volatile(_ASM_ADD "%1,%0"
17241 + asm volatile(_ASM_ADD "%1,%0\n"
17243 +#ifdef CONFIG_PAX_REFCOUNT
17245 + _ASM_SUB "%1,%0\n"
17247 + _ASM_EXTABLE(0b, 0b)
17250 + : "+m" (l->a.counter)
17254 +static inline void local_add_unchecked(long i, local_unchecked_t *l)
17256 + asm volatile(_ASM_ADD "%1,%0\n"
17257 : "+m" (l->a.counter)
17261 static inline void local_sub(long i, local_t *l)
17263 - asm volatile(_ASM_SUB "%1,%0"
17264 + asm volatile(_ASM_SUB "%1,%0\n"
17266 +#ifdef CONFIG_PAX_REFCOUNT
17268 + _ASM_ADD "%1,%0\n"
17270 + _ASM_EXTABLE(0b, 0b)
17273 + : "+m" (l->a.counter)
17277 +static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17279 + asm volatile(_ASM_SUB "%1,%0\n"
17280 : "+m" (l->a.counter)
17283 @@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17285 static inline int local_sub_and_test(long i, local_t *l)
17287 - GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17288 + GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17292 @@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17294 static inline int local_dec_and_test(local_t *l)
17296 - GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17297 + GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17301 @@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17303 static inline int local_inc_and_test(local_t *l)
17305 - GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17306 + GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17310 @@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17312 static inline int local_add_negative(long i, local_t *l)
17314 - GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17315 + GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17319 @@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17320 static inline long local_add_return(long i, local_t *l)
17323 + asm volatile(_ASM_XADD "%0, %1\n"
17325 +#ifdef CONFIG_PAX_REFCOUNT
17327 + _ASM_MOV "%0,%1\n"
17329 + _ASM_EXTABLE(0b, 0b)
17332 + : "+r" (i), "+m" (l->a.counter)
17338 + * local_add_return_unchecked - add and return
17339 + * @i: integer value to add
17340 + * @l: pointer to type local_unchecked_t
17342 + * Atomically adds @i to @l and returns @i + @l
17344 +static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17347 asm volatile(_ASM_XADD "%0, %1;"
17348 : "+r" (i), "+m" (l->a.counter)
17350 @@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17352 #define local_cmpxchg(l, o, n) \
17353 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17354 +#define local_cmpxchg_unchecked(l, o, n) \
17355 + (cmpxchg_local(&((l)->a.counter), (o), (n)))
17356 /* Always has a lock prefix */
17357 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17359 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17360 new file mode 100644
17361 index 0000000..2bfd3ba
17363 +++ b/arch/x86/include/asm/mman.h
17365 +#ifndef _X86_MMAN_H
17366 +#define _X86_MMAN_H
17368 +#include <uapi/asm/mman.h>
17371 +#ifndef __ASSEMBLY__
17372 +#ifdef CONFIG_X86_32
17373 +#define arch_mmap_check i386_mmap_check
17374 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17379 +#endif /* X86_MMAN_H */
17380 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17381 index 09b9620..923aecd 100644
17382 --- a/arch/x86/include/asm/mmu.h
17383 +++ b/arch/x86/include/asm/mmu.h
17385 * we put the segment information here.
17389 + struct desc_struct *ldt;
17392 #ifdef CONFIG_X86_64
17393 @@ -18,7 +18,19 @@ typedef struct {
17397 - void __user *vdso;
17398 + unsigned long vdso;
17400 +#ifdef CONFIG_X86_32
17401 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17402 + unsigned long user_cs_base;
17403 + unsigned long user_cs_limit;
17405 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17406 + cpumask_t cpu_user_cs_mask;
17412 atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */
17414 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17415 index 883f6b93..bb405b5 100644
17416 --- a/arch/x86/include/asm/mmu_context.h
17417 +++ b/arch/x86/include/asm/mmu_context.h
17418 @@ -42,6 +42,20 @@ void destroy_context(struct mm_struct *mm);
17420 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17423 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17424 + if (!(static_cpu_has(X86_FEATURE_PCID))) {
17428 + pax_open_kernel();
17429 + pgd = get_cpu_pgd(smp_processor_id(), kernel);
17430 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17431 + set_pgd_batched(pgd+i, native_make_pgd(0));
17432 + pax_close_kernel();
17437 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17438 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17439 @@ -52,16 +66,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17440 struct task_struct *tsk)
17442 unsigned cpu = smp_processor_id();
17443 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17444 + int tlbstate = TLBSTATE_OK;
17447 if (likely(prev != next)) {
17449 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17450 + tlbstate = this_cpu_read(cpu_tlbstate.state);
17452 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17453 this_cpu_write(cpu_tlbstate.active_mm, next);
17455 cpumask_set_cpu(cpu, mm_cpumask(next));
17457 /* Re-load page tables */
17458 +#ifdef CONFIG_PAX_PER_CPU_PGD
17459 + pax_open_kernel();
17461 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17462 + if (static_cpu_has(X86_FEATURE_PCID))
17463 + __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17467 + __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17468 + __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17469 + pax_close_kernel();
17470 + BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17472 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17473 + if (static_cpu_has(X86_FEATURE_PCID)) {
17474 + if (static_cpu_has(X86_FEATURE_INVPCID)) {
17475 + u64 descriptor[2];
17476 + descriptor[0] = PCID_USER;
17477 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17478 + if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17479 + descriptor[0] = PCID_KERNEL;
17480 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17483 + write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17484 + if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17485 + write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17487 + write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17492 + load_cr3(get_cpu_pgd(cpu, kernel));
17494 load_cr3(next->pgd);
17496 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17498 /* Stop flush ipis for the previous mm */
17499 @@ -84,9 +141,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17501 if (unlikely(prev->context.ldt != next->context.ldt))
17502 load_LDT_nolock(&next->context);
17504 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17505 + if (!(__supported_pte_mask & _PAGE_NX)) {
17506 + smp_mb__before_atomic();
17507 + cpumask_clear_cpu(cpu, &prev->context.cpu_user_cs_mask);
17508 + smp_mb__after_atomic();
17509 + cpumask_set_cpu(cpu, &next->context.cpu_user_cs_mask);
17513 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17514 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17515 + prev->context.user_cs_limit != next->context.user_cs_limit))
17516 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17518 + else if (unlikely(tlbstate != TLBSTATE_OK))
17519 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17526 +#ifdef CONFIG_PAX_PER_CPU_PGD
17527 + pax_open_kernel();
17529 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17530 + if (static_cpu_has(X86_FEATURE_PCID))
17531 + __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17535 + __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17536 + __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17537 + pax_close_kernel();
17538 + BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17540 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17541 + if (static_cpu_has(X86_FEATURE_PCID)) {
17542 + if (static_cpu_has(X86_FEATURE_INVPCID)) {
17543 + u64 descriptor[2];
17544 + descriptor[0] = PCID_USER;
17545 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17546 + if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17547 + descriptor[0] = PCID_KERNEL;
17548 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17551 + write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17552 + if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17553 + write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17555 + write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17560 + load_cr3(get_cpu_pgd(cpu, kernel));
17565 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17566 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17568 @@ -103,13 +218,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17569 * tlb flush IPI delivery. We must reload CR3
17570 * to make sure to use no freed page tables.
17573 +#ifndef CONFIG_PAX_PER_CPU_PGD
17574 load_cr3(next->pgd);
17575 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17579 load_LDT_nolock(&next->context);
17581 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17582 + if (!(__supported_pte_mask & _PAGE_NX))
17583 + cpumask_set_cpu(cpu, &next->context.cpu_user_cs_mask);
17586 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17587 +#ifdef CONFIG_PAX_PAGEEXEC
17588 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17590 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17599 #define activate_mm(prev, next) \
17600 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17601 index e3b7819..b257c64 100644
17602 --- a/arch/x86/include/asm/module.h
17603 +++ b/arch/x86/include/asm/module.h
17606 #ifdef CONFIG_X86_64
17607 /* X86_64 does not define MODULE_PROC_FAMILY */
17608 +#define MODULE_PROC_FAMILY ""
17609 #elif defined CONFIG_M486
17610 #define MODULE_PROC_FAMILY "486 "
17611 #elif defined CONFIG_M586
17613 #error unknown processor family
17616 -#ifdef CONFIG_X86_32
17617 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17618 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17619 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17620 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17621 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17623 +#define MODULE_PAX_KERNEXEC ""
17626 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17627 +#define MODULE_PAX_UDEREF "UDEREF "
17629 +#define MODULE_PAX_UDEREF ""
17632 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17634 #endif /* _ASM_X86_MODULE_H */
17635 diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17636 index 5f2fc44..106caa6 100644
17637 --- a/arch/x86/include/asm/nmi.h
17638 +++ b/arch/x86/include/asm/nmi.h
17639 @@ -36,26 +36,35 @@ enum {
17641 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
17646 + const struct nmiaction *action;
17647 + u64 max_duration;
17648 + struct irq_work irq_work;
17652 struct list_head list;
17653 nmi_handler_t handler;
17654 - u64 max_duration;
17655 - struct irq_work irq_work;
17656 unsigned long flags;
17659 + struct nmiwork *work;
17662 #define register_nmi_handler(t, fn, fg, n, init...) \
17664 - static struct nmiaction init fn##_na = { \
17665 + static struct nmiwork fn##_nw; \
17666 + static const struct nmiaction init fn##_na = { \
17670 + .work = &fn##_nw, \
17672 __register_nmi_handler((t), &fn##_na); \
17675 -int __register_nmi_handler(unsigned int, struct nmiaction *);
17676 +int __register_nmi_handler(unsigned int, const struct nmiaction *);
17678 void unregister_nmi_handler(unsigned int, const char *);
17680 diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17681 index 802dde3..9183e68 100644
17682 --- a/arch/x86/include/asm/page.h
17683 +++ b/arch/x86/include/asm/page.h
17684 @@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17685 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17687 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17688 +#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17690 #define __boot_va(x) __va(x)
17691 #define __boot_pa(x) __pa(x)
17692 @@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17693 * virt_to_page(kaddr) returns a valid pointer if and only if
17694 * virt_addr_valid(kaddr) returns true.
17696 -#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17697 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
17698 extern bool __virt_addr_valid(unsigned long kaddr);
17699 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
17701 +#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
17702 +#define virt_to_page(kaddr) \
17704 + const void *__kaddr = (const void *)(kaddr); \
17705 + BUG_ON(!virt_addr_valid(__kaddr)); \
17706 + pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
17709 +#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17712 #endif /* __ASSEMBLY__ */
17714 #include <asm-generic/memory_model.h>
17715 diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17716 index b3bebf9..13ac22e 100644
17717 --- a/arch/x86/include/asm/page_64.h
17718 +++ b/arch/x86/include/asm/page_64.h
17721 /* duplicated to the one in bootmem.h */
17722 extern unsigned long max_pfn;
17723 -extern unsigned long phys_base;
17724 +extern const unsigned long phys_base;
17726 -static inline unsigned long __phys_addr_nodebug(unsigned long x)
17727 +static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17729 unsigned long y = x - __START_KERNEL_map;
17731 @@ -20,8 +20,8 @@ static inline unsigned long __phys_addr_nodebug(unsigned long x)
17734 #ifdef CONFIG_DEBUG_VIRTUAL
17735 -extern unsigned long __phys_addr(unsigned long);
17736 -extern unsigned long __phys_addr_symbol(unsigned long);
17737 +extern unsigned long __intentional_overflow(-1) __phys_addr(unsigned long);
17738 +extern unsigned long __intentional_overflow(-1) __phys_addr_symbol(unsigned long);
17740 #define __phys_addr(x) __phys_addr_nodebug(x)
17741 #define __phys_addr_symbol(x) \
17742 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17743 index 8957810..f34efb4 100644
17744 --- a/arch/x86/include/asm/paravirt.h
17745 +++ b/arch/x86/include/asm/paravirt.h
17746 @@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17747 return (pmd_t) { ret };
17750 -static inline pmdval_t pmd_val(pmd_t pmd)
17751 +static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17755 @@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17759 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17761 + pgdval_t val = native_pgd_val(pgd);
17763 + if (sizeof(pgdval_t) > sizeof(long))
17764 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17765 + val, (u64)val >> 32);
17767 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17771 static inline void pgd_clear(pgd_t *pgdp)
17773 set_pgd(pgdp, __pgd(0));
17774 @@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17775 pv_mmu_ops.set_fixmap(idx, phys, flags);
17778 +#ifdef CONFIG_PAX_KERNEXEC
17779 +static inline unsigned long pax_open_kernel(void)
17781 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17784 +static inline unsigned long pax_close_kernel(void)
17786 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17789 +static inline unsigned long pax_open_kernel(void) { return 0; }
17790 +static inline unsigned long pax_close_kernel(void) { return 0; }
17793 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17795 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17796 @@ -906,7 +933,7 @@ extern void default_banner(void);
17798 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17799 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17800 -#define PARA_INDIRECT(addr) *%cs:addr
17801 +#define PARA_INDIRECT(addr) *%ss:addr
17804 #define INTERRUPT_RETURN \
17805 @@ -976,6 +1003,21 @@ extern void default_banner(void);
17806 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
17808 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
17810 +#define GET_CR0_INTO_RDI \
17811 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17814 +#define SET_RDI_INTO_CR0 \
17815 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17817 +#define GET_CR3_INTO_RDI \
17818 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17821 +#define SET_RDI_INTO_CR3 \
17822 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17824 #endif /* CONFIG_X86_32 */
17826 #endif /* __ASSEMBLY__ */
17827 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17828 index f7b0b5c..cdd33f9 100644
17829 --- a/arch/x86/include/asm/paravirt_types.h
17830 +++ b/arch/x86/include/asm/paravirt_types.h
17831 @@ -84,7 +84,7 @@ struct pv_init_ops {
17833 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17834 unsigned long addr, unsigned len);
17836 +} __no_const __no_randomize_layout;
17839 struct pv_lazy_ops {
17840 @@ -92,13 +92,13 @@ struct pv_lazy_ops {
17841 void (*enter)(void);
17842 void (*leave)(void);
17843 void (*flush)(void);
17845 +} __no_randomize_layout;
17847 struct pv_time_ops {
17848 unsigned long long (*sched_clock)(void);
17849 unsigned long long (*steal_clock)(int cpu);
17850 unsigned long (*get_tsc_khz)(void);
17852 +} __no_const __no_randomize_layout;
17854 struct pv_cpu_ops {
17855 /* hooks for various privileged instructions */
17856 @@ -192,7 +192,7 @@ struct pv_cpu_ops {
17858 void (*start_context_switch)(struct task_struct *prev);
17859 void (*end_context_switch)(struct task_struct *next);
17861 +} __no_const __no_randomize_layout;
17863 struct pv_irq_ops {
17865 @@ -215,7 +215,7 @@ struct pv_irq_ops {
17866 #ifdef CONFIG_X86_64
17867 void (*adjust_exception_frame)(void);
17870 +} __no_randomize_layout;
17872 struct pv_apic_ops {
17873 #ifdef CONFIG_X86_LOCAL_APIC
17874 @@ -223,7 +223,7 @@ struct pv_apic_ops {
17875 unsigned long start_eip,
17876 unsigned long start_esp);
17879 +} __no_const __no_randomize_layout;
17881 struct pv_mmu_ops {
17882 unsigned long (*read_cr2)(void);
17883 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
17884 struct paravirt_callee_save make_pud;
17886 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
17887 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
17888 #endif /* CONFIG_PGTABLE_LEVELS == 4 */
17889 #endif /* CONFIG_PGTABLE_LEVELS >= 3 */
17891 @@ -324,7 +325,13 @@ struct pv_mmu_ops {
17892 an mfn. We can tell which is which from the index. */
17893 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
17894 phys_addr_t phys, pgprot_t flags);
17897 +#ifdef CONFIG_PAX_KERNEXEC
17898 + unsigned long (*pax_open_kernel)(void);
17899 + unsigned long (*pax_close_kernel)(void);
17902 +} __no_randomize_layout;
17904 struct arch_spinlock;
17906 @@ -336,11 +343,14 @@ typedef u16 __ticket_t;
17907 struct pv_lock_ops {
17908 struct paravirt_callee_save lock_spinning;
17909 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
17911 +} __no_randomize_layout;
17913 /* This contains all the paravirt structures: we get a convenient
17914 * number for each function using the offset which we use to indicate
17915 - * what to patch. */
17917 + * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
17920 struct paravirt_patch_template {
17921 struct pv_init_ops pv_init_ops;
17922 struct pv_time_ops pv_time_ops;
17923 @@ -349,7 +359,7 @@ struct paravirt_patch_template {
17924 struct pv_apic_ops pv_apic_ops;
17925 struct pv_mmu_ops pv_mmu_ops;
17926 struct pv_lock_ops pv_lock_ops;
17928 +} __no_randomize_layout;
17930 extern struct pv_info pv_info;
17931 extern struct pv_init_ops pv_init_ops;
17932 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
17933 index bf7f8b5..ca5799d 100644
17934 --- a/arch/x86/include/asm/pgalloc.h
17935 +++ b/arch/x86/include/asm/pgalloc.h
17936 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
17937 pmd_t *pmd, pte_t *pte)
17939 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17940 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
17943 +static inline void pmd_populate_user(struct mm_struct *mm,
17944 + pmd_t *pmd, pte_t *pte)
17946 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17947 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
17950 @@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
17952 #ifdef CONFIG_X86_PAE
17953 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
17954 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
17956 + pud_populate(mm, pudp, pmd);
17958 #else /* !CONFIG_X86_PAE */
17959 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17961 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17962 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
17965 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17967 + paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17968 + set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
17970 #endif /* CONFIG_X86_PAE */
17972 #if CONFIG_PGTABLE_LEVELS > 3
17973 @@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17974 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
17977 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17979 + paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
17980 + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
17983 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
17985 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
17986 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
17987 index fd74a11..35fd5af 100644
17988 --- a/arch/x86/include/asm/pgtable-2level.h
17989 +++ b/arch/x86/include/asm/pgtable-2level.h
17990 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
17992 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17994 + pax_open_kernel();
17996 + pax_close_kernel();
17999 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18000 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
18001 index cdaa58c..e61122b 100644
18002 --- a/arch/x86/include/asm/pgtable-3level.h
18003 +++ b/arch/x86/include/asm/pgtable-3level.h
18004 @@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18006 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18008 + pax_open_kernel();
18009 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
18010 + pax_close_kernel();
18013 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18015 + pax_open_kernel();
18016 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
18017 + pax_close_kernel();
18021 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
18022 index fe57e7a..0573d42 100644
18023 --- a/arch/x86/include/asm/pgtable.h
18024 +++ b/arch/x86/include/asm/pgtable.h
18025 @@ -47,6 +47,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18027 #ifndef __PAGETABLE_PUD_FOLDED
18028 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
18029 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
18030 #define pgd_clear(pgd) native_pgd_clear(pgd)
18033 @@ -84,12 +85,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18035 #define arch_end_context_switch(prev) do {} while(0)
18037 +#define pax_open_kernel() native_pax_open_kernel()
18038 +#define pax_close_kernel() native_pax_close_kernel()
18039 #endif /* CONFIG_PARAVIRT */
18041 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
18042 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
18044 +#ifdef CONFIG_PAX_KERNEXEC
18045 +static inline unsigned long native_pax_open_kernel(void)
18047 + unsigned long cr0;
18049 + preempt_disable();
18051 + cr0 = read_cr0() ^ X86_CR0_WP;
18052 + BUG_ON(cr0 & X86_CR0_WP);
18055 + return cr0 ^ X86_CR0_WP;
18058 +static inline unsigned long native_pax_close_kernel(void)
18060 + unsigned long cr0;
18063 + cr0 = read_cr0() ^ X86_CR0_WP;
18064 + BUG_ON(!(cr0 & X86_CR0_WP));
18067 + preempt_enable_no_resched();
18068 + return cr0 ^ X86_CR0_WP;
18071 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
18072 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
18076 * The following only work if pte_present() is true.
18077 * Undefined behaviour if not..
18079 +static inline int pte_user(pte_t pte)
18081 + return pte_val(pte) & _PAGE_USER;
18084 static inline int pte_dirty(pte_t pte)
18086 return pte_flags(pte) & _PAGE_DIRTY;
18087 @@ -150,6 +192,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18088 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18091 +static inline unsigned long pgd_pfn(pgd_t pgd)
18093 + return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18096 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18098 static inline int pmd_large(pmd_t pte)
18099 @@ -203,9 +250,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18100 return pte_clear_flags(pte, _PAGE_RW);
18103 +static inline pte_t pte_mkread(pte_t pte)
18105 + return __pte(pte_val(pte) | _PAGE_USER);
18108 static inline pte_t pte_mkexec(pte_t pte)
18110 - return pte_clear_flags(pte, _PAGE_NX);
18111 +#ifdef CONFIG_X86_PAE
18112 + if (__supported_pte_mask & _PAGE_NX)
18113 + return pte_clear_flags(pte, _PAGE_NX);
18116 + return pte_set_flags(pte, _PAGE_USER);
18119 +static inline pte_t pte_exprotect(pte_t pte)
18121 +#ifdef CONFIG_X86_PAE
18122 + if (__supported_pte_mask & _PAGE_NX)
18123 + return pte_set_flags(pte, _PAGE_NX);
18126 + return pte_clear_flags(pte, _PAGE_USER);
18129 static inline pte_t pte_mkdirty(pte_t pte)
18130 @@ -420,6 +487,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18133 #ifndef __ASSEMBLY__
18135 +#ifdef CONFIG_PAX_PER_CPU_PGD
18136 +extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18137 +enum cpu_pgd_type {kernel = 0, user = 1};
18138 +static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18140 + return cpu_pgd[cpu][type];
18144 #include <linux/mm_types.h>
18145 #include <linux/mmdebug.h>
18146 #include <linux/log2.h>
18147 @@ -571,7 +648,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18148 * Currently stuck as a macro due to indirect forward reference to
18149 * linux/mmzone.h's __section_mem_map_addr() definition:
18151 -#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18152 +#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18154 /* Find an entry in the second-level page table.. */
18155 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18156 @@ -611,7 +688,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18157 * Currently stuck as a macro due to indirect forward reference to
18158 * linux/mmzone.h's __section_mem_map_addr() definition:
18160 -#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18161 +#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18163 /* to find an entry in a page-table-directory. */
18164 static inline unsigned long pud_index(unsigned long address)
18165 @@ -626,7 +703,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18167 static inline int pgd_bad(pgd_t pgd)
18169 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18170 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18173 static inline int pgd_none(pgd_t pgd)
18174 @@ -649,7 +726,12 @@ static inline int pgd_none(pgd_t pgd)
18175 * pgd_offset() returns a (pgd_t *)
18176 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18178 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18179 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18181 +#ifdef CONFIG_PAX_PER_CPU_PGD
18182 +#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18186 * a shortcut which implies the use of the kernel's pgd, instead
18188 @@ -660,6 +742,25 @@ static inline int pgd_none(pgd_t pgd)
18189 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18190 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18192 +#ifdef CONFIG_X86_32
18193 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18195 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18196 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18198 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18199 +#ifdef __ASSEMBLY__
18200 +#define pax_user_shadow_base pax_user_shadow_base(%rip)
18202 +extern unsigned long pax_user_shadow_base;
18203 +extern pgdval_t clone_pgd_mask;
18206 +#define pax_user_shadow_base (0UL)
18211 #ifndef __ASSEMBLY__
18213 extern int direct_gbpages;
18214 @@ -826,11 +927,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18215 * dst and src can be on the same page, but the range must not overlap,
18216 * and must not cross a page boundary.
18218 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18219 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18221 - memcpy(dst, src, count * sizeof(pgd_t));
18222 + pax_open_kernel();
18225 + pax_close_kernel();
18228 +#ifdef CONFIG_PAX_PER_CPU_PGD
18229 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18232 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18233 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18235 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18238 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18239 static inline int page_level_shift(enum pg_level level)
18241 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18242 index b6c0b40..3535d47 100644
18243 --- a/arch/x86/include/asm/pgtable_32.h
18244 +++ b/arch/x86/include/asm/pgtable_32.h
18247 struct vm_area_struct;
18249 -extern pgd_t swapper_pg_dir[1024];
18250 -extern pgd_t initial_page_table[1024];
18252 static inline void pgtable_cache_init(void) { }
18253 static inline void check_pgt_cache(void) { }
18254 void paging_init(void);
18255 @@ -45,6 +42,12 @@ void paging_init(void);
18256 # include <asm/pgtable-2level.h>
18259 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18260 +extern pgd_t initial_page_table[PTRS_PER_PGD];
18261 +#ifdef CONFIG_X86_PAE
18262 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18265 #if defined(CONFIG_HIGHPTE)
18266 #define pte_offset_map(dir, address) \
18267 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18268 @@ -59,12 +62,17 @@ void paging_init(void);
18269 /* Clear a kernel PTE and flush it from the TLB */
18270 #define kpte_clear_flush(ptep, vaddr) \
18272 + pax_open_kernel(); \
18273 pte_clear(&init_mm, (vaddr), (ptep)); \
18274 + pax_close_kernel(); \
18275 __flush_tlb_one((vaddr)); \
18278 #endif /* !__ASSEMBLY__ */
18280 +#define HAVE_ARCH_UNMAPPED_AREA
18281 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18284 * kern_addr_valid() is (1) for FLATMEM and (0) for
18285 * SPARSEMEM and DISCONTIGMEM
18286 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18287 index 9fb2f2b..b04b4bf 100644
18288 --- a/arch/x86/include/asm/pgtable_32_types.h
18289 +++ b/arch/x86/include/asm/pgtable_32_types.h
18292 #ifdef CONFIG_X86_PAE
18293 # include <asm/pgtable-3level_types.h>
18294 -# define PMD_SIZE (1UL << PMD_SHIFT)
18295 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18296 # define PMD_MASK (~(PMD_SIZE - 1))
18298 # include <asm/pgtable-2level_types.h>
18299 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18300 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18303 +#ifdef CONFIG_PAX_KERNEXEC
18304 +#ifndef __ASSEMBLY__
18305 +extern unsigned char MODULES_EXEC_VADDR[];
18306 +extern unsigned char MODULES_EXEC_END[];
18308 +#include <asm/boot.h>
18309 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18310 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18312 +#define ktla_ktva(addr) (addr)
18313 +#define ktva_ktla(addr) (addr)
18316 #define MODULES_VADDR VMALLOC_START
18317 #define MODULES_END VMALLOC_END
18318 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
18319 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18320 index 2ee7811..55aca24 100644
18321 --- a/arch/x86/include/asm/pgtable_64.h
18322 +++ b/arch/x86/include/asm/pgtable_64.h
18323 @@ -16,11 +16,16 @@
18325 extern pud_t level3_kernel_pgt[512];
18326 extern pud_t level3_ident_pgt[512];
18327 +extern pud_t level3_vmalloc_start_pgt[512];
18328 +extern pud_t level3_vmalloc_end_pgt[512];
18329 +extern pud_t level3_vmemmap_pgt[512];
18330 +extern pud_t level2_vmemmap_pgt[512];
18331 extern pmd_t level2_kernel_pgt[512];
18332 extern pmd_t level2_fixmap_pgt[512];
18333 -extern pmd_t level2_ident_pgt[512];
18334 -extern pte_t level1_fixmap_pgt[512];
18335 -extern pgd_t init_level4_pgt[];
18336 +extern pmd_t level2_ident_pgt[2][512];
18337 +extern pte_t level1_fixmap_pgt[3][512];
18338 +extern pte_t level1_vsyscall_pgt[512];
18339 +extern pgd_t init_level4_pgt[512];
18341 #define swapper_pg_dir init_level4_pgt
18343 @@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18345 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18347 + pax_open_kernel();
18349 + pax_close_kernel();
18352 static inline void native_pmd_clear(pmd_t *pmd)
18353 @@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18355 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18357 + pax_open_kernel();
18359 + pax_close_kernel();
18362 static inline void native_pud_clear(pud_t *pud)
18363 @@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
18365 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18367 + pax_open_kernel();
18369 + pax_close_kernel();
18372 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18377 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18378 index e6844df..432b56e 100644
18379 --- a/arch/x86/include/asm/pgtable_64_types.h
18380 +++ b/arch/x86/include/asm/pgtable_64_types.h
18381 @@ -60,11 +60,16 @@ typedef struct { pteval_t pte; } pte_t;
18382 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18383 #define MODULES_END _AC(0xffffffffff000000, UL)
18384 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18385 +#define MODULES_EXEC_VADDR MODULES_VADDR
18386 +#define MODULES_EXEC_END MODULES_END
18387 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
18388 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
18389 #define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
18390 #define EFI_VA_END (-68 * (_AC(1, UL) << 30))
18392 +#define ktla_ktva(addr) (addr)
18393 +#define ktva_ktla(addr) (addr)
18395 #define EARLY_DYNAMIC_PAGE_TABLES 64
18397 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
18398 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18399 index 78f0c8c..4424bb0 100644
18400 --- a/arch/x86/include/asm/pgtable_types.h
18401 +++ b/arch/x86/include/asm/pgtable_types.h
18404 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18405 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18407 +#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18408 #define _PAGE_NX (_AT(pteval_t, 0))
18410 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18413 #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
18414 @@ -141,6 +143,9 @@ enum page_cache_mode {
18415 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18418 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
18419 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
18421 #define __PAGE_KERNEL_EXEC \
18422 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18423 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18424 @@ -148,7 +153,7 @@ enum page_cache_mode {
18425 #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
18426 #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
18427 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE)
18428 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18429 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18430 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18431 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18432 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
18433 @@ -194,7 +199,7 @@ enum page_cache_mode {
18434 #ifdef CONFIG_X86_64
18435 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
18437 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18438 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18439 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18440 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18442 @@ -233,7 +238,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18444 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18448 +#if CONFIG_PGTABLE_LEVELS == 3
18449 +#include <asm-generic/pgtable-nopud.h>
18452 +#if CONFIG_PGTABLE_LEVELS == 2
18453 +#include <asm-generic/pgtable-nopmd.h>
18456 +#ifndef __ASSEMBLY__
18457 #if CONFIG_PGTABLE_LEVELS > 3
18458 typedef struct { pudval_t pud; } pud_t;
18460 @@ -247,8 +262,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18464 -#include <asm-generic/pgtable-nopud.h>
18466 static inline pudval_t native_pud_val(pud_t pud)
18468 return native_pgd_val(pud.pgd);
18469 @@ -268,8 +281,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18473 -#include <asm-generic/pgtable-nopmd.h>
18475 static inline pmdval_t native_pmd_val(pmd_t pmd)
18477 return native_pgd_val(pmd.pud.pgd);
18478 @@ -362,7 +373,6 @@ typedef struct page *pgtable_t;
18480 extern pteval_t __supported_pte_mask;
18481 extern void set_nx(void);
18482 -extern int nx_enabled;
18484 #define pgprot_writecombine pgprot_writecombine
18485 extern pgprot_t pgprot_writecombine(pgprot_t prot);
18486 diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18487 index 8f327184..368fb29 100644
18488 --- a/arch/x86/include/asm/preempt.h
18489 +++ b/arch/x86/include/asm/preempt.h
18490 @@ -84,7 +84,7 @@ static __always_inline void __preempt_count_sub(int val)
18492 static __always_inline bool __preempt_count_dec_and_test(void)
18494 - GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18495 + GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18499 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18500 index 23ba676..6584489 100644
18501 --- a/arch/x86/include/asm/processor.h
18502 +++ b/arch/x86/include/asm/processor.h
18503 @@ -130,7 +130,7 @@ struct cpuinfo_x86 {
18504 /* Index into per_cpu list: */
18508 +} __randomize_layout;
18510 #define X86_VENDOR_INTEL 0
18511 #define X86_VENDOR_CYRIX 1
18512 @@ -201,9 +201,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18516 +/* invpcid (%rdx),%rax */
18517 +#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18519 +#define INVPCID_SINGLE_ADDRESS 0UL
18520 +#define INVPCID_SINGLE_CONTEXT 1UL
18521 +#define INVPCID_ALL_GLOBAL 2UL
18522 +#define INVPCID_ALL_NONGLOBAL 3UL
18524 +#define PCID_KERNEL 0UL
18525 +#define PCID_USER 1UL
18526 +#define PCID_NOFLUSH (1UL << 63)
18528 static inline void load_cr3(pgd_t *pgdir)
18530 - write_cr3(__pa(pgdir));
18531 + write_cr3(__pa(pgdir) | PCID_KERNEL);
18534 #ifdef CONFIG_X86_32
18535 @@ -300,7 +312,7 @@ struct tss_struct {
18537 } ____cacheline_aligned;
18539 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
18540 +extern struct tss_struct cpu_tss[NR_CPUS];
18542 #ifdef CONFIG_X86_32
18543 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
18544 @@ -500,6 +512,7 @@ struct thread_struct {
18546 unsigned short fsindex;
18547 unsigned short gsindex;
18548 + unsigned short ss;
18550 #ifdef CONFIG_X86_32
18552 @@ -585,10 +598,10 @@ static inline void native_swapgs(void)
18556 -static inline unsigned long current_top_of_stack(void)
18557 +static inline unsigned long current_top_of_stack(unsigned int cpu)
18559 #ifdef CONFIG_X86_64
18560 - return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
18561 + return cpu_tss[cpu].x86_tss.sp0;
18563 /* sp0 on x86_32 is special in and around vm86 mode. */
18564 return this_cpu_read_stable(cpu_current_top_of_stack);
18565 @@ -837,8 +850,15 @@ static inline void spin_lock_prefetch(const void *x)
18567 #define TASK_SIZE PAGE_OFFSET
18568 #define TASK_SIZE_MAX TASK_SIZE
18570 +#ifdef CONFIG_PAX_SEGMEXEC
18571 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18572 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18574 #define STACK_TOP TASK_SIZE
18575 -#define STACK_TOP_MAX STACK_TOP
18578 +#define STACK_TOP_MAX TASK_SIZE
18580 #define INIT_THREAD { \
18581 .sp0 = TOP_OF_INIT_STACK, \
18582 @@ -859,12 +879,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18583 * "struct pt_regs" is possible, but they may contain the
18584 * completely wrong values.
18586 -#define task_pt_regs(task) \
18588 - unsigned long __ptr = (unsigned long)task_stack_page(task); \
18589 - __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \
18590 - ((struct pt_regs *)__ptr) - 1; \
18592 +#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
18594 #define KSTK_ESP(task) (task_pt_regs(task)->sp)
18596 @@ -878,13 +893,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18597 * particular problem by preventing anything from being mapped
18598 * at the maximum canonical address.
18600 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18601 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18603 /* This decides where the kernel will search for a free chunk of vm
18604 * space during mmap's.
18606 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18607 - 0xc0000000 : 0xFFFFe000)
18608 + 0xc0000000 : 0xFFFFf000)
18610 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18611 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18612 @@ -918,6 +933,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18614 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18616 +#ifdef CONFIG_PAX_SEGMEXEC
18617 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18620 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18622 /* Get/set a process' ability to use the timestamp counter instruction */
18623 @@ -962,7 +981,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18627 -extern unsigned long arch_align_stack(unsigned long sp);
18628 +#define arch_align_stack(x) ((x) & ~0xfUL)
18629 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18631 void default_idle(void);
18632 @@ -972,6 +991,6 @@ bool xen_set_default_idle(void);
18633 #define xen_set_default_idle 0
18636 -void stop_this_cpu(void *dummy);
18637 +void stop_this_cpu(void *dummy) __noreturn;
18638 void df_debug(struct pt_regs *regs, long error_code);
18639 #endif /* _ASM_X86_PROCESSOR_H */
18640 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18641 index 5fabf13..7388158 100644
18642 --- a/arch/x86/include/asm/ptrace.h
18643 +++ b/arch/x86/include/asm/ptrace.h
18644 @@ -125,15 +125,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18645 #ifdef CONFIG_X86_64
18646 static inline bool user_64bit_mode(struct pt_regs *regs)
18648 + unsigned long cs = regs->cs & 0xffff;
18649 #ifndef CONFIG_PARAVIRT
18651 * On non-paravirt systems, this is the only long mode CPL 3
18652 * selector. We do not allow long mode selectors in the LDT.
18654 - return regs->cs == __USER_CS;
18655 + return cs == __USER_CS;
18657 /* Headers are too twisted for this to go in paravirt.h. */
18658 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18659 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18663 @@ -180,9 +181,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18664 * Traps from the kernel do not save sp and ss.
18665 * Use the helper function to retrieve sp.
18667 - if (offset == offsetof(struct pt_regs, sp) &&
18668 - regs->cs == __KERNEL_CS)
18669 - return kernel_stack_pointer(regs);
18670 + if (offset == offsetof(struct pt_regs, sp)) {
18671 + unsigned long cs = regs->cs & 0xffff;
18672 + if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18673 + return kernel_stack_pointer(regs);
18676 return *(unsigned long *)((unsigned long)regs + offset);
18678 diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
18679 index ae0e241..e80b10b 100644
18680 --- a/arch/x86/include/asm/qrwlock.h
18681 +++ b/arch/x86/include/asm/qrwlock.h
18683 #define queue_write_unlock queue_write_unlock
18684 static inline void queue_write_unlock(struct qrwlock *lock)
18687 - ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
18689 + ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
18693 diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18694 index 9c6b890..5305f53 100644
18695 --- a/arch/x86/include/asm/realmode.h
18696 +++ b/arch/x86/include/asm/realmode.h
18697 @@ -22,16 +22,14 @@ struct real_mode_header {
18699 /* APM/BIOS reboot */
18700 u32 machine_real_restart_asm;
18701 -#ifdef CONFIG_X86_64
18702 u32 machine_real_restart_seg;
18706 /* This must match data at trampoline_32/64.S */
18707 struct trampoline_header {
18708 #ifdef CONFIG_X86_32
18715 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18716 index a82c4f1..ac45053 100644
18717 --- a/arch/x86/include/asm/reboot.h
18718 +++ b/arch/x86/include/asm/reboot.h
18722 struct machine_ops {
18723 - void (*restart)(char *cmd);
18724 - void (*halt)(void);
18725 - void (*power_off)(void);
18726 + void (* __noreturn restart)(char *cmd);
18727 + void (* __noreturn halt)(void);
18728 + void (* __noreturn power_off)(void);
18729 void (*shutdown)(void);
18730 void (*crash_shutdown)(struct pt_regs *);
18731 - void (*emergency_restart)(void);
18733 + void (* __noreturn emergency_restart)(void);
18736 extern struct machine_ops machine_ops;
18738 diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18739 index 8f7866a..e442f20 100644
18740 --- a/arch/x86/include/asm/rmwcc.h
18741 +++ b/arch/x86/include/asm/rmwcc.h
18744 #ifdef CC_HAVE_ASM_GOTO
18746 -#define __GEN_RMWcc(fullop, var, cc, ...) \
18747 +#ifdef CONFIG_PAX_REFCOUNT
18748 +#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18750 + asm_volatile_goto (fullop \
18753 + ";int $4\n0:\n" \
18754 + _ASM_EXTABLE(0b, 0b) \
18755 + ";j" cc " %l[cc_label]" \
18756 + : : "m" (var), ## __VA_ARGS__ \
18757 + : "memory" : cc_label); \
18763 +#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18765 + asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
18766 + : : "m" (var), ## __VA_ARGS__ \
18767 + : "memory" : cc_label); \
18774 +#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18776 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
18777 : : "m" (var), ## __VA_ARGS__ \
18778 @@ -13,15 +40,46 @@ cc_label: \
18782 -#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18783 - __GEN_RMWcc(op " " arg0, var, cc)
18784 +#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18785 + __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18787 -#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18788 - __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
18789 +#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18790 + __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18792 +#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18793 + __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
18795 +#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18796 + __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
18798 #else /* !CC_HAVE_ASM_GOTO */
18800 -#define __GEN_RMWcc(fullop, var, cc, ...) \
18801 +#ifdef CONFIG_PAX_REFCOUNT
18802 +#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18805 + asm volatile (fullop \
18808 + ";int $4\n0:\n" \
18809 + _ASM_EXTABLE(0b, 0b) \
18810 + "; set" cc " %1" \
18811 + : "+m" (var), "=qm" (c) \
18812 + : __VA_ARGS__ : "memory"); \
18816 +#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18819 + asm volatile (fullop "; set" cc " %1" \
18820 + : "+m" (var), "=qm" (c) \
18821 + : __VA_ARGS__ : "memory"); \
18826 +#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18829 asm volatile (fullop "; set" cc " %1" \
18830 @@ -30,11 +88,17 @@ do { \
18834 -#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18835 - __GEN_RMWcc(op " " arg0, var, cc)
18836 +#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18837 + __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18839 +#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18840 + __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18842 +#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18843 + __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
18845 -#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18846 - __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
18847 +#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18848 + __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
18850 #endif /* CC_HAVE_ASM_GOTO */
18852 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
18853 index cad82c9..2e5c5c1 100644
18854 --- a/arch/x86/include/asm/rwsem.h
18855 +++ b/arch/x86/include/asm/rwsem.h
18856 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
18858 asm volatile("# beginning down_read\n\t"
18859 LOCK_PREFIX _ASM_INC "(%1)\n\t"
18861 +#ifdef CONFIG_PAX_REFCOUNT
18863 + LOCK_PREFIX _ASM_DEC "(%1)\n"
18865 + _ASM_EXTABLE(0b, 0b)
18868 /* adds 0x00000001 */
18870 " call call_rwsem_down_read_failed\n"
18871 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
18876 +#ifdef CONFIG_PAX_REFCOUNT
18880 + _ASM_EXTABLE(0b, 0b)
18884 LOCK_PREFIX " cmpxchg %2,%0\n\t"
18886 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
18888 asm volatile("# beginning down_write\n\t"
18889 LOCK_PREFIX " xadd %1,(%2)\n\t"
18891 +#ifdef CONFIG_PAX_REFCOUNT
18895 + _ASM_EXTABLE(0b, 0b)
18898 /* adds 0xffff0001, returns the old value */
18899 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
18900 /* was the active mask 0 before? */
18901 @@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
18903 asm volatile("# beginning __up_read\n\t"
18904 LOCK_PREFIX " xadd %1,(%2)\n\t"
18906 +#ifdef CONFIG_PAX_REFCOUNT
18910 + _ASM_EXTABLE(0b, 0b)
18913 /* subtracts 1, returns the old value */
18915 " call call_rwsem_wake\n" /* expects old value in %edx */
18916 @@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
18918 asm volatile("# beginning __up_write\n\t"
18919 LOCK_PREFIX " xadd %1,(%2)\n\t"
18921 +#ifdef CONFIG_PAX_REFCOUNT
18925 + _ASM_EXTABLE(0b, 0b)
18928 /* subtracts 0xffff0001, returns the old value */
18930 " call call_rwsem_wake\n" /* expects old value in %edx */
18931 @@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18933 asm volatile("# beginning __downgrade_write\n\t"
18934 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
18936 +#ifdef CONFIG_PAX_REFCOUNT
18938 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
18940 + _ASM_EXTABLE(0b, 0b)
18944 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
18945 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
18946 @@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18948 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18950 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
18951 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
18953 +#ifdef CONFIG_PAX_REFCOUNT
18955 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
18957 + _ASM_EXTABLE(0b, 0b)
18960 : "+m" (sem->count)
18963 @@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18965 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
18967 - return delta + xadd(&sem->count, delta);
18968 + return delta + xadd_check_overflow(&sem->count, delta);
18971 #endif /* __KERNEL__ */
18972 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
18973 index 7d5a192..23ef1aa 100644
18974 --- a/arch/x86/include/asm/segment.h
18975 +++ b/arch/x86/include/asm/segment.h
18976 @@ -82,14 +82,20 @@
18977 * 26 - ESPFIX small SS
18978 * 27 - per-cpu [ offset to per-cpu data area ]
18979 * 28 - stack_canary-20 [ for stack protector ] <=== cacheline #8
18982 + * 29 - PCI BIOS CS
18983 + * 30 - PCI BIOS DS
18984 * 31 - TSS for double fault handler
18986 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
18987 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
18988 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
18989 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
18991 #define GDT_ENTRY_TLS_MIN 6
18992 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
18994 #define GDT_ENTRY_KERNEL_CS 12
18995 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 4
18996 #define GDT_ENTRY_KERNEL_DS 13
18997 #define GDT_ENTRY_DEFAULT_USER_CS 14
18998 #define GDT_ENTRY_DEFAULT_USER_DS 15
18999 @@ -106,6 +112,12 @@
19000 #define GDT_ENTRY_PERCPU 27
19001 #define GDT_ENTRY_STACK_CANARY 28
19003 +#define GDT_ENTRY_PCIBIOS_CS 29
19004 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19006 +#define GDT_ENTRY_PCIBIOS_DS 30
19007 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19009 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19012 @@ -118,6 +130,7 @@
19015 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19016 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19017 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19018 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8 + 3)
19019 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8 + 3)
19020 @@ -129,7 +142,7 @@
19021 #define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16*8)
19023 /* "Is this PNP code selector (PNP_CS32 or PNP_CS16)?" */
19024 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == PNP_CS32)
19025 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19027 /* data segment for BIOS: */
19028 #define PNP_DS (GDT_ENTRY_PNPBIOS_DS*8)
19029 @@ -176,6 +189,8 @@
19030 #define GDT_ENTRY_DEFAULT_USER_DS 5
19031 #define GDT_ENTRY_DEFAULT_USER_CS 6
19033 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19035 /* Needs two entries */
19036 #define GDT_ENTRY_TSS 8
19037 /* Needs two entries */
19038 @@ -187,10 +202,12 @@
19039 /* Abused to load per CPU data from limit */
19040 #define GDT_ENTRY_PER_CPU 15
19042 +#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19045 * Number of entries in the GDT table:
19047 -#define GDT_ENTRIES 16
19048 +#define GDT_ENTRIES 17
19051 * Segment selector values corresponding to the above entries:
19052 @@ -200,7 +217,9 @@
19054 #define __KERNEL32_CS (GDT_ENTRY_KERNEL32_CS*8)
19055 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19056 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19057 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19058 +#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19059 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8 + 3)
19060 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8 + 3)
19061 #define __USER32_DS __USER_DS
19062 diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19063 index ba665eb..0f72938 100644
19064 --- a/arch/x86/include/asm/smap.h
19065 +++ b/arch/x86/include/asm/smap.h
19068 #include <asm/alternative-asm.h>
19070 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19071 +#define ASM_PAX_OPEN_USERLAND \
19072 + ALTERNATIVE "", "call __pax_open_userland", X86_FEATURE_STRONGUDEREF
19074 +#define ASM_PAX_CLOSE_USERLAND \
19075 + ALTERNATIVE "", "call __pax_close_userland", X86_FEATURE_STRONGUDEREF
19078 +#define ASM_PAX_OPEN_USERLAND
19079 +#define ASM_PAX_CLOSE_USERLAND
19082 #ifdef CONFIG_X86_SMAP
19087 #include <asm/alternative.h>
19089 +#define __HAVE_ARCH_PAX_OPEN_USERLAND
19090 +#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19092 +extern void __pax_open_userland(void);
19093 +static __always_inline unsigned long pax_open_userland(void)
19096 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19097 + asm volatile(ALTERNATIVE("", "call %P[open]", X86_FEATURE_STRONGUDEREF)
19099 + : [open] "i" (__pax_open_userland)
19100 + : "memory", "rax");
19106 +extern void __pax_close_userland(void);
19107 +static __always_inline unsigned long pax_close_userland(void)
19110 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19111 + asm volatile(ALTERNATIVE("", "call %P[close]", X86_FEATURE_STRONGUDEREF)
19113 + : [close] "i" (__pax_close_userland)
19114 + : "memory", "rax");
19120 #ifdef CONFIG_X86_SMAP
19122 static __always_inline void clac(void)
19123 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19124 index 17a8dce..79f7280 100644
19125 --- a/arch/x86/include/asm/smp.h
19126 +++ b/arch/x86/include/asm/smp.h
19127 @@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19128 /* cpus sharing the last level cache: */
19129 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19130 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19131 -DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19132 +DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19134 static inline struct cpumask *cpu_sibling_mask(int cpu)
19136 @@ -78,7 +78,7 @@ struct smp_ops {
19138 void (*send_call_func_ipi)(const struct cpumask *mask);
19139 void (*send_call_func_single_ipi)(int cpu);
19143 /* Globals due to paravirt */
19144 extern void set_cpu_sibling_map(int cpu);
19145 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus;
19146 extern int safe_smp_processor_id(void);
19148 #elif defined(CONFIG_X86_64_SMP)
19149 -#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19151 -#define stack_smp_processor_id() \
19153 - struct thread_info *ti; \
19154 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19157 +#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19158 +#define stack_smp_processor_id() raw_smp_processor_id()
19159 #define safe_smp_processor_id() smp_processor_id()
19162 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19163 index 6a99859..03cb807 100644
19164 --- a/arch/x86/include/asm/stackprotector.h
19165 +++ b/arch/x86/include/asm/stackprotector.h
19167 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19169 #define GDT_STACK_CANARY_INIT \
19170 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19171 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19174 * Initialize the stackprotector canary value.
19175 @@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19177 static inline void load_stack_canary_segment(void)
19179 -#ifdef CONFIG_X86_32
19180 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19181 asm volatile ("mov %0, %%gs" : : "r" (0));
19184 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19185 index 70bbe39..4ae2bd4 100644
19186 --- a/arch/x86/include/asm/stacktrace.h
19187 +++ b/arch/x86/include/asm/stacktrace.h
19188 @@ -11,28 +11,20 @@
19190 extern int kstack_depth_to_print;
19192 -struct thread_info;
19193 +struct task_struct;
19194 struct stacktrace_ops;
19196 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19197 - unsigned long *stack,
19198 - unsigned long bp,
19199 - const struct stacktrace_ops *ops,
19201 - unsigned long *end,
19203 +typedef unsigned long walk_stack_t(struct task_struct *task,
19204 + void *stack_start,
19205 + unsigned long *stack,
19206 + unsigned long bp,
19207 + const struct stacktrace_ops *ops,
19209 + unsigned long *end,
19212 -extern unsigned long
19213 -print_context_stack(struct thread_info *tinfo,
19214 - unsigned long *stack, unsigned long bp,
19215 - const struct stacktrace_ops *ops, void *data,
19216 - unsigned long *end, int *graph);
19218 -extern unsigned long
19219 -print_context_stack_bp(struct thread_info *tinfo,
19220 - unsigned long *stack, unsigned long bp,
19221 - const struct stacktrace_ops *ops, void *data,
19222 - unsigned long *end, int *graph);
19223 +extern walk_stack_t print_context_stack;
19224 +extern walk_stack_t print_context_stack_bp;
19226 /* Generic stack tracer with callbacks */
19228 @@ -40,7 +32,7 @@ struct stacktrace_ops {
19229 void (*address)(void *data, unsigned long address, int reliable);
19230 /* On negative return stop dumping */
19231 int (*stack)(void *data, char *name);
19232 - walk_stack_t walk_stack;
19233 + walk_stack_t *walk_stack;
19236 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
19237 diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19238 index 751bf4b..a1278b5 100644
19239 --- a/arch/x86/include/asm/switch_to.h
19240 +++ b/arch/x86/include/asm/switch_to.h
19241 @@ -112,7 +112,7 @@ do { \
19242 "call __switch_to\n\t" \
19243 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19245 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
19246 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19247 "movq %%rax,%%rdi\n\t" \
19248 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19249 "jnz ret_from_fork\n\t" \
19250 @@ -123,7 +123,7 @@ do { \
19251 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19252 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19253 [_tif_fork] "i" (_TIF_FORK), \
19254 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
19255 + [thread_info] "m" (current_tinfo), \
19256 [current_task] "m" (current_task) \
19257 __switch_canary_iparam \
19258 : "memory", "cc" __EXTRA_CLOBBER)
19259 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19260 index b4bdec3..e8af9bc 100644
19261 --- a/arch/x86/include/asm/thread_info.h
19262 +++ b/arch/x86/include/asm/thread_info.h
19264 #ifdef CONFIG_X86_32
19265 # define TOP_OF_KERNEL_STACK_PADDING 8
19267 -# define TOP_OF_KERNEL_STACK_PADDING 0
19268 +# define TOP_OF_KERNEL_STACK_PADDING 16
19272 @@ -50,27 +50,26 @@ struct task_struct;
19273 #include <linux/atomic.h>
19275 struct thread_info {
19276 - struct task_struct *task; /* main task structure */
19277 __u32 flags; /* low level flags */
19278 __u32 status; /* thread synchronous flags */
19279 __u32 cpu; /* current CPU */
19280 int saved_preempt_count;
19281 mm_segment_t addr_limit;
19282 void __user *sysenter_return;
19283 + unsigned long lowest_stack;
19284 unsigned int sig_on_uaccess_error:1;
19285 unsigned int uaccess_err:1; /* uaccess failed */
19288 -#define INIT_THREAD_INFO(tsk) \
19289 +#define INIT_THREAD_INFO \
19294 .saved_preempt_count = INIT_PREEMPT_COUNT, \
19295 .addr_limit = KERNEL_DS, \
19298 -#define init_thread_info (init_thread_union.thread_info)
19299 +#define init_thread_info (init_thread_union.stack)
19300 #define init_stack (init_thread_union.stack)
19302 #else /* !__ASSEMBLY__ */
19303 @@ -110,6 +109,7 @@ struct thread_info {
19304 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19305 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19306 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19307 +#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19309 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19310 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19311 @@ -133,17 +133,18 @@ struct thread_info {
19312 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19313 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19314 #define _TIF_X32 (1 << TIF_X32)
19315 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19317 /* work to do in syscall_trace_enter() */
19318 #define _TIF_WORK_SYSCALL_ENTRY \
19319 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19320 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19322 + _TIF_NOHZ | _TIF_GRSEC_SETXID)
19324 /* work to do in syscall_trace_leave() */
19325 #define _TIF_WORK_SYSCALL_EXIT \
19326 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19327 - _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19328 + _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19330 /* work to do on interrupt/exception return */
19331 #define _TIF_WORK_MASK \
19332 @@ -154,7 +155,7 @@ struct thread_info {
19333 /* work to do on any return to user space */
19334 #define _TIF_ALLWORK_MASK \
19335 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19337 + _TIF_NOHZ | _TIF_GRSEC_SETXID)
19339 /* Only used for 64 bit */
19340 #define _TIF_DO_NOTIFY_MASK \
19341 @@ -179,9 +180,11 @@ struct thread_info {
19343 DECLARE_PER_CPU(unsigned long, kernel_stack);
19345 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19347 static inline struct thread_info *current_thread_info(void)
19349 - return (struct thread_info *)(current_top_of_stack() - THREAD_SIZE);
19350 + return this_cpu_read_stable(current_tinfo);
19353 static inline unsigned long current_stack_pointer(void)
19354 @@ -199,8 +202,7 @@ static inline unsigned long current_stack_pointer(void)
19356 /* Load thread_info address into "reg" */
19357 #define GET_THREAD_INFO(reg) \
19358 - _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
19359 - _ASM_SUB $(THREAD_SIZE),reg ;
19360 + _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
19363 * ASM operand which evaluates to a 'thread_info' address of
19364 @@ -293,5 +295,12 @@ static inline bool is_ia32_task(void)
19365 extern void arch_task_cache_init(void);
19366 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19367 extern void arch_release_task_struct(struct task_struct *tsk);
19369 +#define __HAVE_THREAD_FUNCTIONS
19370 +#define task_thread_info(task) (&(task)->tinfo)
19371 +#define task_stack_page(task) ((task)->stack)
19372 +#define setup_thread_stack(p, org) do {} while (0)
19373 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19376 #endif /* _ASM_X86_THREAD_INFO_H */
19377 diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19378 index cd79194..e7a9491 100644
19379 --- a/arch/x86/include/asm/tlbflush.h
19380 +++ b/arch/x86/include/asm/tlbflush.h
19381 @@ -86,18 +86,44 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
19383 static inline void __native_flush_tlb(void)
19385 + if (static_cpu_has(X86_FEATURE_INVPCID)) {
19386 + u64 descriptor[2];
19388 + descriptor[0] = PCID_KERNEL;
19389 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
19393 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19394 + if (static_cpu_has(X86_FEATURE_PCID)) {
19395 + unsigned int cpu = raw_get_cpu();
19397 + native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19398 + native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19399 + raw_put_cpu_no_resched();
19404 native_write_cr3(native_read_cr3());
19407 static inline void __native_flush_tlb_global_irq_disabled(void)
19409 - unsigned long cr4;
19410 + if (static_cpu_has(X86_FEATURE_INVPCID)) {
19411 + u64 descriptor[2];
19413 - cr4 = this_cpu_read(cpu_tlbstate.cr4);
19415 - native_write_cr4(cr4 & ~X86_CR4_PGE);
19416 - /* write old PGE again and flush TLBs */
19417 - native_write_cr4(cr4);
19418 + descriptor[0] = PCID_KERNEL;
19419 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19421 + unsigned long cr4;
19423 + cr4 = this_cpu_read(cpu_tlbstate.cr4);
19425 + native_write_cr4(cr4 & ~X86_CR4_PGE);
19426 + /* write old PGE again and flush TLBs */
19427 + native_write_cr4(cr4);
19431 static inline void __native_flush_tlb_global(void)
19432 @@ -118,6 +144,41 @@ static inline void __native_flush_tlb_global(void)
19434 static inline void __native_flush_tlb_single(unsigned long addr)
19436 + if (static_cpu_has(X86_FEATURE_INVPCID)) {
19437 + u64 descriptor[2];
19439 + descriptor[0] = PCID_KERNEL;
19440 + descriptor[1] = addr;
19442 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19443 + if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19444 + if (addr < TASK_SIZE_MAX)
19445 + descriptor[1] += pax_user_shadow_base;
19446 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19449 + descriptor[0] = PCID_USER;
19450 + descriptor[1] = addr;
19453 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19457 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19458 + if (static_cpu_has(X86_FEATURE_PCID)) {
19459 + unsigned int cpu = raw_get_cpu();
19461 + native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19462 + asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19463 + native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19464 + raw_put_cpu_no_resched();
19466 + if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19467 + addr += pax_user_shadow_base;
19471 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19474 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19475 index ace9dec..3f9e253 100644
19476 --- a/arch/x86/include/asm/uaccess.h
19477 +++ b/arch/x86/include/asm/uaccess.h
19479 #include <linux/compiler.h>
19480 #include <linux/thread_info.h>
19481 #include <linux/string.h>
19482 +#include <linux/spinlock.h>
19483 #include <asm/asm.h>
19484 #include <asm/page.h>
19485 #include <asm/smap.h>
19488 #define get_ds() (KERNEL_DS)
19489 #define get_fs() (current_thread_info()->addr_limit)
19490 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19491 +void __set_fs(mm_segment_t x);
19492 +void set_fs(mm_segment_t x);
19494 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19497 #define segment_eq(a, b) ((a).seg == (b).seg)
19499 @@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19500 * checks that the pointer is in the user space range - after calling
19501 * this function, memory access functions may still return -EFAULT.
19503 -#define access_ok(type, addr, size) \
19504 - likely(!__range_not_ok(addr, size, user_addr_max()))
19505 +extern int _cond_resched(void);
19506 +#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19507 +#define access_ok(type, addr, size) \
19509 + unsigned long __size = size; \
19510 + unsigned long __addr = (unsigned long)addr; \
19511 + bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19512 + if (__ret_ao && __size) { \
19513 + unsigned long __addr_ao = __addr & PAGE_MASK; \
19514 + unsigned long __end_ao = __addr + __size - 1; \
19515 + if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19516 + while (__addr_ao <= __end_ao) { \
19518 + __addr_ao += PAGE_SIZE; \
19519 + if (__size > PAGE_SIZE) \
19520 + _cond_resched(); \
19521 + if (__get_user(__c_ao, (char __user *)__addr)) \
19523 + if (type != VERIFY_WRITE) { \
19524 + __addr = __addr_ao; \
19527 + if (__put_user(__c_ao, (char __user *)__addr)) \
19529 + __addr = __addr_ao; \
19537 * The exception table consists of pairs of addresses relative to the
19538 @@ -134,11 +168,13 @@ extern int __get_user_8(void);
19539 extern int __get_user_bad(void);
19542 - * This is a type: either unsigned long, if the argument fits into
19543 - * that type, or otherwise unsigned long long.
19544 + * This is a type: either (un)signed int, if the argument fits into
19545 + * that type, or otherwise (un)signed long long.
19547 #define __inttype(x) \
19548 -__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19549 +__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0U), \
19550 + __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0ULL, 0LL),\
19551 + __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0U, 0)))
19554 * get_user: - Get a simple variable from user space.
19555 @@ -176,10 +212,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19556 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19557 __chk_user_ptr(ptr); \
19559 + pax_open_userland(); \
19560 asm volatile("call __get_user_%P3" \
19561 : "=a" (__ret_gu), "=r" (__val_gu) \
19562 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19563 (x) = (__force __typeof__(*(ptr))) __val_gu; \
19564 + pax_close_userland(); \
19568 @@ -187,13 +225,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19569 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19570 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19573 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19574 +#define __copyuser_seg "gs;"
19575 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19576 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19578 +#define __copyuser_seg
19579 +#define __COPYUSER_SET_ES
19580 +#define __COPYUSER_RESTORE_ES
19583 #ifdef CONFIG_X86_32
19584 #define __put_user_asm_u64(x, addr, err, errret) \
19585 asm volatile(ASM_STAC "\n" \
19586 - "1: movl %%eax,0(%2)\n" \
19587 - "2: movl %%edx,4(%2)\n" \
19588 + "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19589 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19590 "3: " ASM_CLAC "\n" \
19591 ".section .fixup,\"ax\"\n" \
19592 "4: movl %3,%0\n" \
19593 @@ -206,8 +252,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19595 #define __put_user_asm_ex_u64(x, addr) \
19596 asm volatile(ASM_STAC "\n" \
19597 - "1: movl %%eax,0(%1)\n" \
19598 - "2: movl %%edx,4(%1)\n" \
19599 + "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19600 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19601 "3: " ASM_CLAC "\n" \
19602 _ASM_EXTABLE_EX(1b, 2b) \
19603 _ASM_EXTABLE_EX(2b, 3b) \
19604 @@ -257,7 +303,8 @@ extern void __put_user_8(void);
19605 __typeof__(*(ptr)) __pu_val; \
19606 __chk_user_ptr(ptr); \
19609 + __pu_val = (x); \
19610 + pax_open_userland(); \
19611 switch (sizeof(*(ptr))) { \
19613 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19614 @@ -275,6 +322,7 @@ extern void __put_user_8(void);
19615 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19618 + pax_close_userland(); \
19622 @@ -355,8 +403,10 @@ do { \
19625 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19627 + pax_open_userland(); \
19628 asm volatile(ASM_STAC "\n" \
19629 - "1: mov"itype" %2,%"rtype"1\n" \
19630 + "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19631 "2: " ASM_CLAC "\n" \
19632 ".section .fixup,\"ax\"\n" \
19634 @@ -364,8 +414,10 @@ do { \
19637 _ASM_EXTABLE(1b, 3b) \
19638 - : "=r" (err), ltype(x) \
19639 - : "m" (__m(addr)), "i" (errret), "0" (err))
19640 + : "=r" (err), ltype (x) \
19641 + : "m" (__m(addr)), "i" (errret), "0" (err)); \
19642 + pax_close_userland(); \
19645 #define __get_user_size_ex(x, ptr, size) \
19647 @@ -389,7 +441,7 @@ do { \
19650 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19651 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19652 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19654 _ASM_EXTABLE_EX(1b, 2b) \
19655 : ltype(x) : "m" (__m(addr)))
19656 @@ -406,13 +458,24 @@ do { \
19658 unsigned long __gu_val; \
19659 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19660 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
19661 + (x) = (__typeof__(*(ptr)))__gu_val; \
19665 /* FIXME: this hack is definitely wrong -AK */
19666 struct __large_struct { unsigned long buf[100]; };
19667 -#define __m(x) (*(struct __large_struct __user *)(x))
19668 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19669 +#define ____m(x) \
19671 + unsigned long ____x = (unsigned long)(x); \
19672 + if (____x < pax_user_shadow_base) \
19673 + ____x += pax_user_shadow_base; \
19674 + (typeof(x))____x; \
19677 +#define ____m(x) (x)
19679 +#define __m(x) (*(struct __large_struct __user *)____m(x))
19682 * Tell gcc we read from memory instead of writing: this is because
19683 @@ -420,8 +483,10 @@ struct __large_struct { unsigned long buf[100]; };
19686 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19688 + pax_open_userland(); \
19689 asm volatile(ASM_STAC "\n" \
19690 - "1: mov"itype" %"rtype"1,%2\n" \
19691 + "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
19692 "2: " ASM_CLAC "\n" \
19693 ".section .fixup,\"ax\"\n" \
19695 @@ -429,10 +494,12 @@ struct __large_struct { unsigned long buf[100]; };
19697 _ASM_EXTABLE(1b, 3b) \
19699 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
19700 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
19701 + pax_close_userland(); \
19704 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19705 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19706 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19708 _ASM_EXTABLE_EX(1b, 2b) \
19709 : : ltype(x), "m" (__m(addr)))
19710 @@ -442,11 +509,13 @@ struct __large_struct { unsigned long buf[100]; };
19712 #define uaccess_try do { \
19713 current_thread_info()->uaccess_err = 0; \
19714 + pax_open_userland(); \
19718 #define uaccess_catch(err) \
19720 + pax_close_userland(); \
19721 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19724 @@ -471,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
19725 * On error, the variable @x is set to zero.
19728 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19729 +#define __get_user(x, ptr) get_user((x), (ptr))
19731 #define __get_user(x, ptr) \
19732 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19736 * __put_user: - Write a simple value into user space, with less checking.
19737 @@ -494,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
19738 * Returns zero on success, or -EFAULT on error.
19741 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19742 +#define __put_user(x, ptr) put_user((x), (ptr))
19744 #define __put_user(x, ptr) \
19745 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19748 #define __get_user_unaligned __get_user
19749 #define __put_user_unaligned __put_user
19750 @@ -513,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
19751 #define get_user_ex(x, ptr) do { \
19752 unsigned long __gue_val; \
19753 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19754 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
19755 + (x) = (__typeof__(*(ptr)))__gue_val; \
19758 #define put_user_try uaccess_try
19759 @@ -531,7 +608,7 @@ extern __must_check long strlen_user(const char __user *str);
19760 extern __must_check long strnlen_user(const char __user *str, long n);
19762 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
19763 -unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
19764 +unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
19766 extern void __cmpxchg_wrong_size(void)
19767 __compiletime_error("Bad argument size for cmpxchg");
19768 @@ -542,18 +619,19 @@ extern void __cmpxchg_wrong_size(void)
19769 __typeof__(ptr) __uval = (uval); \
19770 __typeof__(*(ptr)) __old = (old); \
19771 __typeof__(*(ptr)) __new = (new); \
19772 + pax_open_userland(); \
19776 asm volatile("\t" ASM_STAC "\n" \
19777 - "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
19778 + "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
19779 "2:\t" ASM_CLAC "\n" \
19780 "\t.section .fixup, \"ax\"\n" \
19781 "3:\tmov %3, %0\n" \
19784 _ASM_EXTABLE(1b, 3b) \
19785 - : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19786 + : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19787 : "i" (-EFAULT), "q" (__new), "1" (__old) \
19790 @@ -562,14 +640,14 @@ extern void __cmpxchg_wrong_size(void)
19793 asm volatile("\t" ASM_STAC "\n" \
19794 - "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
19795 + "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
19796 "2:\t" ASM_CLAC "\n" \
19797 "\t.section .fixup, \"ax\"\n" \
19798 "3:\tmov %3, %0\n" \
19801 _ASM_EXTABLE(1b, 3b) \
19802 - : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19803 + : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19804 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19807 @@ -578,14 +656,14 @@ extern void __cmpxchg_wrong_size(void)
19810 asm volatile("\t" ASM_STAC "\n" \
19811 - "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
19812 + "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
19813 "2:\t" ASM_CLAC "\n" \
19814 "\t.section .fixup, \"ax\"\n" \
19815 "3:\tmov %3, %0\n" \
19818 _ASM_EXTABLE(1b, 3b) \
19819 - : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19820 + : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19821 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19824 @@ -597,14 +675,14 @@ extern void __cmpxchg_wrong_size(void)
19825 __cmpxchg_wrong_size(); \
19827 asm volatile("\t" ASM_STAC "\n" \
19828 - "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
19829 + "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
19830 "2:\t" ASM_CLAC "\n" \
19831 "\t.section .fixup, \"ax\"\n" \
19832 "3:\tmov %3, %0\n" \
19835 _ASM_EXTABLE(1b, 3b) \
19836 - : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19837 + : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19838 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19841 @@ -613,6 +691,7 @@ extern void __cmpxchg_wrong_size(void)
19843 __cmpxchg_wrong_size(); \
19845 + pax_close_userland(); \
19849 @@ -636,17 +715,6 @@ extern struct movsl_mask {
19851 #define ARCH_HAS_NOCACHE_UACCESS 1
19853 -#ifdef CONFIG_X86_32
19854 -# include <asm/uaccess_32.h>
19856 -# include <asm/uaccess_64.h>
19859 -unsigned long __must_check _copy_from_user(void *to, const void __user *from,
19861 -unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19864 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19865 # define copy_user_diag __compiletime_error
19867 @@ -656,7 +724,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19868 extern void copy_user_diag("copy_from_user() buffer size is too small")
19869 copy_from_user_overflow(void);
19870 extern void copy_user_diag("copy_to_user() buffer size is too small")
19871 -copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19872 +copy_to_user_overflow(void);
19874 #undef copy_user_diag
19876 @@ -669,7 +737,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
19879 __compiletime_warning("copy_to_user() buffer size is not provably correct")
19880 -__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19881 +__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
19882 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
19885 @@ -684,10 +752,16 @@ __copy_from_user_overflow(int size, unsigned long count)
19889 +#ifdef CONFIG_X86_32
19890 +# include <asm/uaccess_32.h>
19892 +# include <asm/uaccess_64.h>
19895 static inline unsigned long __must_check
19896 copy_from_user(void *to, const void __user *from, unsigned long n)
19898 - int sz = __compiletime_object_size(to);
19899 + size_t sz = __compiletime_object_size(to);
19903 @@ -709,12 +783,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19904 * case, and do only runtime checking for non-constant sizes.
19907 - if (likely(sz < 0 || sz >= n))
19908 - n = _copy_from_user(to, from, n);
19909 - else if(__builtin_constant_p(n))
19910 - copy_from_user_overflow();
19912 - __copy_from_user_overflow(sz, n);
19913 + if (likely(sz != (size_t)-1 && sz < n)) {
19914 + if(__builtin_constant_p(n))
19915 + copy_from_user_overflow();
19917 + __copy_from_user_overflow(sz, n);
19918 + } else if (access_ok(VERIFY_READ, from, n))
19919 + n = __copy_from_user(to, from, n);
19920 + else if ((long)n > 0)
19921 + memset(to, 0, n);
19925 @@ -722,17 +799,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19926 static inline unsigned long __must_check
19927 copy_to_user(void __user *to, const void *from, unsigned long n)
19929 - int sz = __compiletime_object_size(from);
19930 + size_t sz = __compiletime_object_size(from);
19934 /* See the comment in copy_from_user() above. */
19935 - if (likely(sz < 0 || sz >= n))
19936 - n = _copy_to_user(to, from, n);
19937 - else if(__builtin_constant_p(n))
19938 - copy_to_user_overflow();
19940 - __copy_to_user_overflow(sz, n);
19941 + if (likely(sz != (size_t)-1 && sz < n)) {
19942 + if(__builtin_constant_p(n))
19943 + copy_to_user_overflow();
19945 + __copy_to_user_overflow(sz, n);
19946 + } else if (access_ok(VERIFY_WRITE, to, n))
19947 + n = __copy_to_user(to, from, n);
19951 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
19952 index 3c03a5d..edb68ae 100644
19953 --- a/arch/x86/include/asm/uaccess_32.h
19954 +++ b/arch/x86/include/asm/uaccess_32.h
19955 @@ -40,9 +40,14 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
19956 * anything, so this is accurate.
19959 -static __always_inline unsigned long __must_check
19960 +static __always_inline __size_overflow(3) unsigned long __must_check
19961 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
19966 + check_object_size(from, n, true);
19968 if (__builtin_constant_p(n)) {
19971 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
19972 __copy_to_user(void __user *to, const void *from, unsigned long n)
19976 return __copy_to_user_inatomic(to, from, n);
19979 -static __always_inline unsigned long
19980 +static __always_inline __size_overflow(3) unsigned long
19981 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
19986 /* Avoid zeroing the tail if the copy fails..
19987 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
19988 * but as the zeroing behaviour is only significant when n is not
19989 @@ -137,6 +146,12 @@ static __always_inline unsigned long
19990 __copy_from_user(void *to, const void __user *from, unsigned long n)
19997 + check_object_size(to, n, false);
19999 if (__builtin_constant_p(n)) {
20002 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20003 const void __user *from, unsigned long n)
20010 if (__builtin_constant_p(n)) {
20013 @@ -181,7 +200,10 @@ static __always_inline unsigned long
20014 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20017 - return __copy_from_user_ll_nocache_nozero(to, from, n);
20021 + return __copy_from_user_ll_nocache_nozero(to, from, n);
20024 #endif /* _ASM_X86_UACCESS_32_H */
20025 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20026 index f2f9b39..2ae1bf8 100644
20027 --- a/arch/x86/include/asm/uaccess_64.h
20028 +++ b/arch/x86/include/asm/uaccess_64.h
20030 #include <asm/alternative.h>
20031 #include <asm/cpufeature.h>
20032 #include <asm/page.h>
20033 +#include <asm/pgtable.h>
20035 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
20038 * Copy To/From Userspace
20039 @@ -23,8 +26,8 @@ copy_user_generic_string(void *to, const void *from, unsigned len);
20040 __must_check unsigned long
20041 copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20043 -static __always_inline __must_check unsigned long
20044 -copy_user_generic(void *to, const void *from, unsigned len)
20045 +static __always_inline __must_check __size_overflow(3) unsigned long
20046 +copy_user_generic(void *to, const void *from, unsigned long len)
20050 @@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20053 __must_check unsigned long
20054 -copy_in_user(void __user *to, const void __user *from, unsigned len);
20055 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
20057 static __always_inline __must_check
20058 -int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20059 +unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20062 + size_t sz = __compiletime_object_size(dst);
20063 + unsigned ret = 0;
20065 + if (size > INT_MAX)
20068 + check_object_size(dst, size, false);
20070 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20071 + if (!access_ok_noprefault(VERIFY_READ, src, size))
20075 + if (unlikely(sz != (size_t)-1 && sz < size)) {
20076 + if(__builtin_constant_p(size))
20077 + copy_from_user_overflow();
20079 + __copy_from_user_overflow(sz, size);
20083 if (!__builtin_constant_p(size))
20084 - return copy_user_generic(dst, (__force void *)src, size);
20085 + return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20087 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20088 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20089 ret, "b", "b", "=q", 1);
20091 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20092 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20093 ret, "w", "w", "=r", 2);
20095 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20096 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20097 ret, "l", "k", "=r", 4);
20099 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20100 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20101 ret, "q", "", "=r", 8);
20104 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20105 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20106 ret, "q", "", "=r", 10);
20109 __get_user_asm(*(u16 *)(8 + (char *)dst),
20110 - (u16 __user *)(8 + (char __user *)src),
20111 + (const u16 __user *)(8 + (const char __user *)src),
20112 ret, "w", "w", "=r", 2);
20115 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20116 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20117 ret, "q", "", "=r", 16);
20120 __get_user_asm(*(u64 *)(8 + (char *)dst),
20121 - (u64 __user *)(8 + (char __user *)src),
20122 + (const u64 __user *)(8 + (const char __user *)src),
20123 ret, "q", "", "=r", 8);
20126 - return copy_user_generic(dst, (__force void *)src, size);
20127 + return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20131 static __always_inline __must_check
20132 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
20133 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20136 return __copy_from_user_nocheck(dst, src, size);
20139 static __always_inline __must_check
20140 -int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20141 +unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20144 + size_t sz = __compiletime_object_size(src);
20145 + unsigned ret = 0;
20147 + if (size > INT_MAX)
20150 + check_object_size(src, size, true);
20152 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20153 + if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20157 + if (unlikely(sz != (size_t)-1 && sz < size)) {
20158 + if(__builtin_constant_p(size))
20159 + copy_to_user_overflow();
20161 + __copy_to_user_overflow(sz, size);
20165 if (!__builtin_constant_p(size))
20166 - return copy_user_generic((__force void *)dst, src, size);
20167 + return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20169 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20170 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20171 ret, "b", "b", "iq", 1);
20173 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20174 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20175 ret, "w", "w", "ir", 2);
20177 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20178 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20179 ret, "l", "k", "ir", 4);
20181 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20182 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20183 ret, "q", "", "er", 8);
20186 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20187 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20188 ret, "q", "", "er", 10);
20191 asm("":::"memory");
20192 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20193 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20194 ret, "w", "w", "ir", 2);
20197 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20198 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20199 ret, "q", "", "er", 16);
20202 asm("":::"memory");
20203 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20204 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20205 ret, "q", "", "er", 8);
20208 - return copy_user_generic((__force void *)dst, src, size);
20209 + return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20213 static __always_inline __must_check
20214 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
20215 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20218 return __copy_to_user_nocheck(dst, src, size);
20221 static __always_inline __must_check
20222 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20223 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20226 + unsigned ret = 0;
20230 + if (size > INT_MAX)
20233 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20234 + if (!access_ok_noprefault(VERIFY_READ, src, size))
20236 + if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20240 if (!__builtin_constant_p(size))
20241 - return copy_user_generic((__force void *)dst,
20242 - (__force void *)src, size);
20243 + return copy_user_generic((__force_kernel void *)____m(dst),
20244 + (__force_kernel const void *)____m(src), size);
20248 - __get_user_asm(tmp, (u8 __user *)src,
20249 + __get_user_asm(tmp, (const u8 __user *)src,
20250 ret, "b", "b", "=q", 1);
20252 __put_user_asm(tmp, (u8 __user *)dst,
20253 @@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20257 - __get_user_asm(tmp, (u16 __user *)src,
20258 + __get_user_asm(tmp, (const u16 __user *)src,
20259 ret, "w", "w", "=r", 2);
20261 __put_user_asm(tmp, (u16 __user *)dst,
20262 @@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20266 - __get_user_asm(tmp, (u32 __user *)src,
20267 + __get_user_asm(tmp, (const u32 __user *)src,
20268 ret, "l", "k", "=r", 4);
20270 __put_user_asm(tmp, (u32 __user *)dst,
20271 @@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20275 - __get_user_asm(tmp, (u64 __user *)src,
20276 + __get_user_asm(tmp, (const u64 __user *)src,
20277 ret, "q", "", "=r", 8);
20279 __put_user_asm(tmp, (u64 __user *)dst,
20280 @@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20284 - return copy_user_generic((__force void *)dst,
20285 - (__force void *)src, size);
20286 + return copy_user_generic((__force_kernel void *)____m(dst),
20287 + (__force_kernel const void *)____m(src), size);
20291 -static __must_check __always_inline int
20292 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20293 +static __must_check __always_inline unsigned long
20294 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20296 return __copy_from_user_nocheck(dst, src, size);
20299 -static __must_check __always_inline int
20300 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20301 +static __must_check __always_inline unsigned long
20302 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20304 return __copy_to_user_nocheck(dst, src, size);
20307 -extern long __copy_user_nocache(void *dst, const void __user *src,
20308 - unsigned size, int zerorest);
20309 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20310 + unsigned long size, int zerorest);
20313 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20314 +static inline unsigned long
20315 +__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20319 + if (size > INT_MAX)
20322 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20323 + if (!access_ok_noprefault(VERIFY_READ, src, size))
20327 return __copy_user_nocache(dst, src, size, 1);
20331 +static inline unsigned long
20332 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20334 + unsigned long size)
20336 + if (size > INT_MAX)
20339 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20340 + if (!access_ok_noprefault(VERIFY_READ, src, size))
20344 return __copy_user_nocache(dst, src, size, 0);
20348 -copy_user_handle_tail(char *to, char *from, unsigned len);
20349 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len) __size_overflow(3);
20351 #endif /* _ASM_X86_UACCESS_64_H */
20352 diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20353 index 5b238981..77fdd78 100644
20354 --- a/arch/x86/include/asm/word-at-a-time.h
20355 +++ b/arch/x86/include/asm/word-at-a-time.h
20357 * and shift, for example.
20359 struct word_at_a_time {
20360 - const unsigned long one_bits, high_bits;
20361 + unsigned long one_bits, high_bits;
20364 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20365 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20366 index f58a9c7..dc378042a 100644
20367 --- a/arch/x86/include/asm/x86_init.h
20368 +++ b/arch/x86/include/asm/x86_init.h
20369 @@ -129,7 +129,7 @@ struct x86_init_ops {
20370 struct x86_init_timers timers;
20371 struct x86_init_iommu iommu;
20372 struct x86_init_pci pci;
20377 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20378 @@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20379 void (*setup_percpu_clockev)(void);
20380 void (*early_percpu_clock_init)(void);
20381 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20387 @@ -168,7 +168,7 @@ struct x86_platform_ops {
20388 void (*save_sched_clock_state)(void);
20389 void (*restore_sched_clock_state)(void);
20390 void (*apic_post_init)(void);
20396 @@ -182,7 +182,7 @@ struct x86_msi_ops {
20397 void (*teardown_msi_irqs)(struct pci_dev *dev);
20398 void (*restore_msi_irqs)(struct pci_dev *dev);
20399 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20403 struct IO_APIC_route_entry;
20404 struct io_apic_irq_attr;
20405 @@ -203,7 +203,7 @@ struct x86_io_apic_ops {
20406 unsigned int destination, int vector,
20407 struct io_apic_irq_attr *attr);
20408 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20412 extern struct x86_init_ops x86_init;
20413 extern struct x86_cpuinit_ops x86_cpuinit;
20414 diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20415 index c44a5d5..7f83cfc 100644
20416 --- a/arch/x86/include/asm/xen/page.h
20417 +++ b/arch/x86/include/asm/xen/page.h
20418 @@ -82,7 +82,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
20419 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
20420 * cases needing an extended handling.
20422 -static inline unsigned long __pfn_to_mfn(unsigned long pfn)
20423 +static inline unsigned long __intentional_overflow(-1) __pfn_to_mfn(unsigned long pfn)
20427 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20428 index c9a6d68..cb57f42 100644
20429 --- a/arch/x86/include/asm/xsave.h
20430 +++ b/arch/x86/include/asm/xsave.h
20431 @@ -223,12 +223,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20435 + pax_open_userland();
20436 __asm__ __volatile__(ASM_STAC "\n"
20441 "2: " ASM_CLAC "\n"
20443 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20445 + pax_close_userland();
20449 @@ -238,16 +242,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20450 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20453 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20454 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20456 u32 hmask = mask >> 32;
20458 + pax_open_userland();
20459 __asm__ __volatile__(ASM_STAC "\n"
20464 "2: " ASM_CLAC "\n"
20466 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20467 : "memory"); /* memory required? */
20468 + pax_close_userland();
20472 diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20473 index 960a8a9..404daf7 100644
20474 --- a/arch/x86/include/uapi/asm/e820.h
20475 +++ b/arch/x86/include/uapi/asm/e820.h
20476 @@ -68,7 +68,7 @@ struct e820map {
20477 #define ISA_START_ADDRESS 0xa0000
20478 #define ISA_END_ADDRESS 0x100000
20480 -#define BIOS_BEGIN 0x000a0000
20481 +#define BIOS_BEGIN 0x000c0000
20482 #define BIOS_END 0x00100000
20484 #define BIOS_ROM_BASE 0xffe00000
20485 diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20486 index 9bcd0b5..750f1b7 100644
20487 --- a/arch/x86/kernel/Makefile
20488 +++ b/arch/x86/kernel/Makefile
20489 @@ -28,7 +28,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20490 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20491 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20492 obj-y += probe_roms.o
20493 -obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20494 +obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20495 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20496 obj-$(CONFIG_X86_64) += mcount_64.o
20497 obj-y += syscall_$(BITS).o vsyscall_gtod.o
20498 diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20499 index dbe76a1..e2ec334 100644
20500 --- a/arch/x86/kernel/acpi/boot.c
20501 +++ b/arch/x86/kernel/acpi/boot.c
20502 @@ -1361,7 +1361,7 @@ static void __init acpi_reduced_hw_init(void)
20503 * If your system is blacklisted here, but you find that acpi=force
20504 * works for you, please contact linux-acpi@vger.kernel.org
20506 -static struct dmi_system_id __initdata acpi_dmi_table[] = {
20507 +static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20509 * Boxes that need ACPI disabled
20511 @@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20514 /* second table for DMI checks that should run after early-quirks */
20515 -static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20516 +static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20518 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20519 * which includes some code which overrides all temperature
20520 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20521 index d1daead..acd77e2 100644
20522 --- a/arch/x86/kernel/acpi/sleep.c
20523 +++ b/arch/x86/kernel/acpi/sleep.c
20524 @@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20525 #else /* CONFIG_64BIT */
20527 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20529 + pax_open_kernel();
20530 early_gdt_descr.address =
20531 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20532 + pax_close_kernel();
20534 initial_gs = per_cpu_offset(smp_processor_id());
20536 initial_code = (unsigned long)wakeup_long64;
20537 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20538 index 665c6b7..eae4d56 100644
20539 --- a/arch/x86/kernel/acpi/wakeup_32.S
20540 +++ b/arch/x86/kernel/acpi/wakeup_32.S
20541 @@ -29,13 +29,11 @@ wakeup_pmode_return:
20542 # and restore the stack ... but you need gdt for this to work
20543 movl saved_context_esp, %esp
20545 - movl %cs:saved_magic, %eax
20546 - cmpl $0x12345678, %eax
20547 + cmpl $0x12345678, saved_magic
20550 # jump to place where we left off
20551 - movl saved_eip, %eax
20557 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20558 index aef6531..2044b66 100644
20559 --- a/arch/x86/kernel/alternative.c
20560 +++ b/arch/x86/kernel/alternative.c
20561 @@ -248,7 +248,9 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
20562 unsigned int noplen = len;
20563 if (noplen > ASM_NOP_MAX)
20564 noplen = ASM_NOP_MAX;
20565 + pax_open_kernel();
20566 memcpy(insns, ideal_nops[noplen], noplen);
20567 + pax_close_kernel();
20571 @@ -276,6 +278,11 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
20572 if (a->replacementlen != 5)
20575 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20576 + if (orig_insn < (u8 *)_text || (u8 *)_einittext <= orig_insn)
20577 + orig_insn = ktva_ktla(orig_insn);
20580 o_dspl = *(s32 *)(insnbuf + 1);
20582 /* next_rip of the replacement JMP */
20583 @@ -362,7 +369,23 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20584 int insnbuf_sz = 0;
20586 instr = (u8 *)&a->instr_offset + a->instr_offset;
20588 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20589 + if ((u8 *)_text <= instr && instr < (u8 *)_einittext) {
20590 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20591 + instr = ktla_ktva(instr);
20595 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20597 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20598 + if ((u8 *)_text <= replacement && replacement < (u8 *)_einittext) {
20599 + replacement += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20600 + replacement = ktla_ktva(replacement);
20604 BUG_ON(a->instrlen > sizeof(insnbuf));
20605 BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
20606 if (!boot_cpu_has(a->cpuid)) {
20607 @@ -402,6 +425,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20609 DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);
20611 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20612 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20613 + instr = ktva_ktla(instr);
20616 text_poke_early(instr, insnbuf, insnbuf_sz);
20619 @@ -416,10 +444,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20620 for (poff = start; poff < end; poff++) {
20621 u8 *ptr = (u8 *)poff + *poff;
20623 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20624 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20625 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20626 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20629 if (!*poff || ptr < text || ptr >= text_end)
20631 /* turn DS segment override prefix into lock prefix */
20632 - if (*ptr == 0x3e)
20633 + if (*ktla_ktva(ptr) == 0x3e)
20634 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20636 mutex_unlock(&text_mutex);
20637 @@ -434,10 +468,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20638 for (poff = start; poff < end; poff++) {
20639 u8 *ptr = (u8 *)poff + *poff;
20641 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20642 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20643 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20644 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20647 if (!*poff || ptr < text || ptr >= text_end)
20649 /* turn lock prefix into DS segment override prefix */
20650 - if (*ptr == 0xf0)
20651 + if (*ktla_ktva(ptr) == 0xf0)
20652 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20654 mutex_unlock(&text_mutex);
20655 @@ -574,7 +614,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20657 BUG_ON(p->len > MAX_PATCH_LEN);
20658 /* prep the buffer with the original instructions */
20659 - memcpy(insnbuf, p->instr, p->len);
20660 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20661 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20662 (unsigned long)p->instr, p->len);
20664 @@ -621,7 +661,7 @@ void __init alternative_instructions(void)
20665 if (!uniproc_patched || num_possible_cpus() == 1)
20666 free_init_pages("SMP alternatives",
20667 (unsigned long)__smp_locks,
20668 - (unsigned long)__smp_locks_end);
20669 + PAGE_ALIGN((unsigned long)__smp_locks_end));
20672 apply_paravirt(__parainstructions, __parainstructions_end);
20673 @@ -641,13 +681,17 @@ void __init alternative_instructions(void)
20674 * instructions. And on the local CPU you need to be protected again NMI or MCE
20675 * handlers seeing an inconsistent instruction while you patch.
20677 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
20678 +void *__kprobes text_poke_early(void *addr, const void *opcode,
20681 unsigned long flags;
20682 local_irq_save(flags);
20683 - memcpy(addr, opcode, len);
20685 + pax_open_kernel();
20686 + memcpy(ktla_ktva(addr), opcode, len);
20688 + pax_close_kernel();
20690 local_irq_restore(flags);
20691 /* Could also do a CLFLUSH here to speed up CPU recovery; but
20692 that causes hangs on some VIA CPUs. */
20693 @@ -669,36 +713,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
20695 void *text_poke(void *addr, const void *opcode, size_t len)
20697 - unsigned long flags;
20699 + unsigned char *vaddr = ktla_ktva(addr);
20700 struct page *pages[2];
20704 if (!core_kernel_text((unsigned long)addr)) {
20705 - pages[0] = vmalloc_to_page(addr);
20706 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
20707 + pages[0] = vmalloc_to_page(vaddr);
20708 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
20710 - pages[0] = virt_to_page(addr);
20711 + pages[0] = virt_to_page(vaddr);
20712 WARN_ON(!PageReserved(pages[0]));
20713 - pages[1] = virt_to_page(addr + PAGE_SIZE);
20714 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
20717 - local_irq_save(flags);
20718 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
20720 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
20721 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
20722 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
20723 - clear_fixmap(FIX_TEXT_POKE0);
20725 - clear_fixmap(FIX_TEXT_POKE1);
20726 - local_flush_tlb();
20728 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
20729 - that causes hangs on some VIA CPUs. */
20730 + text_poke_early(addr, opcode, len);
20731 for (i = 0; i < len; i++)
20732 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
20733 - local_irq_restore(flags);
20734 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
20738 @@ -752,7 +782,7 @@ int poke_int3_handler(struct pt_regs *regs)
20740 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
20742 - unsigned char int3 = 0xcc;
20743 + const unsigned char int3 = 0xcc;
20745 bp_int3_handler = handler;
20746 bp_int3_addr = (u8 *)addr + sizeof(int3);
20747 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
20748 index dcb5285..cc79e9d 100644
20749 --- a/arch/x86/kernel/apic/apic.c
20750 +++ b/arch/x86/kernel/apic/apic.c
20751 @@ -171,7 +171,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
20753 * Debug level, exported for io_apic.c
20755 -unsigned int apic_verbosity;
20756 +int apic_verbosity;
20760 @@ -1857,7 +1857,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
20761 apic_write(APIC_ESR, 0);
20762 v = apic_read(APIC_ESR);
20764 - atomic_inc(&irq_err_count);
20765 + atomic_inc_unchecked(&irq_err_count);
20767 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
20768 smp_processor_id(), v);
20769 diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
20770 index de918c4..32eed23 100644
20771 --- a/arch/x86/kernel/apic/apic_flat_64.c
20772 +++ b/arch/x86/kernel/apic/apic_flat_64.c
20773 @@ -154,7 +154,7 @@ static int flat_probe(void)
20777 -static struct apic apic_flat = {
20778 +static struct apic apic_flat __read_only = {
20780 .probe = flat_probe,
20781 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
20782 @@ -260,7 +260,7 @@ static int physflat_probe(void)
20786 -static struct apic apic_physflat = {
20787 +static struct apic apic_physflat __read_only = {
20789 .name = "physical flat",
20790 .probe = physflat_probe,
20791 diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
20792 index b205cdb..d8503ff 100644
20793 --- a/arch/x86/kernel/apic/apic_noop.c
20794 +++ b/arch/x86/kernel/apic/apic_noop.c
20795 @@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
20796 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
20799 -struct apic apic_noop = {
20800 +struct apic apic_noop __read_only = {
20802 .probe = noop_probe,
20803 .acpi_madt_oem_check = NULL,
20804 diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
20805 index c4a8d63..fe893ac 100644
20806 --- a/arch/x86/kernel/apic/bigsmp_32.c
20807 +++ b/arch/x86/kernel/apic/bigsmp_32.c
20808 @@ -147,7 +147,7 @@ static int probe_bigsmp(void)
20812 -static struct apic apic_bigsmp = {
20813 +static struct apic apic_bigsmp __read_only = {
20816 .probe = probe_bigsmp,
20817 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
20818 index f4dc246..fbab133 100644
20819 --- a/arch/x86/kernel/apic/io_apic.c
20820 +++ b/arch/x86/kernel/apic/io_apic.c
20821 @@ -1862,7 +1862,7 @@ int native_ioapic_set_affinity(struct irq_data *data,
20825 -atomic_t irq_mis_count;
20826 +atomic_unchecked_t irq_mis_count;
20828 #ifdef CONFIG_GENERIC_PENDING_IRQ
20829 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
20830 @@ -2003,7 +2003,7 @@ static void ack_ioapic_level(struct irq_data *data)
20833 if (!(v & (1 << (i & 0x1f)))) {
20834 - atomic_inc(&irq_mis_count);
20835 + atomic_inc_unchecked(&irq_mis_count);
20837 eoi_ioapic_irq(irq, cfg);
20839 @@ -2011,7 +2011,7 @@ static void ack_ioapic_level(struct irq_data *data)
20840 ioapic_irqd_unmask(data, cfg, masked);
20843 -static struct irq_chip ioapic_chip __read_mostly = {
20844 +static struct irq_chip ioapic_chip = {
20846 .irq_startup = startup_ioapic_irq,
20847 .irq_mask = mask_ioapic_irq,
20848 @@ -2070,7 +2070,7 @@ static void ack_lapic_irq(struct irq_data *data)
20852 -static struct irq_chip lapic_chip __read_mostly = {
20853 +static struct irq_chip lapic_chip = {
20854 .name = "local-APIC",
20855 .irq_mask = mask_lapic_irq,
20856 .irq_unmask = unmask_lapic_irq,
20857 diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
20858 index bda4886..f9c7195 100644
20859 --- a/arch/x86/kernel/apic/probe_32.c
20860 +++ b/arch/x86/kernel/apic/probe_32.c
20861 @@ -72,7 +72,7 @@ static int probe_default(void)
20865 -static struct apic apic_default = {
20866 +static struct apic apic_default __read_only = {
20869 .probe = probe_default,
20870 diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
20871 index 6cedd79..023ff8e 100644
20872 --- a/arch/x86/kernel/apic/vector.c
20873 +++ b/arch/x86/kernel/apic/vector.c
20876 static DEFINE_RAW_SPINLOCK(vector_lock);
20878 -void lock_vector_lock(void)
20879 +void lock_vector_lock(void) __acquires(vector_lock)
20881 /* Used to the online set of cpus does not change
20882 * during assign_irq_vector.
20883 @@ -29,7 +29,7 @@ void lock_vector_lock(void)
20884 raw_spin_lock(&vector_lock);
20887 -void unlock_vector_lock(void)
20888 +void unlock_vector_lock(void) __releases(vector_lock)
20890 raw_spin_unlock(&vector_lock);
20892 diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
20893 index ab3219b..e8033eb 100644
20894 --- a/arch/x86/kernel/apic/x2apic_cluster.c
20895 +++ b/arch/x86/kernel/apic/x2apic_cluster.c
20896 @@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
20897 return notifier_from_errno(err);
20900 -static struct notifier_block __refdata x2apic_cpu_notifier = {
20901 +static struct notifier_block x2apic_cpu_notifier = {
20902 .notifier_call = update_clusterinfo,
20905 @@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
20906 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
20909 -static struct apic apic_x2apic_cluster = {
20910 +static struct apic apic_x2apic_cluster __read_only = {
20912 .name = "cluster x2apic",
20913 .probe = x2apic_cluster_probe,
20914 diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
20915 index 6fae733..5ca17af 100644
20916 --- a/arch/x86/kernel/apic/x2apic_phys.c
20917 +++ b/arch/x86/kernel/apic/x2apic_phys.c
20918 @@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
20919 return apic == &apic_x2apic_phys;
20922 -static struct apic apic_x2apic_phys = {
20923 +static struct apic apic_x2apic_phys __read_only = {
20925 .name = "physical x2apic",
20926 .probe = x2apic_phys_probe,
20927 diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
20928 index c8d9295..9af2d03 100644
20929 --- a/arch/x86/kernel/apic/x2apic_uv_x.c
20930 +++ b/arch/x86/kernel/apic/x2apic_uv_x.c
20931 @@ -375,7 +375,7 @@ static int uv_probe(void)
20932 return apic == &apic_x2apic_uv_x;
20935 -static struct apic __refdata apic_x2apic_uv_x = {
20936 +static struct apic apic_x2apic_uv_x __read_only = {
20938 .name = "UV large system",
20940 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
20941 index 927ec92..de68f32 100644
20942 --- a/arch/x86/kernel/apm_32.c
20943 +++ b/arch/x86/kernel/apm_32.c
20944 @@ -432,7 +432,7 @@ static DEFINE_MUTEX(apm_mutex);
20945 * This is for buggy BIOS's that refer to (real mode) segment 0x40
20946 * even though they are called in protected mode.
20948 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
20949 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
20950 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
20952 static const char driver_version[] = "1.16ac"; /* no spaces */
20953 @@ -610,7 +610,10 @@ static long __apm_bios_call(void *_call)
20955 gdt = get_cpu_gdt_table(cpu);
20956 save_desc_40 = gdt[0x40 / 8];
20958 + pax_open_kernel();
20959 gdt[0x40 / 8] = bad_bios_desc;
20960 + pax_close_kernel();
20962 apm_irq_save(flags);
20964 @@ -619,7 +622,11 @@ static long __apm_bios_call(void *_call)
20966 APM_DO_RESTORE_SEGS;
20967 apm_irq_restore(flags);
20969 + pax_open_kernel();
20970 gdt[0x40 / 8] = save_desc_40;
20971 + pax_close_kernel();
20975 return call->eax & 0xff;
20976 @@ -686,7 +693,10 @@ static long __apm_bios_call_simple(void *_call)
20978 gdt = get_cpu_gdt_table(cpu);
20979 save_desc_40 = gdt[0x40 / 8];
20981 + pax_open_kernel();
20982 gdt[0x40 / 8] = bad_bios_desc;
20983 + pax_close_kernel();
20985 apm_irq_save(flags);
20987 @@ -694,7 +704,11 @@ static long __apm_bios_call_simple(void *_call)
20989 APM_DO_RESTORE_SEGS;
20990 apm_irq_restore(flags);
20992 + pax_open_kernel();
20993 gdt[0x40 / 8] = save_desc_40;
20994 + pax_close_kernel();
20999 @@ -2039,7 +2053,7 @@ static int __init swab_apm_power_in_minutes(const struct dmi_system_id *d)
21003 -static struct dmi_system_id __initdata apm_dmi_table[] = {
21004 +static const struct dmi_system_id __initconst apm_dmi_table[] = {
21007 KERN_WARNING "IBM T23 - BIOS 1.03b+ and controller firmware 1.02+ may be needed for Linux APM.",
21008 @@ -2349,12 +2363,15 @@ static int __init apm_init(void)
21009 * code to that CPU.
21011 gdt = get_cpu_gdt_table(0);
21013 + pax_open_kernel();
21014 set_desc_base(&gdt[APM_CS >> 3],
21015 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21016 set_desc_base(&gdt[APM_CS_16 >> 3],
21017 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21018 set_desc_base(&gdt[APM_DS >> 3],
21019 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21020 + pax_close_kernel();
21022 proc_create("apm", 0, NULL, &apm_file_ops);
21024 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21025 index 9f6b934..cf5ffb3 100644
21026 --- a/arch/x86/kernel/asm-offsets.c
21027 +++ b/arch/x86/kernel/asm-offsets.c
21028 @@ -32,6 +32,8 @@ void common(void) {
21029 OFFSET(TI_flags, thread_info, flags);
21030 OFFSET(TI_status, thread_info, status);
21031 OFFSET(TI_addr_limit, thread_info, addr_limit);
21032 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21033 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21036 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21037 @@ -52,8 +54,26 @@ void common(void) {
21038 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21039 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21040 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21042 +#ifdef CONFIG_PAX_KERNEXEC
21043 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21046 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21047 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21048 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21049 +#ifdef CONFIG_X86_64
21050 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21057 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21058 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21059 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21063 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
21064 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21065 index 5ce6f2d..9e738f3 100644
21066 --- a/arch/x86/kernel/asm-offsets_64.c
21067 +++ b/arch/x86/kernel/asm-offsets_64.c
21068 @@ -80,6 +80,7 @@ int main(void)
21072 + DEFINE(TSS_size, sizeof(struct tss_struct));
21073 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21074 OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
21076 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21077 index 9bff687..5b899fb 100644
21078 --- a/arch/x86/kernel/cpu/Makefile
21079 +++ b/arch/x86/kernel/cpu/Makefile
21080 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21081 CFLAGS_REMOVE_perf_event.o = -pg
21084 -# Make sure load_percpu_segment has no stackprotector
21085 -nostackp := $(call cc-option, -fno-stack-protector)
21086 -CFLAGS_common.o := $(nostackp)
21088 obj-y := intel_cacheinfo.o scattered.o topology.o
21091 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21092 index e4cf633..941f450 100644
21093 --- a/arch/x86/kernel/cpu/amd.c
21094 +++ b/arch/x86/kernel/cpu/amd.c
21095 @@ -729,7 +729,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21096 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21098 /* AMD errata T13 (order #21922) */
21099 - if ((c->x86 == 6)) {
21100 + if (c->x86 == 6) {
21102 if (c->x86_model == 3 && c->x86_mask == 0)
21104 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21105 index a62cf04..041e39c 100644
21106 --- a/arch/x86/kernel/cpu/common.c
21107 +++ b/arch/x86/kernel/cpu/common.c
21108 @@ -91,60 +91,6 @@ static const struct cpu_dev default_cpu = {
21110 static const struct cpu_dev *this_cpu = &default_cpu;
21112 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21113 -#ifdef CONFIG_X86_64
21115 - * We need valid kernel segments for data and code in long mode too
21116 - * IRET will check the segment types kkeil 2000/10/28
21117 - * Also sysret mandates a special GDT layout
21119 - * TLS descriptors are currently at a different place compared to i386.
21120 - * Hopefully nobody expects them at a fixed place (Wine?)
21122 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21123 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21124 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21125 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21126 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21127 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21129 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21130 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21131 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21132 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21134 - * Segments used for calling PnP BIOS have byte granularity.
21135 - * They code segments and data segments have fixed 64k limits,
21136 - * the transfer segment sizes are set at run time.
21138 - /* 32-bit code */
21139 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21140 - /* 16-bit code */
21141 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21142 - /* 16-bit data */
21143 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21144 - /* 16-bit data */
21145 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21146 - /* 16-bit data */
21147 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21149 - * The APM segments have byte granularity and their bases
21150 - * are set at run time. All have 64k limits.
21152 - /* 32-bit code */
21153 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21154 - /* 16-bit code */
21155 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21157 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21159 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21160 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21161 - GDT_STACK_CANARY_INIT
21164 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21166 static int __init x86_xsave_setup(char *s)
21169 @@ -306,6 +252,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21173 +#ifdef CONFIG_X86_64
21174 +static __init int setup_disable_pcid(char *arg)
21176 + setup_clear_cpu_cap(X86_FEATURE_PCID);
21177 + setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21179 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21180 + if (clone_pgd_mask != ~(pgdval_t)0UL)
21181 + pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21186 +__setup("nopcid", setup_disable_pcid);
21188 +static void setup_pcid(struct cpuinfo_x86 *c)
21190 + if (!cpu_has(c, X86_FEATURE_PCID)) {
21191 + clear_cpu_cap(c, X86_FEATURE_INVPCID);
21193 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21194 + if (clone_pgd_mask != ~(pgdval_t)0UL) {
21195 + pax_open_kernel();
21196 + pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21197 + pax_close_kernel();
21198 + printk("PAX: slow and weak UDEREF enabled\n");
21200 + printk("PAX: UDEREF disabled\n");
21206 + printk("PAX: PCID detected\n");
21207 + cr4_set_bits(X86_CR4_PCIDE);
21209 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21210 + pax_open_kernel();
21211 + clone_pgd_mask = ~(pgdval_t)0UL;
21212 + pax_close_kernel();
21213 + if (pax_user_shadow_base)
21214 + printk("PAX: weak UDEREF enabled\n");
21216 + set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21217 + printk("PAX: strong UDEREF enabled\n");
21221 + if (cpu_has(c, X86_FEATURE_INVPCID))
21222 + printk("PAX: INVPCID detected\n");
21227 * Some CPU features depend on higher CPUID levels, which may not always
21228 * be available due to CPUID level capping or broken virtualization
21229 @@ -406,7 +405,7 @@ void switch_to_new_gdt(int cpu)
21231 struct desc_ptr gdt_descr;
21233 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21234 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21235 gdt_descr.size = GDT_SIZE - 1;
21236 load_gdt(&gdt_descr);
21237 /* Reload the per-cpu base */
21238 @@ -935,6 +934,20 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21242 +#ifdef CONFIG_X86_32
21243 +#ifdef CONFIG_PAX_PAGEEXEC
21244 + if (!(__supported_pte_mask & _PAGE_NX))
21245 + clear_cpu_cap(c, X86_FEATURE_PSE);
21247 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21248 + clear_cpu_cap(c, X86_FEATURE_SEP);
21252 +#ifdef CONFIG_X86_64
21257 * The vendor-specific functions might have changed features.
21258 * Now we do "generic changes."
21259 @@ -1009,7 +1022,7 @@ void enable_sep_cpu(void)
21263 - tss = &per_cpu(cpu_tss, cpu);
21264 + tss = cpu_tss + cpu;
21266 if (!boot_cpu_has(X86_FEATURE_SEP))
21268 @@ -1155,14 +1168,16 @@ static __init int setup_disablecpuid(char *arg)
21270 __setup("clearcpuid=", setup_disablecpuid);
21272 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21273 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
21275 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21276 - (unsigned long)&init_thread_union + THREAD_SIZE;
21277 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21278 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21280 #ifdef CONFIG_X86_64
21281 -struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21282 -struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21283 - (unsigned long) debug_idt_table };
21284 +struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21285 +const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21287 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21288 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21289 @@ -1367,7 +1382,7 @@ void cpu_init(void)
21293 - t = &per_cpu(cpu_tss, cpu);
21294 + t = cpu_tss + cpu;
21295 oist = &per_cpu(orig_ist, cpu);
21298 @@ -1399,7 +1414,6 @@ void cpu_init(void)
21299 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21302 - x86_configure_nx();
21306 @@ -1451,7 +1465,7 @@ void cpu_init(void)
21308 int cpu = smp_processor_id();
21309 struct task_struct *curr = current;
21310 - struct tss_struct *t = &per_cpu(cpu_tss, cpu);
21311 + struct tss_struct *t = cpu_tss + cpu;
21312 struct thread_struct *thread = &curr->thread;
21314 wait_for_master_cpu(cpu);
21315 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21316 index edcb0e2..a138233 100644
21317 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21318 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21319 @@ -519,25 +519,23 @@ cache_private_attrs_is_visible(struct kobject *kobj,
21323 +static struct attribute *amd_l3_attrs[4];
21325 static struct attribute_group cache_private_group = {
21326 .is_visible = cache_private_attrs_is_visible,
21327 + .attrs = amd_l3_attrs,
21330 static void init_amd_l3_attrs(void)
21333 - static struct attribute **amd_l3_attrs;
21335 - if (amd_l3_attrs) /* already initialized */
21338 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21340 if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21343 - amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL);
21344 - if (!amd_l3_attrs)
21345 + if (n > 1 && amd_l3_attrs[0]) /* already initialized */
21349 @@ -547,8 +545,6 @@ static void init_amd_l3_attrs(void)
21351 if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21352 amd_l3_attrs[n++] = &dev_attr_subcaches.attr;
21354 - cache_private_group.attrs = amd_l3_attrs;
21357 const struct attribute_group *
21358 @@ -559,7 +555,7 @@ cache_get_priv_group(struct cacheinfo *this_leaf)
21359 if (this_leaf->level < 3 || !nb)
21362 - if (nb && nb->l3_cache.indices)
21363 + if (nb->l3_cache.indices)
21364 init_amd_l3_attrs();
21366 return &cache_private_group;
21367 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21368 index 20190bd..cadb2ab 100644
21369 --- a/arch/x86/kernel/cpu/mcheck/mce.c
21370 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
21372 #include <asm/tlbflush.h>
21373 #include <asm/mce.h>
21374 #include <asm/msr.h>
21375 +#include <asm/local.h>
21377 #include "mce-internal.h"
21379 @@ -256,7 +257,7 @@ static void print_mce(struct mce *m)
21380 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21383 - if (m->cs == __KERNEL_CS)
21384 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21385 print_symbol("{%s}", m->ip);
21388 @@ -289,10 +290,10 @@ static void print_mce(struct mce *m)
21390 #define PANIC_TIMEOUT 5 /* 5 seconds */
21392 -static atomic_t mce_panicked;
21393 +static atomic_unchecked_t mce_panicked;
21395 static int fake_panic;
21396 -static atomic_t mce_fake_panicked;
21397 +static atomic_unchecked_t mce_fake_panicked;
21399 /* Panic in progress. Enable interrupts and wait for final IPI */
21400 static void wait_for_panic(void)
21401 @@ -316,7 +317,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21403 * Make sure only one CPU runs in machine check panic
21405 - if (atomic_inc_return(&mce_panicked) > 1)
21406 + if (atomic_inc_return_unchecked(&mce_panicked) > 1)
21410 @@ -324,7 +325,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21413 /* Don't log too much for fake panic */
21414 - if (atomic_inc_return(&mce_fake_panicked) > 1)
21415 + if (atomic_inc_return_unchecked(&mce_fake_panicked) > 1)
21418 /* First print corrected ones that are still unlogged */
21419 @@ -363,7 +364,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21421 if (panic_timeout == 0)
21422 panic_timeout = mca_cfg.panic_timeout;
21424 + panic("%s", msg);
21426 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21428 @@ -749,7 +750,7 @@ static int mce_timed_out(u64 *t, const char *msg)
21429 * might have been modified by someone else.
21432 - if (atomic_read(&mce_panicked))
21433 + if (atomic_read_unchecked(&mce_panicked))
21435 if (!mca_cfg.monarch_timeout)
21437 @@ -1679,7 +1680,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21440 /* Call the installed machine check handler for this CPU setup. */
21441 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
21442 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21443 unexpected_machine_check;
21446 @@ -1702,7 +1703,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21450 + pax_open_kernel();
21451 machine_check_vector = do_machine_check;
21452 + pax_close_kernel();
21454 __mcheck_cpu_init_generic();
21455 __mcheck_cpu_init_vendor(c);
21456 @@ -1716,7 +1719,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21459 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21460 -static int mce_chrdev_open_count; /* #times opened */
21461 +static local_t mce_chrdev_open_count; /* #times opened */
21462 static int mce_chrdev_open_exclu; /* already open exclusive? */
21464 static int mce_chrdev_open(struct inode *inode, struct file *file)
21465 @@ -1724,7 +1727,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21466 spin_lock(&mce_chrdev_state_lock);
21468 if (mce_chrdev_open_exclu ||
21469 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21470 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21471 spin_unlock(&mce_chrdev_state_lock);
21474 @@ -1732,7 +1735,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21476 if (file->f_flags & O_EXCL)
21477 mce_chrdev_open_exclu = 1;
21478 - mce_chrdev_open_count++;
21479 + local_inc(&mce_chrdev_open_count);
21481 spin_unlock(&mce_chrdev_state_lock);
21483 @@ -1743,7 +1746,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21485 spin_lock(&mce_chrdev_state_lock);
21487 - mce_chrdev_open_count--;
21488 + local_dec(&mce_chrdev_open_count);
21489 mce_chrdev_open_exclu = 0;
21491 spin_unlock(&mce_chrdev_state_lock);
21492 @@ -2419,7 +2422,7 @@ static __init void mce_init_banks(void)
21494 for (i = 0; i < mca_cfg.banks; i++) {
21495 struct mce_bank *b = &mce_banks[i];
21496 - struct device_attribute *a = &b->attr;
21497 + device_attribute_no_const *a = &b->attr;
21499 sysfs_attr_init(&a->attr);
21500 a->attr.name = b->attrname;
21501 @@ -2526,7 +2529,7 @@ struct dentry *mce_get_debugfs_dir(void)
21502 static void mce_reset(void)
21505 - atomic_set(&mce_fake_panicked, 0);
21506 + atomic_set_unchecked(&mce_fake_panicked, 0);
21507 atomic_set(&mce_executing, 0);
21508 atomic_set(&mce_callin, 0);
21509 atomic_set(&global_nwo, 0);
21510 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21511 index 737b0ad..09ec66e 100644
21512 --- a/arch/x86/kernel/cpu/mcheck/p5.c
21513 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
21515 #include <asm/tlbflush.h>
21516 #include <asm/mce.h>
21517 #include <asm/msr.h>
21518 +#include <asm/pgtable.h>
21520 /* By default disabled */
21521 int mce_p5_enabled __read_mostly;
21522 @@ -55,7 +56,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21523 if (!cpu_has(c, X86_FEATURE_MCE))
21526 + pax_open_kernel();
21527 machine_check_vector = pentium_machine_check;
21528 + pax_close_kernel();
21529 /* Make sure the vector pointer is visible before we enable MCEs: */
21532 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21533 index 44f1382..315b292 100644
21534 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
21535 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21537 #include <asm/tlbflush.h>
21538 #include <asm/mce.h>
21539 #include <asm/msr.h>
21540 +#include <asm/pgtable.h>
21542 /* Machine check handler for WinChip C6: */
21543 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21544 @@ -28,7 +29,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21548 + pax_open_kernel();
21549 machine_check_vector = winchip_machine_check;
21550 + pax_close_kernel();
21551 /* Make sure the vector pointer is visible before we enable MCEs: */
21554 diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21555 index 36a8361..e7058c2 100644
21556 --- a/arch/x86/kernel/cpu/microcode/core.c
21557 +++ b/arch/x86/kernel/cpu/microcode/core.c
21558 @@ -518,7 +518,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21562 -static struct notifier_block __refdata mc_cpu_notifier = {
21563 +static struct notifier_block mc_cpu_notifier = {
21564 .notifier_call = mc_cpu_callback,
21567 diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21568 index a41bead..4e3685b 100644
21569 --- a/arch/x86/kernel/cpu/microcode/intel.c
21570 +++ b/arch/x86/kernel/cpu/microcode/intel.c
21571 @@ -298,13 +298,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21573 static int get_ucode_user(void *to, const void *from, size_t n)
21575 - return copy_from_user(to, from, n);
21576 + return copy_from_user(to, (const void __force_user *)from, n);
21579 static enum ucode_state
21580 request_microcode_user(int cpu, const void __user *buf, size_t size)
21582 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21583 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21586 static void microcode_fini_cpu(int cpu)
21587 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21588 index ea5f363..cb0e905 100644
21589 --- a/arch/x86/kernel/cpu/mtrr/main.c
21590 +++ b/arch/x86/kernel/cpu/mtrr/main.c
21591 @@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21592 u64 size_or_mask, size_and_mask;
21593 static bool mtrr_aps_delayed_init;
21595 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21596 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21598 const struct mtrr_ops *mtrr_if;
21600 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21601 index df5e41f..816c719 100644
21602 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21603 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21604 @@ -25,7 +25,7 @@ struct mtrr_ops {
21605 int (*validate_add_page)(unsigned long base, unsigned long size,
21606 unsigned int type);
21607 int (*have_wrcomb)(void);
21611 extern int generic_get_free_region(unsigned long base, unsigned long size,
21613 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21614 index aa4e3a7..469370f 100644
21615 --- a/arch/x86/kernel/cpu/perf_event.c
21616 +++ b/arch/x86/kernel/cpu/perf_event.c
21617 @@ -1509,7 +1509,7 @@ static void __init pmu_check_apic(void)
21621 -static struct attribute_group x86_pmu_format_group = {
21622 +static attribute_group_no_const x86_pmu_format_group = {
21626 @@ -1608,7 +1608,7 @@ static struct attribute *events_attr[] = {
21630 -static struct attribute_group x86_pmu_events_group = {
21631 +static attribute_group_no_const x86_pmu_events_group = {
21633 .attrs = events_attr,
21635 @@ -2181,7 +2181,7 @@ static unsigned long get_segment_base(unsigned int segment)
21636 if (idx > GDT_ENTRIES)
21639 - desc = raw_cpu_ptr(gdt_page.gdt);
21640 + desc = get_cpu_gdt_table(smp_processor_id());
21643 return get_desc_base(desc + idx);
21644 @@ -2271,7 +2271,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21647 perf_callchain_store(entry, frame.return_address);
21648 - fp = frame.next_frame;
21649 + fp = (const void __force_user *)frame.next_frame;
21653 diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21654 index 97242a9..cf9c30e 100644
21655 --- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21656 +++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21657 @@ -402,7 +402,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21658 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21660 struct attribute **attrs;
21661 - struct attribute_group *attr_group;
21662 + attribute_group_no_const *attr_group;
21665 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21666 diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21667 index 2813ea0..3ef5969c8 100644
21668 --- a/arch/x86/kernel/cpu/perf_event_intel.c
21669 +++ b/arch/x86/kernel/cpu/perf_event_intel.c
21670 @@ -3033,10 +3033,10 @@ __init int intel_pmu_init(void)
21671 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
21673 if (boot_cpu_has(X86_FEATURE_PDCM)) {
21674 - u64 capabilities;
21675 + u64 capabilities = x86_pmu.intel_cap.capabilities;
21677 - rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
21678 - x86_pmu.intel_cap.capabilities = capabilities;
21679 + if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
21680 + x86_pmu.intel_cap.capabilities = capabilities;
21684 diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/kernel/cpu/perf_event_intel_bts.c
21685 index 7795f3f..3535b76 100644
21686 --- a/arch/x86/kernel/cpu/perf_event_intel_bts.c
21687 +++ b/arch/x86/kernel/cpu/perf_event_intel_bts.c
21688 @@ -252,7 +252,7 @@ static void bts_event_start(struct perf_event *event, int flags)
21689 __bts_event_start(event);
21691 /* PMI handler: this counter is running and likely generating PMIs */
21692 - ACCESS_ONCE(bts->started) = 1;
21693 + ACCESS_ONCE_RW(bts->started) = 1;
21696 static void __bts_event_stop(struct perf_event *event)
21697 @@ -266,7 +266,7 @@ static void __bts_event_stop(struct perf_event *event)
21698 if (event->hw.state & PERF_HES_STOPPED)
21701 - ACCESS_ONCE(event->hw.state) |= PERF_HES_STOPPED;
21702 + ACCESS_ONCE_RW(event->hw.state) |= PERF_HES_STOPPED;
21705 static void bts_event_stop(struct perf_event *event, int flags)
21706 @@ -274,7 +274,7 @@ static void bts_event_stop(struct perf_event *event, int flags)
21707 struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
21709 /* PMI handler: don't restart this counter */
21710 - ACCESS_ONCE(bts->started) = 0;
21711 + ACCESS_ONCE_RW(bts->started) = 0;
21713 __bts_event_stop(event);
21715 diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
21716 index e4d1b8b..2c6ffa0 100644
21717 --- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
21718 +++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
21719 @@ -1352,7 +1352,9 @@ static int __init intel_cqm_init(void)
21723 - event_attr_intel_cqm_llc_scale.event_str = str;
21724 + pax_open_kernel();
21725 + *(const char **)&event_attr_intel_cqm_llc_scale.event_str = str;
21726 + pax_close_kernel();
21728 ret = intel_cqm_setup_rmid_cache();
21730 diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c
21731 index 123ff1b..d53e500 100644
21732 --- a/arch/x86/kernel/cpu/perf_event_intel_pt.c
21733 +++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c
21734 @@ -116,16 +116,12 @@ static const struct attribute_group *pt_attr_groups[] = {
21736 static int __init pt_pmu_hw_init(void)
21738 - struct dev_ext_attribute *de_attrs;
21739 - struct attribute **attrs;
21742 + static struct dev_ext_attribute de_attrs[ARRAY_SIZE(pt_caps)];
21743 + static struct attribute *attrs[ARRAY_SIZE(pt_caps)];
21748 if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_INTEL_PT))
21752 for (i = 0; i < PT_CPUID_LEAVES; i++) {
21754 @@ -135,39 +131,25 @@ static int __init pt_pmu_hw_init(void)
21755 &pt_pmu.caps[CR_EDX + i*4]);
21759 - size = sizeof(struct attribute *) * (ARRAY_SIZE(pt_caps)+1);
21760 - attrs = kzalloc(size, GFP_KERNEL);
21764 - size = sizeof(struct dev_ext_attribute) * (ARRAY_SIZE(pt_caps)+1);
21765 - de_attrs = kzalloc(size, GFP_KERNEL);
21769 + pax_open_kernel();
21770 for (i = 0; i < ARRAY_SIZE(pt_caps); i++) {
21771 - struct dev_ext_attribute *de_attr = de_attrs + i;
21772 + struct dev_ext_attribute *de_attr = &de_attrs[i];
21774 - de_attr->attr.attr.name = pt_caps[i].name;
21775 + *(const char **)&de_attr->attr.attr.name = pt_caps[i].name;
21777 sysfs_attr_init(&de_attr->attr.attr);
21779 - de_attr->attr.attr.mode = S_IRUGO;
21780 - de_attr->attr.show = pt_cap_show;
21781 - de_attr->var = (void *)i;
21782 + *(umode_t *)&de_attr->attr.attr.mode = S_IRUGO;
21783 + *(void **)&de_attr->attr.show = pt_cap_show;
21784 + *(void **)&de_attr->var = (void *)i;
21786 attrs[i] = &de_attr->attr.attr;
21789 - pt_cap_group.attrs = attrs;
21790 + *(struct attribute ***)&pt_cap_group.attrs = attrs;
21791 + pax_close_kernel();
21801 #define PT_CONFIG_MASK (RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC)
21802 @@ -928,7 +910,7 @@ static void pt_event_start(struct perf_event *event, int mode)
21806 - ACCESS_ONCE(pt->handle_nmi) = 1;
21807 + ACCESS_ONCE_RW(pt->handle_nmi) = 1;
21808 event->hw.state = 0;
21810 pt_config_buffer(buf->cur->table, buf->cur_idx,
21811 @@ -945,7 +927,7 @@ static void pt_event_stop(struct perf_event *event, int mode)
21812 * Protect against the PMI racing with disabling wrmsr,
21813 * see comment in intel_pt_interrupt().
21815 - ACCESS_ONCE(pt->handle_nmi) = 0;
21816 + ACCESS_ONCE_RW(pt->handle_nmi) = 0;
21817 pt_config_start(false);
21819 if (event->hw.state == PERF_HES_STOPPED)
21820 diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21821 index 358c54a..f068235 100644
21822 --- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21823 +++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21824 @@ -487,7 +487,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
21828 -static struct attribute_group rapl_pmu_events_group = {
21829 +static attribute_group_no_const rapl_pmu_events_group __read_only = {
21831 .attrs = NULL, /* patched at runtime */
21833 diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21834 index 90b7c50..7863ae3 100644
21835 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21836 +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21837 @@ -732,7 +732,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
21838 static int __init uncore_type_init(struct intel_uncore_type *type)
21840 struct intel_uncore_pmu *pmus;
21841 - struct attribute_group *attr_group;
21842 + attribute_group_no_const *attr_group;
21843 struct attribute **attrs;
21846 diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21847 index ceac8f5..a562de7 100644
21848 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21849 +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21850 @@ -115,7 +115,7 @@ struct intel_uncore_box {
21851 struct uncore_event_desc {
21852 struct kobj_attribute attr;
21853 const char *config;
21857 ssize_t uncore_event_show(struct kobject *kobj,
21858 struct kobj_attribute *attr, char *buf);
21859 diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
21860 index 83741a7..bd3507d 100644
21861 --- a/arch/x86/kernel/cpuid.c
21862 +++ b/arch/x86/kernel/cpuid.c
21863 @@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
21864 return notifier_from_errno(err);
21867 -static struct notifier_block __refdata cpuid_class_cpu_notifier =
21868 +static struct notifier_block cpuid_class_cpu_notifier =
21870 .notifier_call = cpuid_class_cpu_callback,
21872 diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
21873 index afa64ad..dce67dd 100644
21874 --- a/arch/x86/kernel/crash_dump_64.c
21875 +++ b/arch/x86/kernel/crash_dump_64.c
21876 @@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
21880 - if (copy_to_user(buf, vaddr + offset, csize)) {
21881 + if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
21885 diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
21886 index f6dfd93..892ade4 100644
21887 --- a/arch/x86/kernel/doublefault.c
21888 +++ b/arch/x86/kernel/doublefault.c
21891 #define DOUBLEFAULT_STACKSIZE (1024)
21892 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
21893 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
21894 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
21896 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
21898 @@ -22,7 +22,7 @@ static void doublefault_fn(void)
21899 unsigned long gdt, tss;
21901 native_store_gdt(&gdt_desc);
21902 - gdt = gdt_desc.address;
21903 + gdt = (unsigned long)gdt_desc.address;
21905 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
21907 @@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
21908 /* 0x2 bit is always set */
21909 .flags = X86_EFLAGS_SF | 0x2,
21912 + .es = __KERNEL_DS,
21916 + .ds = __KERNEL_DS,
21917 .fs = __KERNEL_PERCPU,
21919 .__cr3 = __pa_nodebug(swapper_pg_dir),
21920 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
21921 index 9c30acf..8cf2411 100644
21922 --- a/arch/x86/kernel/dumpstack.c
21923 +++ b/arch/x86/kernel/dumpstack.c
21925 * Copyright (C) 1991, 1992 Linus Torvalds
21926 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
21928 +#ifdef CONFIG_GRKERNSEC_HIDESYM
21929 +#define __INCLUDED_BY_HIDESYM 1
21931 #include <linux/kallsyms.h>
21932 #include <linux/kprobes.h>
21933 #include <linux/uaccess.h>
21934 @@ -35,23 +38,21 @@ static void printk_stack_address(unsigned long address, int reliable,
21936 void printk_address(unsigned long address)
21938 - pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
21939 + pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
21942 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
21944 print_ftrace_graph_addr(unsigned long addr, void *data,
21945 const struct stacktrace_ops *ops,
21946 - struct thread_info *tinfo, int *graph)
21947 + struct task_struct *task, int *graph)
21949 - struct task_struct *task;
21950 unsigned long ret_addr;
21953 if (addr != (unsigned long)return_to_handler)
21956 - task = tinfo->task;
21957 index = task->curr_ret_stack;
21959 if (!task->ret_stack || index < *graph)
21960 @@ -68,7 +69,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21962 print_ftrace_graph_addr(unsigned long addr, void *data,
21963 const struct stacktrace_ops *ops,
21964 - struct thread_info *tinfo, int *graph)
21965 + struct task_struct *task, int *graph)
21969 @@ -79,10 +80,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21970 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
21973 -static inline int valid_stack_ptr(struct thread_info *tinfo,
21974 - void *p, unsigned int size, void *end)
21975 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
21979 if (p < end && p >= (end-THREAD_SIZE))
21981 @@ -93,14 +92,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
21985 -print_context_stack(struct thread_info *tinfo,
21986 +print_context_stack(struct task_struct *task, void *stack_start,
21987 unsigned long *stack, unsigned long bp,
21988 const struct stacktrace_ops *ops, void *data,
21989 unsigned long *end, int *graph)
21991 struct stack_frame *frame = (struct stack_frame *)bp;
21993 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
21994 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
21995 unsigned long addr;
21998 @@ -112,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
22000 ops->address(data, addr, 0);
22002 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22003 + print_ftrace_graph_addr(addr, data, ops, task, graph);
22007 @@ -121,7 +120,7 @@ print_context_stack(struct thread_info *tinfo,
22008 EXPORT_SYMBOL_GPL(print_context_stack);
22011 -print_context_stack_bp(struct thread_info *tinfo,
22012 +print_context_stack_bp(struct task_struct *task, void *stack_start,
22013 unsigned long *stack, unsigned long bp,
22014 const struct stacktrace_ops *ops, void *data,
22015 unsigned long *end, int *graph)
22016 @@ -129,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22017 struct stack_frame *frame = (struct stack_frame *)bp;
22018 unsigned long *ret_addr = &frame->return_address;
22020 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
22021 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
22022 unsigned long addr = *ret_addr;
22024 if (!__kernel_text_address(addr))
22025 @@ -138,7 +137,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22026 ops->address(data, addr, 1);
22027 frame = frame->next_frame;
22028 ret_addr = &frame->return_address;
22029 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22030 + print_ftrace_graph_addr(addr, data, ops, task, graph);
22033 return (unsigned long)frame;
22034 @@ -226,6 +225,8 @@ unsigned long oops_begin(void)
22035 EXPORT_SYMBOL_GPL(oops_begin);
22036 NOKPROBE_SYMBOL(oops_begin);
22038 +extern void gr_handle_kernel_exploit(void);
22040 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22042 if (regs && kexec_should_crash(current))
22043 @@ -247,7 +248,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22044 panic("Fatal exception in interrupt");
22046 panic("Fatal exception");
22049 + gr_handle_kernel_exploit();
22051 + do_group_exit(signr);
22053 NOKPROBE_SYMBOL(oops_end);
22055 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22056 index 464ffd6..01f2cda 100644
22057 --- a/arch/x86/kernel/dumpstack_32.c
22058 +++ b/arch/x86/kernel/dumpstack_32.c
22059 @@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22060 bp = stack_frame(task, regs);
22063 - struct thread_info *context;
22064 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22067 end_stack = is_hardirq_stack(stack, cpu);
22069 end_stack = is_softirq_stack(stack, cpu);
22071 - context = task_thread_info(task);
22072 - bp = ops->walk_stack(context, stack, bp, ops, data,
22073 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22074 end_stack, &graph);
22076 /* Stop if not on irq stack */
22077 @@ -137,16 +136,17 @@ void show_regs(struct pt_regs *regs)
22078 unsigned int code_len = code_bytes;
22081 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22083 pr_emerg("Stack:\n");
22084 show_stack_log_lvl(NULL, regs, ®s->sp, 0, KERN_EMERG);
22088 - ip = (u8 *)regs->ip - code_prologue;
22089 + ip = (u8 *)regs->ip - code_prologue + cs_base;
22090 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22091 /* try starting at IP */
22092 - ip = (u8 *)regs->ip;
22093 + ip = (u8 *)regs->ip + cs_base;
22094 code_len = code_len - code_prologue + 1;
22096 for (i = 0; i < code_len; i++, ip++) {
22097 @@ -155,7 +155,7 @@ void show_regs(struct pt_regs *regs)
22098 pr_cont(" Bad EIP value.");
22101 - if (ip == (u8 *)regs->ip)
22102 + if (ip == (u8 *)regs->ip + cs_base)
22103 pr_cont(" <%02x>", c);
22105 pr_cont(" %02x", c);
22106 @@ -168,6 +168,7 @@ int is_valid_bugaddr(unsigned long ip)
22108 unsigned short ud2;
22110 + ip = ktla_ktva(ip);
22111 if (ip < PAGE_OFFSET)
22113 if (probe_kernel_address((unsigned short *)ip, ud2))
22114 @@ -175,3 +176,15 @@ int is_valid_bugaddr(unsigned long ip)
22116 return ud2 == 0x0b0f;
22119 +#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22120 +void __used pax_check_alloca(unsigned long size)
22122 + unsigned long sp = (unsigned long)&sp, stack_left;
22124 + /* all kernel stacks are of the same size */
22125 + stack_left = sp & (THREAD_SIZE - 1);
22126 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
22128 +EXPORT_SYMBOL(pax_check_alloca);
22130 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22131 index 5f1c626..1cba97e 100644
22132 --- a/arch/x86/kernel/dumpstack_64.c
22133 +++ b/arch/x86/kernel/dumpstack_64.c
22134 @@ -153,12 +153,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22135 const struct stacktrace_ops *ops, void *data)
22137 const unsigned cpu = get_cpu();
22138 - struct thread_info *tinfo;
22139 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22140 unsigned long dummy;
22144 + void *stack_start;
22148 @@ -179,7 +179,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22149 * current stack address. If the stacks consist of nested
22152 - tinfo = task_thread_info(task);
22154 unsigned long *stack_end;
22155 enum stack_type stype;
22156 @@ -202,7 +201,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22157 if (ops->stack(data, id) < 0)
22160 - bp = ops->walk_stack(tinfo, stack, bp, ops,
22161 + bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22162 data, stack_end, &graph);
22163 ops->stack(data, "<EOE>");
22165 @@ -210,6 +209,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22166 * second-to-last pointer (index -2 to end) in the
22169 + if ((u16)stack_end[-1] != __KERNEL_DS)
22171 stack = (unsigned long *) stack_end[-2];
22174 @@ -218,7 +219,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22176 if (ops->stack(data, "IRQ") < 0)
22178 - bp = ops->walk_stack(tinfo, stack, bp,
22179 + bp = ops->walk_stack(task, irq_stack, stack, bp,
22180 ops, data, stack_end, &graph);
22182 * We link to the next stack (which would be
22183 @@ -240,7 +241,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22185 * This handles the process stack:
22187 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22188 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22189 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22193 EXPORT_SYMBOL(dump_trace);
22194 @@ -347,8 +350,55 @@ int is_valid_bugaddr(unsigned long ip)
22196 unsigned short ud2;
22198 - if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
22199 + if (probe_kernel_address((unsigned short *)ip, ud2))
22202 return ud2 == 0x0b0f;
22205 +#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22206 +void __used pax_check_alloca(unsigned long size)
22208 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22209 + unsigned cpu, used;
22212 + /* check the process stack first */
22213 + stack_start = (unsigned long)task_stack_page(current);
22214 + stack_end = stack_start + THREAD_SIZE;
22215 + if (likely(stack_start <= sp && sp < stack_end)) {
22216 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
22217 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
22223 + /* check the irq stacks */
22224 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22225 + stack_start = stack_end - IRQ_STACK_SIZE;
22226 + if (stack_start <= sp && sp < stack_end) {
22227 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22229 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
22233 + /* check the exception stacks */
22235 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22236 + stack_start = stack_end - EXCEPTION_STKSZ;
22237 + if (stack_end && stack_start <= sp && sp < stack_end) {
22238 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22240 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
22246 + /* unknown stack */
22249 +EXPORT_SYMBOL(pax_check_alloca);
22251 diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22252 index e2ce85d..00ccad0 100644
22253 --- a/arch/x86/kernel/e820.c
22254 +++ b/arch/x86/kernel/e820.c
22255 @@ -802,8 +802,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22257 static void early_panic(char *msg)
22259 - early_printk(msg);
22261 + early_printk("%s", msg);
22262 + panic("%s", msg);
22265 static int userdef __initdata;
22266 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22267 index 89427d8..00c0d52 100644
22268 --- a/arch/x86/kernel/early_printk.c
22269 +++ b/arch/x86/kernel/early_printk.c
22271 #include <linux/pci_regs.h>
22272 #include <linux/pci_ids.h>
22273 #include <linux/errno.h>
22274 +#include <linux/sched.h>
22275 #include <asm/io.h>
22276 #include <asm/processor.h>
22277 #include <asm/fcntl.h>
22278 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22279 index 1c30976..71b41b9 100644
22280 --- a/arch/x86/kernel/entry_32.S
22281 +++ b/arch/x86/kernel/entry_32.S
22282 @@ -177,13 +177,154 @@
22283 /*CFI_REL_OFFSET gs, PT_GS*/
22285 .macro SET_KERNEL_GS reg
22287 +#ifdef CONFIG_CC_STACKPROTECTOR
22288 movl $(__KERNEL_STACK_CANARY), \reg
22289 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22290 + movl $(__USER_DS), \reg
22298 #endif /* CONFIG_X86_32_LAZY_GS */
22301 +.macro pax_enter_kernel
22302 +#ifdef CONFIG_PAX_KERNEXEC
22303 + call pax_enter_kernel
22307 +.macro pax_exit_kernel
22308 +#ifdef CONFIG_PAX_KERNEXEC
22309 + call pax_exit_kernel
22313 +#ifdef CONFIG_PAX_KERNEXEC
22314 +ENTRY(pax_enter_kernel)
22315 +#ifdef CONFIG_PARAVIRT
22318 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22323 + bts $X86_CR0_WP_BIT, %esi
22326 + cmp $__KERNEL_CS, %esi
22328 + ljmp $__KERNEL_CS, $3f
22329 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22331 +#ifdef CONFIG_PARAVIRT
22333 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22338 +#ifdef CONFIG_PARAVIRT
22343 +ENDPROC(pax_enter_kernel)
22345 +ENTRY(pax_exit_kernel)
22346 +#ifdef CONFIG_PARAVIRT
22351 + cmp $__KERNEXEC_KERNEL_CS, %esi
22353 +#ifdef CONFIG_PARAVIRT
22354 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22359 + btr $X86_CR0_WP_BIT, %esi
22360 + ljmp $__KERNEL_CS, $1f
22362 +#ifdef CONFIG_PARAVIRT
22364 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22369 +#ifdef CONFIG_PARAVIRT
22374 +ENDPROC(pax_exit_kernel)
22377 + .macro pax_erase_kstack
22378 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22379 + call pax_erase_kstack
22383 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22385 + * ebp: thread_info
22387 +ENTRY(pax_erase_kstack)
22392 + mov TI_lowest_stack(%ebp), %edi
22393 + mov $-0xBEEF, %eax
22397 + and $THREAD_SIZE_asm - 1, %ecx
22415 + cmp $THREAD_SIZE_asm, %ecx
22423 + mov TI_task_thread_sp0(%ebp), %edi
22425 + mov %edi, TI_lowest_stack(%ebp)
22431 +ENDPROC(pax_erase_kstack)
22434 +.macro __SAVE_ALL _DS
22438 @@ -206,7 +347,7 @@
22439 CFI_REL_OFFSET ecx, 0
22441 CFI_REL_OFFSET ebx, 0
22442 - movl $(__USER_DS), %edx
22446 movl $(__KERNEL_PERCPU), %edx
22447 @@ -214,6 +355,15 @@
22452 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22453 + __SAVE_ALL __KERNEL_DS
22456 + __SAVE_ALL __USER_DS
22460 .macro RESTORE_INT_REGS
22463 @@ -297,7 +447,7 @@ ENTRY(ret_from_fork)
22467 -END(ret_from_fork)
22468 +ENDPROC(ret_from_fork)
22470 ENTRY(ret_from_kernel_thread)
22472 @@ -340,7 +490,15 @@ ret_from_intr:
22473 andl $SEGMENT_RPL_MASK, %eax
22475 cmpl $USER_RPL, %eax
22477 +#ifdef CONFIG_PAX_KERNEXEC
22478 + jae resume_userspace
22481 + jmp resume_kernel
22483 jb resume_kernel # not returning to v8086 or userspace
22486 ENTRY(resume_userspace)
22488 @@ -352,8 +510,8 @@ ENTRY(resume_userspace)
22489 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22490 # int/exception return?
22493 -END(ret_from_exception)
22494 + jmp restore_all_pax
22495 +ENDPROC(ret_from_exception)
22497 #ifdef CONFIG_PREEMPT
22498 ENTRY(resume_kernel)
22499 @@ -365,7 +523,7 @@ need_resched:
22501 call preempt_schedule_irq
22503 -END(resume_kernel)
22504 +ENDPROC(resume_kernel)
22508 @@ -395,33 +553,45 @@ sysenter_past_esp:
22509 /*CFI_REL_OFFSET cs, 0*/
22511 * Push current_thread_info()->sysenter_return to the stack.
22512 - * A tiny bit of offset fixup is necessary: TI_sysenter_return
22513 - * is relative to thread_info, which is at the bottom of the
22514 - * kernel stack page. 4*4 means the 4 words pushed above;
22515 - * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
22516 - * and THREAD_SIZE takes us to the bottom.
22518 - pushl_cfi ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
22520 CFI_REL_OFFSET eip, 0
22524 + GET_THREAD_INFO(%ebp)
22525 + movl TI_sysenter_return(%ebp),%ebp
22526 + movl %ebp,PT_EIP(%esp)
22527 ENABLE_INTERRUPTS(CLBR_NONE)
22530 * Load the potential sixth argument from user stack.
22531 * Careful about security.
22533 + movl PT_OLDESP(%esp),%ebp
22535 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22536 + mov PT_OLDSS(%esp),%ds
22537 +1: movl %ds:(%ebp),%ebp
22541 cmpl $__PAGE_OFFSET-3,%ebp
22544 1: movl (%ebp),%ebp
22548 movl %ebp,PT_EBP(%esp)
22549 _ASM_EXTABLE(1b,syscall_fault)
22551 GET_THREAD_INFO(%ebp)
22553 +#ifdef CONFIG_PAX_RANDKSTACK
22557 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22560 @@ -437,12 +607,24 @@ sysenter_after_call:
22561 testl $_TIF_ALLWORK_MASK, %ecx
22565 +#ifdef CONFIG_PAX_RANDKSTACK
22568 + call pax_randomize_kstack
22574 /* if something modifies registers it must also disable sysexit */
22575 movl PT_EIP(%esp), %edx
22576 movl PT_OLDESP(%esp), %ecx
22579 1: mov PT_FS(%esp), %fs
22580 +2: mov PT_DS(%esp), %ds
22581 +3: mov PT_ES(%esp), %es
22583 ENABLE_INTERRUPTS_SYSEXIT
22585 @@ -456,6 +638,9 @@ sysenter_audit:
22586 pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
22587 pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
22588 call __audit_syscall_entry
22592 popl_cfi %ecx /* get that remapped edx off the stack */
22593 popl_cfi %ecx /* get that remapped esi off the stack */
22594 movl PT_EAX(%esp),%eax /* reload syscall number */
22595 @@ -482,10 +667,16 @@ sysexit_audit:
22598 .pushsection .fixup,"ax"
22599 -2: movl $0,PT_FS(%esp)
22600 +4: movl $0,PT_FS(%esp)
22602 +5: movl $0,PT_DS(%esp)
22604 +6: movl $0,PT_ES(%esp)
22607 - _ASM_EXTABLE(1b,2b)
22608 + _ASM_EXTABLE(1b,4b)
22609 + _ASM_EXTABLE(2b,5b)
22610 + _ASM_EXTABLE(3b,6b)
22612 ENDPROC(ia32_sysenter_target)
22614 @@ -496,6 +687,11 @@ ENTRY(system_call)
22615 pushl_cfi %eax # save orig_eax
22617 GET_THREAD_INFO(%ebp)
22619 +#ifdef CONFIG_PAX_RANDKSTACK
22623 # system call tracing in operation / emulation
22624 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22625 jnz syscall_trace_entry
22626 @@ -515,6 +711,15 @@ syscall_exit:
22627 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22628 jnz syscall_exit_work
22632 +#ifdef CONFIG_PAX_RANDKSTACK
22634 + call pax_randomize_kstack
22641 restore_all_notrace:
22642 @@ -569,14 +774,34 @@ ldt_ss:
22643 * compensating for the offset by changing to the ESPFIX segment with
22644 * a base address that matches for the difference.
22646 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22647 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22648 mov %esp, %edx /* load kernel esp */
22649 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22650 mov %dx, %ax /* eax: new kernel esp */
22651 sub %eax, %edx /* offset (low word is 0) */
22653 + movl PER_CPU_VAR(cpu_number), %ebx
22654 + shll $PAGE_SHIFT_asm, %ebx
22655 + addl $cpu_gdt_table, %ebx
22657 + movl $cpu_gdt_table, %ebx
22660 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22661 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22663 +#ifdef CONFIG_PAX_KERNEXEC
22665 + btr $X86_CR0_WP_BIT, %esi
22669 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22670 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22672 +#ifdef CONFIG_PAX_KERNEXEC
22673 + bts $X86_CR0_WP_BIT, %esi
22677 pushl_cfi $__ESPFIX_SS
22678 pushl_cfi %eax /* new kernel esp */
22679 /* Disable interrupts, but do not irqtrace this section: we
22680 @@ -606,20 +831,18 @@ work_resched:
22681 movl TI_flags(%ebp), %ecx
22682 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22683 # than syscall tracing?
22685 + jz restore_all_pax
22686 testb $_TIF_NEED_RESCHED, %cl
22689 work_notifysig: # deal with pending signals and
22690 # notify-resume requests
22693 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22695 jnz work_notifysig_v86 # returning to kernel-space or
22702 ENABLE_INTERRUPTS(CLBR_NONE)
22703 @@ -640,7 +863,7 @@ work_notifysig_v86:
22708 +ENDPROC(work_pending)
22710 # perform syscall exit tracing
22712 @@ -648,11 +871,14 @@ syscall_trace_entry:
22713 movl $-ENOSYS,PT_EAX(%esp)
22715 call syscall_trace_enter
22719 /* What it returned is what we'll actually use. */
22720 cmpl $(NR_syscalls), %eax
22723 -END(syscall_trace_entry)
22724 +ENDPROC(syscall_trace_entry)
22726 # perform syscall exit tracing
22728 @@ -665,26 +891,30 @@ syscall_exit_work:
22730 call syscall_trace_leave
22731 jmp resume_userspace
22732 -END(syscall_exit_work)
22733 +ENDPROC(syscall_exit_work)
22736 RING0_INT_FRAME # can't unwind into user space anyway
22738 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22743 GET_THREAD_INFO(%ebp)
22744 movl $-EFAULT,PT_EAX(%esp)
22745 jmp resume_userspace
22746 -END(syscall_fault)
22747 +ENDPROC(syscall_fault)
22751 jmp syscall_after_call
22752 -END(syscall_badsys)
22753 +ENDPROC(syscall_badsys)
22757 jmp sysenter_after_call
22758 -END(sysenter_badsys)
22759 +ENDPROC(sysenter_badsys)
22762 .macro FIXUP_ESPFIX_STACK
22763 @@ -697,8 +927,15 @@ END(sysenter_badsys)
22765 #ifdef CONFIG_X86_ESPFIX32
22766 /* fixup the stack */
22767 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22768 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22770 + movl PER_CPU_VAR(cpu_number), %ebx
22771 + shll $PAGE_SHIFT_asm, %ebx
22772 + addl $cpu_gdt_table, %ebx
22774 + movl $cpu_gdt_table, %ebx
22776 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
22777 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
22779 addl %esp, %eax /* the adjusted stack pointer */
22780 pushl_cfi $__KERNEL_DS
22781 @@ -737,7 +974,7 @@ ENTRY(irq_entries_start)
22782 CFI_ADJUST_CFA_OFFSET -4
22785 -END(irq_entries_start)
22786 +ENDPROC(irq_entries_start)
22789 * the CPU automatically disables interrupts when executing an IRQ vector,
22790 @@ -790,7 +1027,7 @@ ENTRY(coprocessor_error)
22791 pushl_cfi $do_coprocessor_error
22794 -END(coprocessor_error)
22795 +ENDPROC(coprocessor_error)
22797 ENTRY(simd_coprocessor_error)
22799 @@ -806,7 +1043,7 @@ ENTRY(simd_coprocessor_error)
22803 -END(simd_coprocessor_error)
22804 +ENDPROC(simd_coprocessor_error)
22806 ENTRY(device_not_available)
22808 @@ -815,18 +1052,18 @@ ENTRY(device_not_available)
22809 pushl_cfi $do_device_not_available
22812 -END(device_not_available)
22813 +ENDPROC(device_not_available)
22815 #ifdef CONFIG_PARAVIRT
22818 _ASM_EXTABLE(native_iret, iret_exc)
22820 +ENDPROC(native_iret)
22822 ENTRY(native_irq_enable_sysexit)
22825 -END(native_irq_enable_sysexit)
22826 +ENDPROC(native_irq_enable_sysexit)
22830 @@ -836,7 +1073,7 @@ ENTRY(overflow)
22831 pushl_cfi $do_overflow
22839 @@ -845,7 +1082,7 @@ ENTRY(bounds)
22840 pushl_cfi $do_bounds
22848 @@ -854,7 +1091,7 @@ ENTRY(invalid_op)
22849 pushl_cfi $do_invalid_op
22853 +ENDPROC(invalid_op)
22855 ENTRY(coprocessor_segment_overrun)
22857 @@ -863,7 +1100,7 @@ ENTRY(coprocessor_segment_overrun)
22858 pushl_cfi $do_coprocessor_segment_overrun
22861 -END(coprocessor_segment_overrun)
22862 +ENDPROC(coprocessor_segment_overrun)
22866 @@ -871,7 +1108,7 @@ ENTRY(invalid_TSS)
22867 pushl_cfi $do_invalid_TSS
22871 +ENDPROC(invalid_TSS)
22873 ENTRY(segment_not_present)
22875 @@ -879,7 +1116,7 @@ ENTRY(segment_not_present)
22876 pushl_cfi $do_segment_not_present
22879 -END(segment_not_present)
22880 +ENDPROC(segment_not_present)
22882 ENTRY(stack_segment)
22884 @@ -887,7 +1124,7 @@ ENTRY(stack_segment)
22885 pushl_cfi $do_stack_segment
22888 -END(stack_segment)
22889 +ENDPROC(stack_segment)
22891 ENTRY(alignment_check)
22893 @@ -895,7 +1132,7 @@ ENTRY(alignment_check)
22894 pushl_cfi $do_alignment_check
22897 -END(alignment_check)
22898 +ENDPROC(alignment_check)
22900 ENTRY(divide_error)
22902 @@ -904,7 +1141,7 @@ ENTRY(divide_error)
22903 pushl_cfi $do_divide_error
22907 +ENDPROC(divide_error)
22909 #ifdef CONFIG_X86_MCE
22910 ENTRY(machine_check)
22911 @@ -914,7 +1151,7 @@ ENTRY(machine_check)
22912 pushl_cfi machine_check_vector
22915 -END(machine_check)
22916 +ENDPROC(machine_check)
22919 ENTRY(spurious_interrupt_bug)
22920 @@ -924,7 +1161,7 @@ ENTRY(spurious_interrupt_bug)
22921 pushl_cfi $do_spurious_interrupt_bug
22924 -END(spurious_interrupt_bug)
22925 +ENDPROC(spurious_interrupt_bug)
22928 /* Xen doesn't set %esp to be precisely what the normal sysenter
22929 @@ -1033,7 +1270,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
22936 ENTRY(ftrace_caller)
22938 @@ -1063,7 +1300,7 @@ ftrace_graph_call:
22942 -END(ftrace_caller)
22943 +ENDPROC(ftrace_caller)
22945 ENTRY(ftrace_regs_caller)
22946 pushf /* push flags before compare (in cs location) */
22947 @@ -1161,7 +1398,7 @@ trace:
22953 #endif /* CONFIG_DYNAMIC_FTRACE */
22954 #endif /* CONFIG_FUNCTION_TRACER */
22956 @@ -1179,7 +1416,7 @@ ENTRY(ftrace_graph_caller)
22960 -END(ftrace_graph_caller)
22961 +ENDPROC(ftrace_graph_caller)
22963 .globl return_to_handler
22965 @@ -1233,15 +1470,18 @@ error_code:
22966 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
22969 - movl $(__USER_DS), %ecx
22970 + movl $(__KERNEL_DS), %ecx
22977 movl %esp,%eax # pt_regs pointer
22979 jmp ret_from_exception
22982 +ENDPROC(page_fault)
22985 * Debug traps and NMI can happen at the one SYSENTER instruction
22986 @@ -1284,7 +1524,7 @@ debug_stack_correct:
22988 jmp ret_from_exception
22994 * NMI is doubly nasty. It can happen _while_ we're handling
22995 @@ -1324,6 +1564,9 @@ nmi_stack_correct:
22996 xorl %edx,%edx # zero error code
22997 movl %esp,%eax # pt_regs pointer
23002 jmp restore_all_notrace
23005 @@ -1361,13 +1604,16 @@ nmi_espfix_stack:
23006 FIXUP_ESPFIX_STACK # %eax == %esp
23007 xorl %edx,%edx # zero error code
23013 lss 12+4(%esp), %esp # back to espfix stack
23014 CFI_ADJUST_CFA_OFFSET -24
23023 @@ -1380,14 +1626,14 @@ ENTRY(int3)
23025 jmp ret_from_exception
23030 ENTRY(general_protection)
23032 pushl_cfi $do_general_protection
23035 -END(general_protection)
23036 +ENDPROC(general_protection)
23038 #ifdef CONFIG_KVM_GUEST
23039 ENTRY(async_page_fault)
23040 @@ -1396,6 +1642,6 @@ ENTRY(async_page_fault)
23041 pushl_cfi $do_async_page_fault
23044 -END(async_page_fault)
23045 +ENDPROC(async_page_fault)
23048 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23049 index 02c2eff..bd9cb4d 100644
23050 --- a/arch/x86/kernel/entry_64.S
23051 +++ b/arch/x86/kernel/entry_64.S
23053 #include <asm/smap.h>
23054 #include <asm/pgtable_types.h>
23055 #include <linux/err.h>
23056 +#include <asm/pgtable.h>
23057 +#include <asm/alternative-asm.h>
23059 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23060 #include <linux/elf-em.h>
23061 @@ -64,6 +66,401 @@ ENTRY(native_usergs_sysret64)
23062 ENDPROC(native_usergs_sysret64)
23063 #endif /* CONFIG_PARAVIRT */
23065 + .macro ljmpq sel, off
23066 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23067 + .byte 0x48; ljmp *1234f(%rip)
23068 + .pushsection .rodata
23070 + 1234: .quad \off; .word \sel
23079 + .macro pax_enter_kernel
23080 + pax_set_fptr_mask
23081 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23082 + call pax_enter_kernel
23086 + .macro pax_exit_kernel
23087 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23088 + call pax_exit_kernel
23093 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23094 +ENTRY(pax_enter_kernel)
23097 +#ifdef CONFIG_PARAVIRT
23098 + PV_SAVE_REGS(CLBR_RDI)
23101 +#ifdef CONFIG_PAX_KERNEXEC
23103 + bts $X86_CR0_WP_BIT,%rdi
23106 + cmp $__KERNEL_CS,%edi
23111 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23112 + ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
23116 + mov $__KERNEL_DS,%edi
23122 +113: sub $4097,%rdi
23125 + mov $__UDEREF_KERNEL_DS,%edi
23130 +#ifdef CONFIG_PARAVIRT
23131 + PV_RESTORE_REGS(CLBR_RDI)
23135 + pax_force_retaddr
23138 +#ifdef CONFIG_PAX_KERNEXEC
23139 +2: ljmpq __KERNEL_CS,1b
23140 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
23141 +4: SET_RDI_INTO_CR0
23144 +ENDPROC(pax_enter_kernel)
23146 +ENTRY(pax_exit_kernel)
23149 +#ifdef CONFIG_PARAVIRT
23150 + PV_SAVE_REGS(CLBR_RDI)
23153 +#ifdef CONFIG_PAX_KERNEXEC
23155 + cmp $__KERNEXEC_KERNEL_CS,%edi
23158 + bts $X86_CR0_WP_BIT,%rdi
23163 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23164 + ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
23166 + cmp $__UDEREF_KERNEL_DS,%edi
23172 +112: add $4097,%rdi
23175 + mov $__KERNEL_DS,%edi
23180 +#ifdef CONFIG_PARAVIRT
23181 + PV_RESTORE_REGS(CLBR_RDI);
23185 + pax_force_retaddr
23188 +#ifdef CONFIG_PAX_KERNEXEC
23189 +2: GET_CR0_INTO_RDI
23190 + btr $X86_CR0_WP_BIT,%rdi
23192 + ljmpq __KERNEL_CS,3f
23193 +3: SET_RDI_INTO_CR0
23198 +ENDPROC(pax_exit_kernel)
23201 + .macro pax_enter_kernel_user
23202 + pax_set_fptr_mask
23203 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23204 + call pax_enter_kernel_user
23208 + .macro pax_exit_kernel_user
23209 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23210 + call pax_exit_kernel_user
23212 +#ifdef CONFIG_PAX_RANDKSTACK
23215 + call pax_randomize_kstack
23221 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23222 +ENTRY(pax_enter_kernel_user)
23226 +#ifdef CONFIG_PARAVIRT
23227 + PV_SAVE_REGS(CLBR_RDI)
23230 + ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
23242 + add $__START_KERNEL_map,%rbx
23243 + sub phys_base(%rip),%rbx
23245 +#ifdef CONFIG_PARAVIRT
23246 + cmpl $0, pv_info+PARAVIRT_enabled
23250 + .rept USER_PGD_PTRS
23251 + mov i*8(%rbx),%rsi
23253 + lea i*8(%rbx),%rdi
23254 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23263 + .rept USER_PGD_PTRS
23264 + movb $0,i*8(%rbx)
23268 +2: SET_RDI_INTO_CR3
23270 +#ifdef CONFIG_PAX_KERNEXEC
23272 + bts $X86_CR0_WP_BIT,%rdi
23278 +#ifdef CONFIG_PARAVIRT
23279 + PV_RESTORE_REGS(CLBR_RDI)
23284 + pax_force_retaddr
23287 +ENDPROC(pax_enter_kernel_user)
23289 +ENTRY(pax_exit_kernel_user)
23293 +#ifdef CONFIG_PARAVIRT
23294 + PV_SAVE_REGS(CLBR_RDI)
23298 + ALTERNATIVE "jmp 1f", "", X86_FEATURE_PCID
23309 +#ifdef CONFIG_PAX_KERNEXEC
23311 + btr $X86_CR0_WP_BIT,%rdi
23316 + add $__START_KERNEL_map,%rbx
23317 + sub phys_base(%rip),%rbx
23319 +#ifdef CONFIG_PARAVIRT
23320 + cmpl $0, pv_info+PARAVIRT_enabled
23323 + .rept USER_PGD_PTRS
23324 + mov i*8(%rbx),%rsi
23326 + lea i*8(%rbx),%rdi
23327 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23335 + .rept USER_PGD_PTRS
23336 + movb $0x67,i*8(%rbx)
23341 +#ifdef CONFIG_PARAVIRT
23342 + PV_RESTORE_REGS(CLBR_RDI)
23347 + pax_force_retaddr
23350 +ENDPROC(pax_exit_kernel_user)
23353 + .macro pax_enter_kernel_nmi
23354 + pax_set_fptr_mask
23356 +#ifdef CONFIG_PAX_KERNEXEC
23358 + bts $X86_CR0_WP_BIT,%rdi
23365 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23366 + ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
23374 + mov $__UDEREF_KERNEL_DS,%edi
23380 + .macro pax_exit_kernel_nmi
23381 +#ifdef CONFIG_PAX_KERNEXEC
23385 + btr $X86_CR0_WP_BIT,%rdi
23390 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23397 + mov $__KERNEL_DS,%edi
23403 + .macro pax_erase_kstack
23404 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23405 + call pax_erase_kstack
23409 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23410 +ENTRY(pax_erase_kstack)
23416 + GET_THREAD_INFO(%r11)
23417 + mov TI_lowest_stack(%r11), %rdi
23418 + mov $-0xBEEF, %rax
23422 + and $THREAD_SIZE_asm - 1, %ecx
23440 + cmp $THREAD_SIZE_asm, %rcx
23448 + mov TI_task_thread_sp0(%r11), %rdi
23450 + mov %rdi, TI_lowest_stack(%r11)
23456 + pax_force_retaddr
23458 +ENDPROC(pax_erase_kstack)
23461 .macro TRACE_IRQS_IRETQ
23462 #ifdef CONFIG_TRACE_IRQFLAGS
23463 @@ -100,7 +497,7 @@ ENDPROC(native_usergs_sysret64)
23466 .macro TRACE_IRQS_IRETQ_DEBUG
23467 - bt $9,EFLAGS(%rsp) /* interrupts off? */
23468 + bt $X86_EFLAGS_IF_BIT,EFLAGS(%rsp) /* interrupts off? */
23470 TRACE_IRQS_ON_DEBUG
23472 @@ -221,14 +618,6 @@ GLOBAL(system_call_after_swapgs)
23473 /* Construct struct pt_regs on stack */
23474 pushq_cfi $__USER_DS /* pt_regs->ss */
23475 pushq_cfi PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
23477 - * Re-enable interrupts.
23478 - * We use 'rsp_scratch' as a scratch space, hence irq-off block above
23479 - * must execute atomically in the face of possible interrupt-driven
23480 - * task preemption. We must enable interrupts only after we're done
23481 - * with using rsp_scratch:
23483 - ENABLE_INTERRUPTS(CLBR_NONE)
23484 pushq_cfi %r11 /* pt_regs->flags */
23485 pushq_cfi $__USER_CS /* pt_regs->cs */
23486 pushq_cfi %rcx /* pt_regs->ip */
23487 @@ -246,7 +635,27 @@ GLOBAL(system_call_after_swapgs)
23488 sub $(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */
23489 CFI_ADJUST_CFA_OFFSET 6*8
23491 - testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
23492 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
23493 + movq_cfi r12, R12
23496 + pax_enter_kernel_user
23498 +#ifdef CONFIG_PAX_RANDKSTACK
23503 + * Re-enable interrupts.
23504 + * We use 'rsp_scratch' as a scratch space, hence irq-off block above
23505 + * must execute atomically in the face of possible interrupt-driven
23506 + * task preemption. We must enable interrupts only after we're done
23507 + * with using rsp_scratch:
23509 + ENABLE_INTERRUPTS(CLBR_NONE)
23511 + GET_THREAD_INFO(%rcx)
23512 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23514 system_call_fastpath:
23515 #if __SYSCALL_MASK == ~0
23516 @@ -279,10 +688,13 @@ system_call_fastpath:
23517 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
23520 - testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
23521 + GET_THREAD_INFO(%rcx)
23522 + testl $_TIF_ALLWORK_MASK,TI_flags(%rcx)
23523 jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */
23526 + pax_exit_kernel_user
23529 RESTORE_C_REGS_EXCEPT_RCX_R11
23530 movq RIP(%rsp),%rcx
23531 @@ -316,6 +728,9 @@ tracesys:
23532 call syscall_trace_enter_phase1
23534 jnz tracesys_phase2 /* if needed, run the slow path */
23538 RESTORE_C_REGS_EXCEPT_RAX /* else restore clobbered regs */
23539 movq ORIG_RAX(%rsp), %rax
23540 jmp system_call_fastpath /* and return to the fast path */
23541 @@ -327,6 +742,8 @@ tracesys_phase2:
23543 call syscall_trace_enter_phase2
23548 * Reload registers from stack in case ptrace changed them.
23549 * We don't reload %rax because syscall_trace_entry_phase2() returned
23550 @@ -364,6 +781,8 @@ GLOBAL(int_with_check)
23553 andl $~TS_COMPAT,TI_status(%rcx)
23554 + pax_exit_kernel_user
23558 /* Either reschedule or signal or syscall exit tracking needed. */
23559 @@ -485,7 +904,7 @@ opportunistic_sysret_failed:
23561 jmp restore_c_regs_and_iret
23564 +ENDPROC(system_call)
23567 .macro FORK_LIKE func
23568 @@ -495,7 +914,7 @@ ENTRY(stub_\func)
23573 +ENDPROC(stub_\func)
23577 @@ -519,7 +938,7 @@ return_from_execve:
23578 movq %rax,RAX(%rsp)
23579 jmp int_ret_from_sys_call
23582 +ENDPROC(stub_execve)
23584 * Remaining execve stubs are only 7 bytes long.
23585 * ENTRY() often aligns to 16 bytes, which in this case has no benefits.
23586 @@ -531,7 +950,7 @@ GLOBAL(stub_execveat)
23588 jmp return_from_execve
23590 -END(stub_execveat)
23591 +ENDPROC(stub_execveat)
23593 #ifdef CONFIG_X86_X32_ABI
23595 @@ -541,7 +960,7 @@ GLOBAL(stub_x32_execve)
23596 call compat_sys_execve
23597 jmp return_from_execve
23599 -END(stub_x32_execve)
23600 +ENDPROC(stub_x32_execve)
23602 GLOBAL(stub_x32_execveat)
23604 @@ -549,7 +968,7 @@ GLOBAL(stub_x32_execveat)
23605 call compat_sys_execveat
23606 jmp return_from_execve
23608 -END(stub_x32_execveat)
23609 +ENDPROC(stub_x32_execveat)
23612 #ifdef CONFIG_IA32_EMULATION
23613 @@ -592,7 +1011,7 @@ return_from_stub:
23614 movq %rax,RAX(%rsp)
23615 jmp int_ret_from_sys_call
23617 -END(stub_rt_sigreturn)
23618 +ENDPROC(stub_rt_sigreturn)
23620 #ifdef CONFIG_X86_X32_ABI
23621 ENTRY(stub_x32_rt_sigreturn)
23622 @@ -602,7 +1021,7 @@ ENTRY(stub_x32_rt_sigreturn)
23623 call sys32_x32_rt_sigreturn
23624 jmp return_from_stub
23626 -END(stub_x32_rt_sigreturn)
23627 +ENDPROC(stub_x32_rt_sigreturn)
23631 @@ -622,7 +1041,7 @@ ENTRY(ret_from_fork)
23635 - testl $3,CS(%rsp) # from kernel_thread?
23636 + testb $3,CS(%rsp) # from kernel_thread?
23639 * By the time we get here, we have no idea whether our pt_regs,
23640 @@ -641,7 +1060,7 @@ ENTRY(ret_from_fork)
23642 jmp int_ret_from_sys_call
23644 -END(ret_from_fork)
23645 +ENDPROC(ret_from_fork)
23648 * Build the entry stubs with some assembler magic.
23649 @@ -659,7 +1078,7 @@ ENTRY(irq_entries_start)
23653 -END(irq_entries_start)
23654 +ENDPROC(irq_entries_start)
23657 * Interrupt entry/exit.
23658 @@ -672,21 +1091,13 @@ END(irq_entries_start)
23659 /* 0(%rsp): ~(interrupt number) */
23660 .macro interrupt func
23663 - * Since nothing in interrupt handling code touches r12...r15 members
23664 - * of "struct pt_regs", and since interrupts can nest, we can save
23665 - * four stack slots and simultaneously provide
23666 - * an unwind-friendly stack layout by saving "truncated" pt_regs
23667 - * exactly up to rbp slot, without these members.
23669 - ALLOC_PT_GPREGS_ON_STACK -RBP
23671 - /* this goes to 0(%rsp) for unwinder, not for saving the value: */
23672 - SAVE_EXTRA_REGS_RBP -RBP
23673 + ALLOC_PT_GPREGS_ON_STACK
23677 - leaq -RBP(%rsp),%rdi /* arg1 for \func (pointer to pt_regs) */
23678 + movq %rsp,%rdi /* arg1 for \func (pointer to pt_regs) */
23680 - testl $3, CS-RBP(%rsp)
23681 + testb $3, CS(%rsp)
23685 @@ -709,8 +1120,20 @@ END(irq_entries_start)
23686 CFI_ESCAPE 0x0f /* DW_CFA_def_cfa_expression */, 6, \
23687 0x77 /* DW_OP_breg7 (rsp) */, 0, \
23688 0x06 /* DW_OP_deref */, \
23689 - 0x08 /* DW_OP_const1u */, SIZEOF_PTREGS-RBP, \
23690 + 0x08 /* DW_OP_const1u */, SIZEOF_PTREGS, \
23691 0x22 /* DW_OP_plus */
23693 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23694 + testb $3, CS(%rdi)
23698 +1: pax_enter_kernel_user
23704 /* We entered an interrupt context - irqs are off: */
23707 @@ -735,13 +1158,12 @@ ret_from_intr:
23709 /* Restore saved previous stack */
23711 - CFI_DEF_CFA rsi,SIZEOF_PTREGS-RBP /* reg/off reset after def_cfa_expr */
23712 - /* return code expects complete pt_regs - adjust rsp accordingly: */
23713 - leaq -RBP(%rsi),%rsp
23714 + CFI_DEF_CFA rsi,SIZEOF_PTREGS /* reg/off reset after def_cfa_expr */
23716 CFI_DEF_CFA_REGISTER rsp
23717 - CFI_ADJUST_CFA_OFFSET RBP
23718 + CFI_ADJUST_CFA_OFFSET 0
23720 - testl $3,CS(%rsp)
23721 + testb $3,CS(%rsp)
23723 /* Interrupt came from user space */
23725 @@ -763,6 +1185,8 @@ retint_swapgs: /* return to user-space */
23726 * The iretq could re-enable interrupts:
23728 DISABLE_INTERRUPTS(CLBR_ANY)
23729 + pax_exit_kernel_user
23730 +# pax_erase_kstack
23734 @@ -781,6 +1205,21 @@ retint_kernel:
23741 +#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
23742 + /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
23743 + * namely calling EFI runtime services with a phys mapping. We're
23744 + * starting off with NOPs and patch in the real instrumentation
23745 + * (BTS/OR) before starting any userland process; even before starting
23748 + ALTERNATIVE "", "pax_force_retaddr 16*8", X86_FEATURE_ALWAYS
23750 + pax_force_retaddr RIP
23754 * The iretq could re-enable interrupts:
23756 @@ -793,8 +1232,6 @@ retint_kernel:
23757 restore_c_regs_and_iret:
23759 REMOVE_PT_GPREGS_FROM_STACK 8
23765 @@ -824,15 +1261,15 @@ native_irq_return_ldt:
23767 movq PER_CPU_VAR(espfix_waddr),%rdi
23768 movq %rax,(0*8)(%rdi) /* RAX */
23769 - movq (2*8)(%rsp),%rax /* RIP */
23770 + movq (2*8 + RIP-RIP)(%rsp),%rax /* RIP */
23771 movq %rax,(1*8)(%rdi)
23772 - movq (3*8)(%rsp),%rax /* CS */
23773 + movq (2*8 + CS-RIP)(%rsp),%rax /* CS */
23774 movq %rax,(2*8)(%rdi)
23775 - movq (4*8)(%rsp),%rax /* RFLAGS */
23776 + movq (2*8 + EFLAGS-RIP)(%rsp),%rax /* RFLAGS */
23777 movq %rax,(3*8)(%rdi)
23778 - movq (6*8)(%rsp),%rax /* SS */
23779 + movq (2*8 + SS-RIP)(%rsp),%rax /* SS */
23780 movq %rax,(5*8)(%rdi)
23781 - movq (5*8)(%rsp),%rax /* RSP */
23782 + movq (2*8 + RSP-RIP)(%rsp),%rax /* RSP */
23783 movq %rax,(4*8)(%rdi)
23784 andl $0xffff0000,%eax
23786 @@ -875,7 +1312,7 @@ retint_signal:
23787 jmp retint_with_reschedule
23790 -END(common_interrupt)
23791 +ENDPROC(common_interrupt)
23795 @@ -889,7 +1326,7 @@ ENTRY(\sym)
23803 #ifdef CONFIG_TRACING
23804 @@ -962,7 +1399,7 @@ apicinterrupt IRQ_WORK_VECTOR \
23806 * Exception entry points.
23808 -#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
23809 +#define CPU_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
23811 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
23813 @@ -1018,6 +1455,12 @@ ENTRY(\sym)
23816 .if \shift_ist != -1
23818 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
23819 + lea cpu_tss(%r13), %r13
23821 + lea cpu_tss(%rip), %r13
23823 subq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
23826 @@ -1065,7 +1508,7 @@ ENTRY(\sym)
23834 #ifdef CONFIG_TRACING
23835 @@ -1106,9 +1549,10 @@ gs_change:
23836 2: mfence /* workaround */
23839 + pax_force_retaddr
23842 -END(native_load_gs_index)
23843 +ENDPROC(native_load_gs_index)
23845 _ASM_EXTABLE(gs_change,bad_gs)
23846 .section .fixup,"ax"
23847 @@ -1136,9 +1580,10 @@ ENTRY(do_softirq_own_stack)
23848 CFI_DEF_CFA_REGISTER rsp
23849 CFI_ADJUST_CFA_OFFSET -8
23850 decl PER_CPU_VAR(irq_count)
23851 + pax_force_retaddr
23854 -END(do_softirq_own_stack)
23855 +ENDPROC(do_softirq_own_stack)
23858 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
23859 @@ -1179,7 +1624,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
23863 -END(xen_do_hypervisor_callback)
23864 +ENDPROC(xen_do_hypervisor_callback)
23867 * Hypervisor uses this for application faults while it executes.
23868 @@ -1240,7 +1685,7 @@ ENTRY(xen_failsafe_callback)
23872 -END(xen_failsafe_callback)
23873 +ENDPROC(xen_failsafe_callback)
23875 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
23876 xen_hvm_callback_vector xen_evtchn_do_upcall
23877 @@ -1286,9 +1731,39 @@ ENTRY(paranoid_entry)
23878 js 1f /* negative -> in kernel */
23883 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23884 + testb $3, CS+8(%rsp)
23888 +1: pax_enter_kernel_user
23893 + pax_force_retaddr
23896 -END(paranoid_entry)
23897 +ENDPROC(paranoid_entry)
23899 +ENTRY(paranoid_entry_nmi)
23900 + XCPT_FRAME 1 15*8
23903 + SAVE_EXTRA_REGS 8
23905 + movl $MSR_GS_BASE,%ecx
23908 + js 1f /* negative -> in kernel */
23911 +1: pax_enter_kernel_nmi
23912 + pax_force_retaddr
23915 +ENDPROC(paranoid_entry_nmi)
23918 * "Paranoid" exit path from exception stack. This is invoked
23919 @@ -1305,20 +1780,27 @@ ENTRY(paranoid_exit)
23921 DISABLE_INTERRUPTS(CLBR_NONE)
23922 TRACE_IRQS_OFF_DEBUG
23923 - testl %ebx,%ebx /* swapgs needed? */
23924 + testl $1,%ebx /* swapgs needed? */
23925 jnz paranoid_exit_no_swapgs
23926 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23927 + pax_exit_kernel_user
23932 SWAPGS_UNSAFE_STACK
23933 jmp paranoid_exit_restore
23934 paranoid_exit_no_swapgs:
23936 TRACE_IRQS_IRETQ_DEBUG
23937 paranoid_exit_restore:
23940 REMOVE_PT_GPREGS_FROM_STACK 8
23941 + pax_force_retaddr_bts
23944 -END(paranoid_exit)
23945 +ENDPROC(paranoid_exit)
23948 * Save all registers in pt_regs, and switch gs if needed.
23949 @@ -1330,12 +1812,23 @@ ENTRY(error_entry)
23953 - testl $3,CS+8(%rsp)
23954 + testb $3,CS+8(%rsp)
23955 je error_kernelspace
23959 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23960 + testb $3, CS+8(%rsp)
23964 +1: pax_enter_kernel_user
23970 + pax_force_retaddr
23974 @@ -1370,7 +1863,7 @@ error_bad_iret:
23975 decl %ebx /* Return to usergs */
23979 +ENDPROC(error_entry)
23982 /* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
23983 @@ -1381,7 +1874,7 @@ ENTRY(error_exit)
23984 DISABLE_INTERRUPTS(CLBR_NONE)
23986 GET_THREAD_INFO(%rcx)
23990 LOCKDEP_SYS_EXIT_IRQ
23991 movl TI_flags(%rcx),%edx
23992 @@ -1390,7 +1883,7 @@ ENTRY(error_exit)
23997 +ENDPROC(error_exit)
23999 /* Runs on exception stack */
24001 @@ -1413,11 +1906,12 @@ ENTRY(nmi)
24002 * If the variable is not set and the stack is not the NMI
24004 * o Set the special variable on the stack
24005 - * o Copy the interrupt frame into a "saved" location on the stack
24006 - * o Copy the interrupt frame into a "copy" location on the stack
24007 + * o Copy the interrupt frame into an "outermost" location on the
24009 + * o Copy the interrupt frame into an "iret" location on the stack
24010 * o Continue processing the NMI
24011 * If the variable is set or the previous stack is the NMI stack:
24012 - * o Modify the "copy" location to jump to the repeate_nmi
24013 + * o Modify the "iret" location to jump to the repeat_nmi
24014 * o return back to the first NMI
24016 * Now on exit of the first NMI, we first clear the stack variable
24017 @@ -1426,32 +1920,177 @@ ENTRY(nmi)
24018 * a nested NMI that updated the copy interrupt stack frame, a
24019 * jump will be made to the repeat_nmi code that will handle the second
24022 + * However, espfix prevents us from directly returning to userspace
24023 + * with a single IRET instruction. Similarly, IRET to user mode
24024 + * can fault. We therefore handle NMIs from user space like
24025 + * other IST entries.
24028 /* Use %rdx as our temp variable throughout */
24030 CFI_REL_OFFSET rdx, 0
24032 + testb $3, CS-RIP+8(%rsp)
24033 + jz .Lnmi_from_kernel
24036 + * NMI from user mode. We need to run on the thread stack, but we
24037 + * can't go through the normal entry paths: NMIs are masked, and
24038 + * we don't want to enable interrupts, because then we'll end
24039 + * up in an awkward situation in which IRQs are on but NMIs
24046 + movq PER_CPU_VAR(kernel_stack), %rsp
24047 + pushq 5*8(%rdx) /* pt_regs->ss */
24048 + pushq 4*8(%rdx) /* pt_regs->rsp */
24049 + pushq 3*8(%rdx) /* pt_regs->flags */
24050 + pushq 2*8(%rdx) /* pt_regs->cs */
24051 + pushq 1*8(%rdx) /* pt_regs->rip */
24052 + pushq $-1 /* pt_regs->orig_ax */
24053 + pushq %rdi /* pt_regs->di */
24054 + pushq %rsi /* pt_regs->si */
24055 + pushq (%rdx) /* pt_regs->dx */
24056 + pushq %rcx /* pt_regs->cx */
24057 + pushq %rax /* pt_regs->ax */
24058 + pushq %r8 /* pt_regs->r8 */
24059 + pushq %r9 /* pt_regs->r9 */
24060 + pushq %r10 /* pt_regs->r10 */
24061 + pushq %r11 /* pt_regs->r11 */
24062 + pushq %rbx /* pt_regs->rbx */
24063 + pushq %rbp /* pt_regs->rbp */
24064 + pushq %r12 /* pt_regs->r12 */
24065 + pushq %r13 /* pt_regs->r13 */
24066 + pushq %r14 /* pt_regs->r14 */
24067 + pushq %r15 /* pt_regs->r15 */
24069 + pax_enter_kernel_nmi
24072 - * If %cs was not the kernel segment, then the NMI triggered in user
24073 - * space, which means it is definitely not nested.
24074 + * At this point we no longer need to worry about stack damage
24075 + * due to nesting -- we're on the normal thread stack and we're
24076 + * done with the NMI stack.
24078 - cmpl $__KERNEL_CS, 16(%rsp)
24085 + pax_exit_kernel_nmi
24088 + * Return back to user mode. We must *not* do the normal exit
24089 + * work, because we don't want to enable interrupts. Fortunately,
24090 + * do_nmi doesn't modify pt_regs.
24095 + * Open-code the entire return process for compatibility with varying
24096 + * register layouts across different kernel versions.
24099 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
24100 + movq R12(%rsp), %r12
24103 + addq $6*8, %rsp /* skip bx, bp, and r12-r15 */
24104 + popq %r11 /* pt_regs->r11 */
24105 + popq %r10 /* pt_regs->r10 */
24106 + popq %r9 /* pt_regs->r9 */
24107 + popq %r8 /* pt_regs->r8 */
24108 + popq %rax /* pt_regs->ax */
24109 + popq %rcx /* pt_regs->cx */
24110 + popq %rdx /* pt_regs->dx */
24111 + popq %rsi /* pt_regs->si */
24112 + popq %rdi /* pt_regs->di */
24113 + addq $8, %rsp /* skip orig_ax */
24116 +.Lnmi_from_kernel:
24118 + * Here's what our stack frame will look like:
24119 + * +---------------------------------------------------------+
24120 + * | original SS |
24121 + * | original Return RSP |
24122 + * | original RFLAGS |
24123 + * | original CS |
24124 + * | original RIP |
24125 + * +---------------------------------------------------------+
24126 + * | temp storage for rdx |
24127 + * +---------------------------------------------------------+
24128 + * | "NMI executing" variable |
24129 + * +---------------------------------------------------------+
24130 + * | iret SS } Copied from "outermost" frame |
24131 + * | iret Return RSP } on each loop iteration; overwritten |
24132 + * | iret RFLAGS } by a nested NMI to force another |
24133 + * | iret CS } iteration if needed. |
24135 + * +---------------------------------------------------------+
24136 + * | outermost SS } initialized in first_nmi; |
24137 + * | outermost Return RSP } will not be changed before |
24138 + * | outermost RFLAGS } NMI processing is done. |
24139 + * | outermost CS } Copied to "iret" frame on each |
24140 + * | outermost RIP } iteration. |
24141 + * +---------------------------------------------------------+
24143 + * +---------------------------------------------------------+
24145 + * The "original" frame is used by hardware. Before re-enabling
24146 + * NMIs, we need to be done with it, and we need to leave enough
24147 + * space for the asm code here.
24149 + * We return by executing IRET while RSP points to the "iret" frame.
24150 + * That will either return for real or it will loop back into NMI
24153 + * The "outermost" frame is copied to the "iret" frame on each
24154 + * iteration of the loop, so each iteration starts with the "iret"
24155 + * frame pointing to the final return target.
24159 + * If we interrupted kernel code between repeat_nmi and
24160 + * end_repeat_nmi, then we are a nested NMI. We must not
24161 + * modify the "iret" frame because it's being written by
24162 + * the outer NMI. That's okay: the outer NMI handler is
24163 + * about to call do_nmi anyway, so we can just
24164 + * resume the outer NMI.
24167 + movq $repeat_nmi, %rdx
24168 + cmpq 8(%rsp), %rdx
24170 + movq $end_repeat_nmi, %rdx
24171 + cmpq 8(%rsp), %rdx
24172 + ja nested_nmi_out
24176 - * Check the special variable on the stack to see if NMIs are
24178 + * Now check "NMI executing". If it's set, then we're nested.
24180 + * First check "NMI executing". If it's set, then we're nested.
24181 + * This will not detect if we interrupted an outer NMI just
24188 - * Now test if the previous stack was an NMI stack.
24189 - * We need the double check. We check the NMI stack to satisfy the
24190 - * race when the first NMI clears the variable before returning.
24191 - * We check the variable because the first NMI could be in a
24192 - * breakpoint routine using a breakpoint stack.
24193 + * Now test if the previous stack was an NMI stack. This covers
24194 + * the case where we interrupt an outer NMI after it clears
24195 + * "NMI executing" but before IRET. We need to be careful, though:
24196 + * there is one case in which RSP could point to the NMI stack
24197 + * despite there being no NMI active: naughty userspace controls
24198 + * RSP at the very beginning of the SYSCALL targets. We can
24199 + * pull a fast one on naughty userspace, though: we program
24200 + * SYSCALL to mask DF, so userspace cannot cause DF to be set
24201 + * if it controls the kernel's RSP. We set DF before we clear
24202 + * "NMI executing".
24204 lea 6*8(%rsp), %rdx
24205 /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
24206 @@ -1462,27 +2101,22 @@ ENTRY(nmi)
24207 cmpq %rdx, 4*8(%rsp)
24208 /* If it is below the NMI stack, it is a normal NMI */
24210 - /* Ah, it is within the NMI stack, treat it as nested */
24212 + /* Ah, it is within the NMI stack. */
24214 + testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
24215 + jz first_nmi /* RSP was user controlled. */
24219 + /* This is a nested NMI. */
24223 - * Do nothing if we interrupted the fixup in repeat_nmi.
24224 - * It's about to repeat the NMI handler, so we are fine
24225 - * with ignoring this one.
24226 + * Modify the "iret" frame to point to repeat_nmi, forcing another
24227 + * iteration of NMI handling.
24229 - movq $repeat_nmi, %rdx
24230 - cmpq 8(%rsp), %rdx
24232 - movq $end_repeat_nmi, %rdx
24233 - cmpq 8(%rsp), %rdx
24234 - ja nested_nmi_out
24237 - /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24238 - leaq -1*8(%rsp), %rdx
24241 CFI_ADJUST_CFA_OFFSET 1*8
24242 leaq -10*8(%rsp), %rdx
24243 pushq_cfi $__KERNEL_DS
24244 @@ -1499,60 +2133,24 @@ nested_nmi_out:
24248 - /* No need to check faults here */
24249 + /* We are returning to kernel mode, so this cannot result in a fault. */
24250 +# pax_force_retaddr_bts
24256 - * Because nested NMIs will use the pushed location that we
24257 - * stored in rdx, we must keep that space available.
24258 - * Here's what our stack frame will look like:
24259 - * +-------------------------+
24260 - * | original SS |
24261 - * | original Return RSP |
24262 - * | original RFLAGS |
24263 - * | original CS |
24264 - * | original RIP |
24265 - * +-------------------------+
24266 - * | temp storage for rdx |
24267 - * +-------------------------+
24268 - * | NMI executing variable |
24269 - * +-------------------------+
24271 - * | copied Return RSP |
24272 - * | copied RFLAGS |
24275 - * +-------------------------+
24277 - * | Saved Return RSP |
24278 - * | Saved RFLAGS |
24281 - * +-------------------------+
24283 - * +-------------------------+
24285 - * The saved stack frame is used to fix up the copied stack frame
24286 - * that a nested NMI may change to make the interrupted NMI iret jump
24287 - * to the repeat_nmi. The original stack frame and the temp storage
24288 - * is also used by nested NMIs and can not be trusted on exit.
24290 - /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
24291 + /* Restore rdx. */
24295 /* Set the NMI executing variable on the stack. */
24299 - * Leave room for the "copied" frame
24301 + /* Leave room for the "iret" frame */
24303 CFI_ADJUST_CFA_OFFSET 5*8
24305 - /* Copy the stack frame to the Saved frame */
24306 + /* Copy the "original" frame to the "outermost" frame */
24308 pushq_cfi 11*8(%rsp)
24310 @@ -1560,6 +2158,7 @@ first_nmi:
24312 /* Everything up to here is safe from nested NMIs */
24316 * If there was a nested NMI, the first NMI's iret will return
24317 * here. But NMIs are still enabled and we can take another
24318 @@ -1568,16 +2167,21 @@ first_nmi:
24319 * it will just return, as we are about to repeat an NMI anyway.
24320 * This makes it safe to copy to the stack frame that a nested
24325 - * Update the stack variable to say we are still in NMI (the update
24326 - * is benign for the non-repeat case, where 1 was pushed just above
24327 - * to this very stack slot).
24329 + * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
24330 + * we're repeating an NMI, gsbase has the same value that it had on
24331 + * the first iteration. paranoid_entry will load the kernel
24332 + * gsbase if needed before we call do_nmi.
24334 + * Set "NMI executing" in case we came back here via IRET.
24336 movq $1, 10*8(%rsp)
24338 - /* Make another copy, this one may be modified by nested NMIs */
24340 + * Copy the "outermost" frame to the "iret" frame. NMIs that nest
24341 + * here must not modify the "iret" frame while we're writing to
24342 + * it or it will end up containing garbage.
24345 CFI_ADJUST_CFA_OFFSET -10*8
24347 @@ -1588,66 +2192,65 @@ repeat_nmi:
24351 - * Everything below this point can be preempted by a nested
24352 - * NMI if the first NMI took an exception and reset our iret stack
24353 - * so that we repeat another NMI.
24354 + * Everything below this point can be preempted by a nested NMI.
24355 + * If this happens, then the inner NMI will change the "iret"
24356 + * frame to point back to repeat_nmi.
24358 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
24359 ALLOC_PT_GPREGS_ON_STACK
24362 - * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
24363 + * Use paranoid_entry_nmi to handle SWAPGS, but no need to use paranoid_exit
24364 * as we should not be calling schedule in NMI context.
24365 * Even with normal interrupts enabled. An NMI should not be
24366 * setting NEED_RESCHED or anything that normal interrupts and
24367 * exceptions might do.
24369 - call paranoid_entry
24370 + call paranoid_entry_nmi
24374 - * Save off the CR2 register. If we take a page fault in the NMI then
24375 - * it could corrupt the CR2 value. If the NMI preempts a page fault
24376 - * handler before it was able to read the CR2 register, and then the
24377 - * NMI itself takes a page fault, the page fault that was preempted
24378 - * will read the information from the NMI page fault and not the
24379 - * origin fault. Save it off and restore it if it changes.
24380 - * Use the r12 callee-saved register.
24384 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24389 - /* Did the NMI take a page fault? Restore cr2 if it did */
24396 - testl %ebx,%ebx /* swapgs needed? */
24397 + testl $1,%ebx /* swapgs needed? */
24400 SWAPGS_UNSAFE_STACK
24402 + pax_exit_kernel_nmi
24405 - /* Pop the extra iret frame at once */
24407 REMOVE_PT_GPREGS_FROM_STACK 6*8
24409 - /* Clear the NMI executing stack variable */
24410 - movq $0, 5*8(%rsp)
24412 + pax_force_retaddr_bts
24415 + * Clear "NMI executing". Set DF first so that we can easily
24416 + * distinguish the remaining code between here and IRET from
24417 + * the SYSCALL entry and exit paths. On a native kernel, we
24418 + * could just inspect RIP, but, on paravirt kernels,
24419 + * INTERRUPT_RETURN can translate into a jump into a
24420 + * hypercall page.
24423 + movq $0, 5*8(%rsp) /* clear "NMI executing" */
24426 + * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
24427 + * stack in a single instruction. We are returning to kernel
24428 + * mode, so this cannot result in a fault.
24435 ENTRY(ignore_sysret)
24440 -END(ignore_sysret)
24441 +ENDPROC(ignore_sysret)
24443 diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24444 index f5d0730..5bce89c 100644
24445 --- a/arch/x86/kernel/espfix_64.c
24446 +++ b/arch/x86/kernel/espfix_64.c
24447 @@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex);
24448 #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
24449 static void *espfix_pages[ESPFIX_MAX_PAGES];
24451 -static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
24452 - __aligned(PAGE_SIZE);
24453 +static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata;
24455 static unsigned int page_random, slot_random;
24457 @@ -122,11 +121,17 @@ static void init_espfix_random(void)
24458 void __init init_espfix_bsp(void)
24461 + unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
24463 /* Install the espfix pud into the kernel page directory */
24464 - pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
24465 + pgd_p = &init_level4_pgt[index];
24466 pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
24468 +#ifdef CONFIG_PAX_PER_CPU_PGD
24469 + clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
24470 + clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
24473 /* Randomize the locations */
24474 init_espfix_random();
24476 @@ -194,7 +199,7 @@ void init_espfix_ap(void)
24477 set_pte(&pte_p[n*PTE_STRIDE], pte);
24479 /* Job is done for this CPU and any CPU which shares this page */
24480 - ACCESS_ONCE(espfix_pages[page]) = stack_page;
24481 + ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24484 mutex_unlock(&espfix_init_mutex);
24485 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24486 index 8b7b0a5..02219db 100644
24487 --- a/arch/x86/kernel/ftrace.c
24488 +++ b/arch/x86/kernel/ftrace.c
24489 @@ -89,7 +89,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24490 * kernel identity mapping to modify code.
24492 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24493 - ip = (unsigned long)__va(__pa_symbol(ip));
24494 + ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24498 @@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24500 unsigned char replaced[MCOUNT_INSN_SIZE];
24502 + ip = ktla_ktva(ip);
24505 * Note: Due to modules and __init, code can
24506 * disappear and change, we need to protect against faulting
24507 @@ -230,7 +232,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24508 unsigned char old[MCOUNT_INSN_SIZE];
24511 - memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24512 + memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24514 ftrace_update_func = ip;
24515 /* Make sure the breakpoints see the ftrace_update_func update */
24516 @@ -311,7 +313,7 @@ static int add_break(unsigned long ip, const char *old)
24517 unsigned char replaced[MCOUNT_INSN_SIZE];
24518 unsigned char brk = BREAKPOINT_INSTRUCTION;
24520 - if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24521 + if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24524 /* Make sure it is what we expect it to be */
24525 @@ -670,11 +672,11 @@ static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
24526 /* Module allocation simplifies allocating memory for code */
24527 static inline void *alloc_tramp(unsigned long size)
24529 - return module_alloc(size);
24530 + return module_alloc_exec(size);
24532 static inline void tramp_free(void *tramp)
24534 - module_memfree(tramp);
24535 + module_memfree_exec(tramp);
24538 /* Trampolines can only be created if modules are supported */
24539 @@ -753,7 +755,9 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
24540 *tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *);
24542 /* Copy ftrace_caller onto the trampoline memory */
24543 + pax_open_kernel();
24544 ret = probe_kernel_read(trampoline, (void *)start_offset, size);
24545 + pax_close_kernel();
24546 if (WARN_ON(ret < 0)) {
24547 tramp_free(trampoline);
24549 @@ -763,6 +767,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
24551 /* The trampoline ends with a jmp to ftrace_return */
24552 jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_return);
24553 + pax_open_kernel();
24554 memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE);
24557 @@ -775,6 +780,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
24559 ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE);
24560 *ptr = (unsigned long)ops;
24561 + pax_close_kernel();
24563 op_offset -= start_offset;
24564 memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);
24565 @@ -792,7 +798,9 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
24566 op_ptr.offset = offset;
24568 /* put in the new offset to the ftrace_ops */
24569 + pax_open_kernel();
24570 memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);
24571 + pax_close_kernel();
24573 /* ALLOC_TRAMP flags lets us know we created it */
24574 ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
24575 diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24576 index 5a46681..1ef7ffa 100644
24577 --- a/arch/x86/kernel/head64.c
24578 +++ b/arch/x86/kernel/head64.c
24579 @@ -68,12 +68,12 @@ again:
24583 - * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24584 - * critical -- __PAGE_OFFSET would point us back into the dynamic
24585 + * The use of __early_va rather than __va here is critical:
24586 + * __va would point us back into the dynamic
24587 * range and we might end up looping forever...
24590 - pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24591 + pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24593 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24594 reset_early_page_tables();
24595 @@ -83,13 +83,13 @@ again:
24596 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24597 for (i = 0; i < PTRS_PER_PUD; i++)
24599 - *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24600 + *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24602 pud_p += pud_index(address);
24606 - pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24607 + pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24609 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24610 reset_early_page_tables();
24611 @@ -99,7 +99,7 @@ again:
24612 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24613 for (i = 0; i < PTRS_PER_PMD; i++)
24615 - *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24616 + *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24618 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24619 pmd_p[pmd_index(address)] = pmd;
24620 @@ -177,7 +177,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
24624 - clear_page(init_level4_pgt);
24625 /* set init_level4_pgt kernel high mapping*/
24626 init_level4_pgt[511] = early_level4_pgt[511];
24628 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24629 index 7e429c9..7244a52 100644
24630 --- a/arch/x86/kernel/head_32.S
24631 +++ b/arch/x86/kernel/head_32.S
24633 /* Physical address */
24634 #define pa(X) ((X) - __PAGE_OFFSET)
24636 +#ifdef CONFIG_PAX_KERNEXEC
24639 +#define ta(X) ((X) - __PAGE_OFFSET)
24643 * References to members of the new_cpu_data structure.
24646 * and small than max_low_pfn, otherwise will waste some page table entries
24649 -#if PTRS_PER_PMD > 1
24650 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24652 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24654 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24657 * Number of possible pages in the lowmem region.
24658 @@ -86,6 +88,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24659 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24662 + * Real beginning of normal "text" segment
24668 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24669 * %esi points to the real-mode code as a 32-bit pointer.
24670 * CS and DS must be 4 GB flat segments, but we don't depend on
24671 @@ -93,6 +101,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24676 +#ifdef CONFIG_PAX_KERNEXEC
24678 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24679 +.fill PAGE_SIZE-5,1,0xcc
24683 movl pa(stack_start),%ecx
24685 @@ -114,6 +129,59 @@ ENTRY(startup_32)
24687 leal -__PAGE_OFFSET(%ecx),%esp
24690 + movl $pa(cpu_gdt_table),%edi
24691 + movl $__per_cpu_load,%eax
24692 + movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24694 + movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24695 + movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24696 + movl $__per_cpu_end - 1,%eax
24697 + subl $__per_cpu_start,%eax
24698 + movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24701 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24702 + movl $NR_CPUS,%ecx
24703 + movl $pa(cpu_gdt_table),%edi
24705 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24706 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24707 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24708 + addl $PAGE_SIZE_asm,%edi
24712 +#ifdef CONFIG_PAX_KERNEXEC
24713 + movl $pa(boot_gdt),%edi
24714 + movl $__LOAD_PHYSICAL_ADDR,%eax
24715 + movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24717 + movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24718 + movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24721 + ljmp $(__BOOT_CS),$1f
24724 + movl $NR_CPUS,%ecx
24725 + movl $pa(cpu_gdt_table),%edi
24726 + addl $__PAGE_OFFSET,%eax
24728 + movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24729 + movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24730 + movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24731 + movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24733 + movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24734 + movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24735 + movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24736 + movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24738 + addl $PAGE_SIZE_asm,%edi
24743 * Clear BSS first so that there are no surprises...
24745 @@ -209,8 +277,11 @@ ENTRY(startup_32)
24746 movl %eax, pa(max_pfn_mapped)
24748 /* Do early initialization of the fixmap area */
24749 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24750 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24751 +#ifdef CONFIG_COMPAT_VDSO
24752 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24754 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24756 #else /* Not PAE */
24758 page_pde_offset = (__PAGE_OFFSET >> 20);
24759 @@ -240,8 +311,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24760 movl %eax, pa(max_pfn_mapped)
24762 /* Do early initialization of the fixmap area */
24763 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24764 - movl %eax,pa(initial_page_table+0xffc)
24765 +#ifdef CONFIG_COMPAT_VDSO
24766 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24768 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24772 #ifdef CONFIG_PARAVIRT
24773 @@ -255,9 +329,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24774 cmpl $num_subarch_entries, %eax
24777 - movl pa(subarch_entries)(,%eax,4), %eax
24778 - subl $__PAGE_OFFSET, %eax
24780 + jmp *pa(subarch_entries)(,%eax,4)
24784 @@ -269,10 +341,10 @@ WEAK(xen_entry)
24788 - .long default_entry /* normal x86/PC */
24789 - .long lguest_entry /* lguest hypervisor */
24790 - .long xen_entry /* Xen hypervisor */
24791 - .long default_entry /* Moorestown MID */
24792 + .long ta(default_entry) /* normal x86/PC */
24793 + .long ta(lguest_entry) /* lguest hypervisor */
24794 + .long ta(xen_entry) /* Xen hypervisor */
24795 + .long ta(default_entry) /* Moorestown MID */
24796 num_subarch_entries = (. - subarch_entries) / 4
24799 @@ -362,6 +434,7 @@ default_entry:
24800 movl pa(mmu_cr4_features),%eax
24803 +#ifdef CONFIG_X86_PAE
24804 testb $X86_CR4_PAE, %al # check if PAE is enabled
24807 @@ -390,6 +463,9 @@ default_entry:
24808 /* Make changes effective */
24811 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24817 @@ -457,14 +533,20 @@ is486:
24818 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24819 movl %eax,%ss # after changing gdt.
24821 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
24822 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24826 movl $(__KERNEL_PERCPU), %eax
24827 movl %eax,%fs # set this cpu's percpu
24829 +#ifdef CONFIG_CC_STACKPROTECTOR
24830 movl $(__KERNEL_STACK_CANARY),%eax
24831 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24832 + movl $(__USER_DS),%eax
24838 xorl %eax,%eax # Clear LDT
24839 @@ -521,8 +603,11 @@ setup_once:
24840 * relocation. Manually set base address in stack canary
24841 * segment descriptor.
24843 - movl $gdt_page,%eax
24844 + movl $cpu_gdt_table,%eax
24845 movl $stack_canary,%ecx
24847 + addl $__per_cpu_load,%ecx
24849 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24851 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24852 @@ -559,7 +644,7 @@ early_idt_handler_common:
24853 cmpl $2,(%esp) # X86_TRAP_NMI
24854 je is_nmi # Ignore NMI
24856 - cmpl $2,%ss:early_recursion_flag
24857 + cmpl $1,%ss:early_recursion_flag
24859 incl %ss:early_recursion_flag
24861 @@ -597,8 +682,8 @@ early_idt_handler_common:
24862 pushl (20+6*4)(%esp) /* trapno */
24871 @@ -618,8 +703,11 @@ ENDPROC(early_idt_handler_common)
24872 /* This is the default interrupt "handler" :-) */
24876 #ifdef CONFIG_PRINTK
24877 + cmpl $2,%ss:early_recursion_flag
24879 + incl %ss:early_recursion_flag
24884 @@ -628,9 +716,6 @@ ignore_int:
24885 movl $(__KERNEL_DS),%eax
24888 - cmpl $2,early_recursion_flag
24890 - incl early_recursion_flag
24894 @@ -664,29 +749,34 @@ ENTRY(setup_once_ref)
24898 -__PAGE_ALIGNED_BSS
24900 #ifdef CONFIG_X86_PAE
24901 +.section .initial_pg_pmd,"a",@progbits
24903 .fill 1024*KPMDS,4,0
24905 +.section .initial_page_table,"a",@progbits
24906 ENTRY(initial_page_table)
24909 +.section .initial_pg_fixmap,"a",@progbits
24912 +.section .empty_zero_page,"a",@progbits
24913 ENTRY(empty_zero_page)
24915 +.section .swapper_pg_dir,"a",@progbits
24916 ENTRY(swapper_pg_dir)
24917 +#ifdef CONFIG_X86_PAE
24924 * This starts the data section.
24926 #ifdef CONFIG_X86_PAE
24927 -__PAGE_ALIGNED_DATA
24928 - /* Page-aligned for the benefit of paravirt? */
24930 +.section .initial_page_table,"a",@progbits
24931 ENTRY(initial_page_table)
24932 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24934 @@ -705,12 +795,20 @@ ENTRY(initial_page_table)
24935 # error "Kernel PMDs should be 1, 2 or 3"
24937 .align PAGE_SIZE /* needs to be page-sized too */
24939 +#ifdef CONFIG_PAX_PER_CPU_PGD
24951 - .long init_thread_union+THREAD_SIZE
24952 + .long init_thread_union+THREAD_SIZE-8
24956 @@ -738,7 +836,7 @@ fault_msg:
24957 * segment size, and 32-bit linear address value:
24961 +.section .rodata,"a",@progbits
24962 .globl boot_gdt_descr
24965 @@ -747,7 +845,7 @@ fault_msg:
24966 .word 0 # 32 bit align gdt_desc.address
24969 - .long boot_gdt - __PAGE_OFFSET
24970 + .long pa(boot_gdt)
24972 .word 0 # 32-bit align idt_desc.address
24974 @@ -758,7 +856,7 @@ idt_descr:
24975 .word 0 # 32 bit align gdt_desc.address
24976 ENTRY(early_gdt_descr)
24977 .word GDT_ENTRIES*8-1
24978 - .long gdt_page /* Overwritten for secondary CPUs */
24979 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
24982 * The boot_gdt must mirror the equivalent in setup.S and is
24983 @@ -767,5 +865,65 @@ ENTRY(early_gdt_descr)
24984 .align L1_CACHE_BYTES
24986 .fill GDT_ENTRY_BOOT_CS,8,0
24987 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24988 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24989 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24990 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24992 + .align PAGE_SIZE_asm
24993 +ENTRY(cpu_gdt_table)
24995 + .quad 0x0000000000000000 /* NULL descriptor */
24996 + .quad 0x0000000000000000 /* 0x0b reserved */
24997 + .quad 0x0000000000000000 /* 0x13 reserved */
24998 + .quad 0x0000000000000000 /* 0x1b reserved */
25000 +#ifdef CONFIG_PAX_KERNEXEC
25001 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
25003 + .quad 0x0000000000000000 /* 0x20 unused */
25006 + .quad 0x0000000000000000 /* 0x28 unused */
25007 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
25008 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
25009 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
25010 + .quad 0x0000000000000000 /* 0x4b reserved */
25011 + .quad 0x0000000000000000 /* 0x53 reserved */
25012 + .quad 0x0000000000000000 /* 0x5b reserved */
25014 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
25015 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
25016 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
25017 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
25019 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
25020 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
25023 + * Segments used for calling PnP BIOS have byte granularity.
25024 + * The code segments and data segments have fixed 64k limits,
25025 + * the transfer segment sizes are set at run time.
25027 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
25028 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
25029 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
25030 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
25031 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
25034 + * The APM segments have byte granularity and their bases
25035 + * are set at run time. All have 64k limits.
25037 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
25038 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
25039 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
25041 + .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
25042 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
25043 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
25044 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
25045 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
25046 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
25048 + /* Be sure this is zeroed to avoid false validations in Xen */
25049 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
25051 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
25052 index df7e780..e97a497 100644
25053 --- a/arch/x86/kernel/head_64.S
25054 +++ b/arch/x86/kernel/head_64.S
25056 #include <asm/processor-flags.h>
25057 #include <asm/percpu.h>
25058 #include <asm/nops.h>
25059 +#include <asm/cpufeature.h>
25060 +#include <asm/alternative-asm.h>
25062 #ifdef CONFIG_PARAVIRT
25063 #include <asm/asm-offsets.h>
25064 @@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
25065 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
25066 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
25067 L3_START_KERNEL = pud_index(__START_KERNEL_map)
25068 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
25069 +L3_VMALLOC_START = pud_index(VMALLOC_START)
25070 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
25071 +L3_VMALLOC_END = pud_index(VMALLOC_END)
25072 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
25073 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
25077 @@ -89,11 +97,26 @@ startup_64:
25078 * Fixup the physical addresses in the page table
25080 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
25081 + addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
25082 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
25083 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
25084 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
25085 + addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
25087 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
25088 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
25089 + addq %rbp, level3_ident_pgt + (0*8)(%rip)
25090 +#ifndef CONFIG_XEN
25091 + addq %rbp, level3_ident_pgt + (1*8)(%rip)
25094 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
25096 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
25097 + addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
25099 + addq %rbp, level2_fixmap_pgt + (504*8)(%rip)
25100 + addq %rbp, level2_fixmap_pgt + (505*8)(%rip)
25101 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
25102 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
25105 * Set up the identity mapping for the switchover. These
25106 @@ -174,11 +197,12 @@ ENTRY(secondary_startup_64)
25107 * after the boot processor executes this code.
25111 movq $(init_level4_pgt - __START_KERNEL_map), %rax
25114 - /* Enable PAE mode and PGE */
25115 - movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
25116 + /* Enable PAE mode and PSE/PGE */
25117 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
25120 /* Setup early boot stage 4 level pagetables. */
25121 @@ -199,10 +223,21 @@ ENTRY(secondary_startup_64)
25122 movl $MSR_EFER, %ecx
25124 btsl $_EFER_SCE, %eax /* Enable System Call */
25125 - btl $20,%edi /* No Execute supported? */
25126 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
25128 btsl $_EFER_NX, %eax
25131 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
25132 + btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
25133 + btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
25134 + btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
25135 + btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
25136 + btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*504(%rip)
25137 + btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*505(%rip)
25138 + btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
25139 + btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
25140 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
25141 1: wrmsr /* Make changes effective */
25144 @@ -282,6 +317,7 @@ ENTRY(secondary_startup_64)
25145 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
25146 * address given in m16:64.
25148 + pax_set_fptr_mask
25149 movq initial_code(%rip),%rax
25150 pushq $0 # fake return address to stop unwinder
25151 pushq $__KERNEL_CS # set correct cs
25152 @@ -313,7 +349,7 @@ ENDPROC(start_cpu0)
25153 .quad INIT_PER_CPU_VAR(irq_stack_union)
25155 GLOBAL(stack_start)
25156 - .quad init_thread_union+THREAD_SIZE-8
25157 + .quad init_thread_union+THREAD_SIZE-16
25161 @@ -393,7 +429,7 @@ early_idt_handler_common:
25163 #ifdef CONFIG_KALLSYMS
25164 leaq early_idt_ripmsg(%rip),%rdi
25165 - movq 40(%rsp),%rsi # %rip again
25166 + movq 88(%rsp),%rsi # %rip again
25167 call __print_symbol
25169 #endif /* EARLY_PRINTK */
25170 @@ -422,6 +458,7 @@ ENDPROC(early_idt_handler_common)
25171 early_recursion_flag:
25174 + .section .rodata,"a",@progbits
25175 #ifdef CONFIG_EARLY_PRINTK
25177 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
25178 @@ -449,29 +486,52 @@ NEXT_PAGE(early_level4_pgt)
25179 NEXT_PAGE(early_dynamic_pgts)
25180 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
25183 + .section .rodata,"a",@progbits
25185 -#ifndef CONFIG_XEN
25186 NEXT_PAGE(init_level4_pgt)
25189 -NEXT_PAGE(init_level4_pgt)
25190 - .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25191 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
25192 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25193 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
25194 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
25195 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
25196 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
25197 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
25198 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25199 .org init_level4_pgt + L4_START_KERNEL*8, 0
25200 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
25201 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
25203 +#ifdef CONFIG_PAX_PER_CPU_PGD
25204 +NEXT_PAGE(cpu_pgd)
25210 NEXT_PAGE(level3_ident_pgt)
25211 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25215 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
25219 +NEXT_PAGE(level3_vmalloc_start_pgt)
25222 +NEXT_PAGE(level3_vmalloc_end_pgt)
25225 +NEXT_PAGE(level3_vmemmap_pgt)
25226 + .fill L3_VMEMMAP_START,8,0
25227 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25229 NEXT_PAGE(level2_ident_pgt)
25230 - /* Since I easily can, map the first 1G.
25231 + /* Since I easily can, map the first 2G.
25232 * Don't set NX because code runs from these pages.
25234 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
25236 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
25238 NEXT_PAGE(level3_kernel_pgt)
25239 .fill L3_START_KERNEL,8,0
25240 @@ -479,6 +539,9 @@ NEXT_PAGE(level3_kernel_pgt)
25241 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
25242 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25244 +NEXT_PAGE(level2_vmemmap_pgt)
25247 NEXT_PAGE(level2_kernel_pgt)
25249 * 512 MB kernel mapping. We spend a full page on this pagetable
25250 @@ -494,23 +557,61 @@ NEXT_PAGE(level2_kernel_pgt)
25251 KERNEL_IMAGE_SIZE/PMD_SIZE)
25253 NEXT_PAGE(level2_fixmap_pgt)
25255 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25256 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
25259 + .quad level1_fixmap_pgt - __START_KERNEL_map + 0 * PAGE_SIZE + _PAGE_TABLE
25260 + .quad level1_fixmap_pgt - __START_KERNEL_map + 1 * PAGE_SIZE + _PAGE_TABLE
25261 + .quad level1_fixmap_pgt - __START_KERNEL_map + 2 * PAGE_SIZE + _PAGE_TABLE
25262 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
25263 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
25266 NEXT_PAGE(level1_fixmap_pgt)
25269 +NEXT_PAGE(level1_vsyscall_pgt)
25276 +ENTRY(cpu_gdt_table)
25278 + .quad 0x0000000000000000 /* NULL descriptor */
25279 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
25280 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
25281 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
25282 + .quad 0x00cffb000000ffff /* __USER32_CS */
25283 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
25284 + .quad 0x00affb000000ffff /* __USER_CS */
25286 +#ifdef CONFIG_PAX_KERNEXEC
25287 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
25289 + .quad 0x0 /* unused */
25292 + .quad 0,0 /* TSS */
25293 + .quad 0,0 /* LDT */
25294 + .quad 0,0,0 /* three TLS descriptors */
25295 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
25296 + /* asm/segment.h:GDT_ENTRIES must match this */
25298 +#ifdef CONFIG_PAX_MEMORY_UDEREF
25299 + .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25301 + .quad 0x0 /* unused */
25304 + /* zero the remaining page */
25305 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25309 .globl early_gdt_descr
25311 .word GDT_ENTRIES*8-1
25312 early_gdt_descr_base:
25313 - .quad INIT_PER_CPU_VAR(gdt_page)
25314 + .quad cpu_gdt_table
25317 /* This must match the first entry in level2_kernel_pgt */
25318 @@ -534,8 +635,8 @@ NEXT_PAGE(kasan_zero_pud)
25321 #include "../../x86/xen/xen-head.S"
25323 - __PAGE_ALIGNED_BSS
25325 + .section .rodata,"a",@progbits
25326 NEXT_PAGE(empty_zero_page)
25329 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25330 index 05fd74f..c3548b1 100644
25331 --- a/arch/x86/kernel/i386_ksyms_32.c
25332 +++ b/arch/x86/kernel/i386_ksyms_32.c
25333 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25334 EXPORT_SYMBOL(cmpxchg8b_emu);
25337 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
25339 /* Networking helper routines. */
25340 EXPORT_SYMBOL(csum_partial_copy_generic);
25341 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25342 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25344 EXPORT_SYMBOL(__get_user_1);
25345 EXPORT_SYMBOL(__get_user_2);
25346 @@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25347 EXPORT_SYMBOL(___preempt_schedule_context);
25351 +#ifdef CONFIG_PAX_KERNEXEC
25352 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25355 +#ifdef CONFIG_PAX_PER_CPU_PGD
25356 +EXPORT_SYMBOL(cpu_pgd);
25358 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25359 index e7cc537..67d7372 100644
25360 --- a/arch/x86/kernel/i8259.c
25361 +++ b/arch/x86/kernel/i8259.c
25362 @@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25363 static void make_8259A_irq(unsigned int irq)
25365 disable_irq_nosync(irq);
25366 - io_apic_irqs &= ~(1<<irq);
25367 + io_apic_irqs &= ~(1UL<<irq);
25368 irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
25371 @@ -208,7 +208,7 @@ spurious_8259A_irq:
25372 "spurious 8259A interrupt: IRQ%d.\n", irq);
25373 spurious_irq_mask |= irqmask;
25375 - atomic_inc(&irq_err_count);
25376 + atomic_inc_unchecked(&irq_err_count);
25378 * Theoretically we do not have to handle this IRQ,
25379 * but in Linux this does not cause problems and is
25380 @@ -349,14 +349,16 @@ static void init_8259A(int auto_eoi)
25381 /* (slave's support for AEOI in flat mode is to be investigated) */
25382 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25384 + pax_open_kernel();
25387 * In AEOI mode we just have to mask the interrupt
25390 - i8259A_chip.irq_mask_ack = disable_8259A_irq;
25391 + *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25393 - i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25394 + *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25395 + pax_close_kernel();
25397 udelay(100); /* wait for 8259A to initialize */
25399 diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25400 index a979b5b..1d6db75 100644
25401 --- a/arch/x86/kernel/io_delay.c
25402 +++ b/arch/x86/kernel/io_delay.c
25403 @@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25404 * Quirk table for systems that misbehave (lock up, etc.) if port
25407 -static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25408 +static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25410 .callback = dmi_io_delay_0xed_port,
25411 .ident = "Compaq Presario V6000",
25412 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25413 index 37dae79..620dd84 100644
25414 --- a/arch/x86/kernel/ioport.c
25415 +++ b/arch/x86/kernel/ioport.c
25417 #include <linux/sched.h>
25418 #include <linux/kernel.h>
25419 #include <linux/capability.h>
25420 +#include <linux/security.h>
25421 #include <linux/errno.h>
25422 #include <linux/types.h>
25423 #include <linux/ioport.h>
25424 @@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25426 if (turn_on && !capable(CAP_SYS_RAWIO))
25428 +#ifdef CONFIG_GRKERNSEC_IO
25429 + if (turn_on && grsec_disable_privio) {
25430 + gr_handle_ioperm();
25436 * If it's the first ioperm() call in this thread's lifetime, set the
25437 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25438 * because the ->io_bitmap_max value must match the bitmap
25441 - tss = &per_cpu(cpu_tss, get_cpu());
25442 + tss = cpu_tss + get_cpu();
25445 bitmap_clear(t->io_bitmap_ptr, from, num);
25446 @@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25448 if (!capable(CAP_SYS_RAWIO))
25450 +#ifdef CONFIG_GRKERNSEC_IO
25451 + if (grsec_disable_privio) {
25452 + gr_handle_iopl();
25457 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25458 t->iopl = level << 12;
25459 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25460 index e5952c2..11c3a54 100644
25461 --- a/arch/x86/kernel/irq.c
25462 +++ b/arch/x86/kernel/irq.c
25464 #define CREATE_TRACE_POINTS
25465 #include <asm/trace/irq_vectors.h>
25467 -atomic_t irq_err_count;
25468 +atomic_unchecked_t irq_err_count;
25470 /* Function pointer for generic interrupt vector handling */
25471 void (*x86_platform_ipi_callback)(void) = NULL;
25472 @@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25473 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25474 seq_puts(p, " Hypervisor callback interrupts\n");
25476 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25477 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25478 #if defined(CONFIG_X86_IO_APIC)
25479 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25480 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25484 @@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25486 u64 arch_irq_stat(void)
25488 - u64 sum = atomic_read(&irq_err_count);
25489 + u64 sum = atomic_read_unchecked(&irq_err_count);
25493 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25494 index f9fd86a..e6cc9ae 100644
25495 --- a/arch/x86/kernel/irq_32.c
25496 +++ b/arch/x86/kernel/irq_32.c
25497 @@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25499 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25501 +extern void gr_handle_kernel_exploit(void);
25503 int sysctl_panic_on_stackoverflow __read_mostly;
25505 /* Debugging check for stack overflow: is there less than 1KB free? */
25506 @@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25507 __asm__ __volatile__("andl %%esp,%0" :
25508 "=r" (sp) : "0" (THREAD_SIZE - 1));
25510 - return sp < (sizeof(struct thread_info) + STACK_WARN);
25511 + return sp < STACK_WARN;
25514 static void print_stack_overflow(void)
25516 printk(KERN_WARNING "low stack detected by irq handler\n");
25518 + gr_handle_kernel_exploit();
25519 if (sysctl_panic_on_stackoverflow)
25520 panic("low stack detected by irq handler - check messages\n");
25522 @@ -77,10 +80,9 @@ static inline void *current_stack(void)
25524 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25526 - struct irq_stack *curstk, *irqstk;
25527 + struct irq_stack *irqstk;
25528 u32 *isp, *prev_esp, arg1, arg2;
25530 - curstk = (struct irq_stack *) current_stack();
25531 irqstk = __this_cpu_read(hardirq_stack);
25534 @@ -89,15 +91,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25535 * handler) we can't do that and just have to keep using the
25536 * current stack (which is the irq stack already after all)
25538 - if (unlikely(curstk == irqstk))
25539 + if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25542 - isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25543 + isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25545 /* Save the next esp at the bottom of the stack */
25546 prev_esp = (u32 *)irqstk;
25547 *prev_esp = current_stack_pointer();
25549 +#ifdef CONFIG_PAX_MEMORY_UDEREF
25550 + __set_fs(MAKE_MM_SEG(0));
25553 if (unlikely(overflow))
25554 call_on_stack(print_stack_overflow, isp);
25556 @@ -108,6 +114,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25557 : "0" (irq), "1" (desc), "2" (isp),
25558 "D" (desc->handle_irq)
25559 : "memory", "cc", "ecx");
25561 +#ifdef CONFIG_PAX_MEMORY_UDEREF
25562 + __set_fs(current_thread_info()->addr_limit);
25568 @@ -116,32 +127,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25570 void irq_ctx_init(int cpu)
25572 - struct irq_stack *irqstk;
25574 if (per_cpu(hardirq_stack, cpu))
25577 - irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25579 - THREAD_SIZE_ORDER));
25580 - per_cpu(hardirq_stack, cpu) = irqstk;
25582 - irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25584 - THREAD_SIZE_ORDER));
25585 - per_cpu(softirq_stack, cpu) = irqstk;
25587 - printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25588 - cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
25589 + per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25590 + per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25593 void do_softirq_own_stack(void)
25595 - struct thread_info *curstk;
25596 struct irq_stack *irqstk;
25597 u32 *isp, *prev_esp;
25599 - curstk = current_stack();
25600 irqstk = __this_cpu_read(softirq_stack);
25602 /* build the stack frame on the softirq stack */
25603 @@ -151,7 +148,16 @@ void do_softirq_own_stack(void)
25604 prev_esp = (u32 *)irqstk;
25605 *prev_esp = current_stack_pointer();
25607 +#ifdef CONFIG_PAX_MEMORY_UDEREF
25608 + __set_fs(MAKE_MM_SEG(0));
25611 call_on_stack(__do_softirq, isp);
25613 +#ifdef CONFIG_PAX_MEMORY_UDEREF
25614 + __set_fs(current_thread_info()->addr_limit);
25619 bool handle_irq(unsigned irq, struct pt_regs *regs)
25620 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25621 index 394e643..824fce8 100644
25622 --- a/arch/x86/kernel/irq_64.c
25623 +++ b/arch/x86/kernel/irq_64.c
25624 @@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25625 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25626 EXPORT_PER_CPU_SYMBOL(irq_regs);
25628 +extern void gr_handle_kernel_exploit(void);
25630 int sysctl_panic_on_stackoverflow;
25633 @@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25634 irq_stack_top, irq_stack_bottom,
25635 estack_top, estack_bottom);
25637 + gr_handle_kernel_exploit();
25639 if (sysctl_panic_on_stackoverflow)
25640 panic("low stack detected by irq handler - check messages\n");
25642 diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25643 index 26d5a55..a01160a 100644
25644 --- a/arch/x86/kernel/jump_label.c
25645 +++ b/arch/x86/kernel/jump_label.c
25646 @@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25647 * Jump label is enabled for the first time.
25648 * So we expect a default_nop...
25650 - if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25651 + if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25653 bug_at((void *)entry->code, __LINE__);
25655 @@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25656 * ...otherwise expect an ideal_nop. Otherwise
25657 * something went horribly wrong.
25659 - if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25660 + if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25662 bug_at((void *)entry->code, __LINE__);
25664 @@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25665 * are converting the default nop to the ideal nop.
25668 - if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25669 + if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25670 bug_at((void *)entry->code, __LINE__);
25673 code.offset = entry->target -
25674 (entry->code + JUMP_LABEL_NOP_SIZE);
25675 - if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25676 + if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25677 bug_at((void *)entry->code, __LINE__);
25679 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
25680 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25681 index d6178d9..e12482f 100644
25682 --- a/arch/x86/kernel/kgdb.c
25683 +++ b/arch/x86/kernel/kgdb.c
25684 @@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25685 bp->attr.bp_addr = breakinfo[breakno].addr;
25686 bp->attr.bp_len = breakinfo[breakno].len;
25687 bp->attr.bp_type = breakinfo[breakno].type;
25688 - info->address = breakinfo[breakno].addr;
25689 + if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25690 + info->address = ktla_ktva(breakinfo[breakno].addr);
25692 + info->address = breakinfo[breakno].addr;
25693 info->len = breakinfo[breakno].len;
25694 info->type = breakinfo[breakno].type;
25695 val = arch_install_hw_breakpoint(bp);
25696 @@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25698 /* clear the trace bit */
25699 linux_regs->flags &= ~X86_EFLAGS_TF;
25700 - atomic_set(&kgdb_cpu_doing_single_step, -1);
25701 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25703 /* set the trace bit if we're stepping */
25704 if (remcomInBuffer[0] == 's') {
25705 linux_regs->flags |= X86_EFLAGS_TF;
25706 - atomic_set(&kgdb_cpu_doing_single_step,
25707 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25708 raw_smp_processor_id());
25711 @@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25715 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25716 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25717 if (user_mode(regs))
25718 return single_step_cont(regs, args);
25720 @@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25721 #endif /* CONFIG_DEBUG_RODATA */
25723 bpt->type = BP_BREAKPOINT;
25724 - err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25725 + err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25729 - err = probe_kernel_write((char *)bpt->bpt_addr,
25730 + err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25731 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25732 #ifdef CONFIG_DEBUG_RODATA
25734 @@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25736 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25738 - err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25739 + err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25742 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25743 @@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25744 if (mutex_is_locked(&text_mutex))
25746 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25747 - err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25748 + err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25749 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25753 #endif /* CONFIG_DEBUG_RODATA */
25754 - return probe_kernel_write((char *)bpt->bpt_addr,
25755 + return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25756 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25759 diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25760 index 1deffe6..4705700 100644
25761 --- a/arch/x86/kernel/kprobes/core.c
25762 +++ b/arch/x86/kernel/kprobes/core.c
25763 @@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
25767 - insn = (struct __arch_relative_insn *)from;
25768 + insn = (struct __arch_relative_insn *)ktla_ktva(from);
25770 + pax_open_kernel();
25771 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25773 + pax_close_kernel();
25776 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25777 @@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
25778 kprobe_opcode_t opcode;
25779 kprobe_opcode_t *orig_opcodes = opcodes;
25781 - if (search_exception_tables((unsigned long)opcodes))
25782 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25783 return 0; /* Page fault may occur on this address. */
25786 @@ -260,12 +263,12 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25787 * Fortunately, we know that the original code is the ideal 5-byte
25790 - memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25791 + memcpy(buf, (void *)ktla_ktva(addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25793 memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
25795 buf[0] = kp->opcode;
25796 - return (unsigned long)buf;
25797 + return ktva_ktla((unsigned long)buf);
25801 @@ -367,7 +370,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25802 /* Another subsystem puts a breakpoint, failed to recover */
25803 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25805 + pax_open_kernel();
25806 memcpy(dest, insn.kaddr, length);
25807 + pax_close_kernel();
25809 #ifdef CONFIG_X86_64
25810 if (insn_rip_relative(&insn)) {
25811 @@ -394,7 +399,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25814 disp = (u8 *) dest + insn_offset_displacement(&insn);
25815 + pax_open_kernel();
25816 *(s32 *) disp = (s32) newdisp;
25817 + pax_close_kernel();
25821 @@ -536,7 +543,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25822 * nor set current_kprobe, because it doesn't use single
25825 - regs->ip = (unsigned long)p->ainsn.insn;
25826 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25827 preempt_enable_no_resched();
25830 @@ -553,9 +560,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25831 regs->flags &= ~X86_EFLAGS_IF;
25832 /* single step inline if the instruction is an int3 */
25833 if (p->opcode == BREAKPOINT_INSTRUCTION)
25834 - regs->ip = (unsigned long)p->addr;
25835 + regs->ip = ktla_ktva((unsigned long)p->addr);
25837 - regs->ip = (unsigned long)p->ainsn.insn;
25838 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25840 NOKPROBE_SYMBOL(setup_singlestep);
25842 @@ -640,7 +647,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25843 setup_singlestep(p, regs, kcb, 0);
25846 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
25847 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25849 * The breakpoint instruction was removed right
25850 * after we hit it. Another cpu has removed
25851 @@ -687,6 +694,9 @@ static void __used kretprobe_trampoline_holder(void)
25852 " movq %rax, 152(%rsp)\n"
25853 RESTORE_REGS_STRING
25855 +#ifdef KERNEXEC_PLUGIN
25856 + " btsq $63,(%rsp)\n"
25861 @@ -827,7 +837,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
25862 struct kprobe_ctlblk *kcb)
25864 unsigned long *tos = stack_addr(regs);
25865 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25866 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25867 unsigned long orig_ip = (unsigned long)p->addr;
25868 kprobe_opcode_t *insn = p->ainsn.insn;
25870 diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25871 index 7b3b9d1..e2478b91 100644
25872 --- a/arch/x86/kernel/kprobes/opt.c
25873 +++ b/arch/x86/kernel/kprobes/opt.c
25874 @@ -79,6 +79,7 @@ found:
25875 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25876 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25878 + pax_open_kernel();
25879 #ifdef CONFIG_X86_64
25882 @@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25885 *(unsigned long *)addr = val;
25886 + pax_close_kernel();
25890 @@ -342,7 +344,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
25891 * Verify if the address gap is in 2GB range, because this uses
25894 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25895 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25896 if (abs(rel) > 0x7fffffff) {
25897 __arch_remove_optimized_kprobe(op, 0);
25899 @@ -359,16 +361,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
25900 op->optinsn.size = ret;
25902 /* Copy arch-dep-instance from template */
25903 - memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25904 + pax_open_kernel();
25905 + memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25906 + pax_close_kernel();
25908 /* Set probe information */
25909 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25911 /* Set probe function call */
25912 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25913 + synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25915 /* Set returning jmp instruction at the tail of out-of-line buffer */
25916 - synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25917 + synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25918 (u8 *)op->kp.addr + op->optinsn.size);
25920 flush_icache_range((unsigned long) buf,
25921 @@ -393,7 +397,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
25922 WARN_ON(kprobe_disabled(&op->kp));
25924 /* Backup instructions which will be replaced by jump address */
25925 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25926 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25927 RELATIVE_ADDR_SIZE);
25929 insn_buf[0] = RELATIVEJUMP_OPCODE;
25930 @@ -441,7 +445,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25931 /* This kprobe is really able to run optimized path. */
25932 op = container_of(p, struct optimized_kprobe, kp);
25933 /* Detour through copied instructions */
25934 - regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25935 + regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25937 reset_current_kprobe();
25938 preempt_enable_no_resched();
25939 diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25940 index c2bedae..25e7ab60 100644
25941 --- a/arch/x86/kernel/ksysfs.c
25942 +++ b/arch/x86/kernel/ksysfs.c
25943 @@ -184,7 +184,7 @@ out:
25945 static struct kobj_attribute type_attr = __ATTR_RO(type);
25947 -static struct bin_attribute data_attr = {
25948 +static bin_attribute_no_const data_attr __read_only = {
25952 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25953 index c37886d..f43b63d 100644
25954 --- a/arch/x86/kernel/ldt.c
25955 +++ b/arch/x86/kernel/ldt.c
25957 #include <linux/sched.h>
25958 #include <linux/string.h>
25959 #include <linux/mm.h>
25960 +#include <linux/ratelimit.h>
25961 #include <linux/smp.h>
25962 #include <linux/vmalloc.h>
25963 #include <linux/uaccess.h>
25965 #include <asm/mmu_context.h>
25966 #include <asm/syscalls.h>
25968 +#ifdef CONFIG_GRKERNSEC
25969 +int sysctl_modify_ldt __read_only = 0;
25970 +#elif defined(CONFIG_DEFAULT_MODIFY_LDT_SYSCALL)
25971 +int sysctl_modify_ldt __read_only = 1;
25973 +int sysctl_modify_ldt __read_only = 0;
25977 static void flush_ldt(void *current_mm)
25979 @@ -66,13 +75,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25984 + load_LDT_nolock(pc);
25985 if (!cpumask_equal(mm_cpumask(current->mm),
25986 cpumask_of(smp_processor_id())))
25987 smp_call_function(flush_ldt, current->mm, 1);
25991 + load_LDT_nolock(pc);
25995 @@ -94,7 +103,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25998 for (i = 0; i < old->size; i++)
25999 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
26000 + write_ldt_entry(new->ldt, i, old->ldt + i);
26004 @@ -115,6 +124,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
26005 retval = copy_ldt(&mm->context, &old_mm->context);
26006 mutex_unlock(&old_mm->context.lock);
26009 + if (tsk == current) {
26010 + mm->context.vdso = 0;
26012 +#ifdef CONFIG_X86_32
26013 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26014 + mm->context.user_cs_base = 0UL;
26015 + mm->context.user_cs_limit = ~0UL;
26017 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
26018 + cpumask_clear(&mm->context.cpu_user_cs_mask);
26029 @@ -229,6 +256,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
26033 +#ifdef CONFIG_PAX_SEGMEXEC
26034 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
26040 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
26043 @@ -254,6 +288,15 @@ asmlinkage int sys_modify_ldt(int func, void __user *ptr,
26047 + if (!sysctl_modify_ldt) {
26048 + printk_ratelimited(KERN_INFO
26049 + "Denied a call to modify_ldt() from %s[%d] (uid: %d)."
26050 + " Adjust sysctl if this was not an exploit attempt.\n",
26051 + current->comm, task_pid_nr(current),
26052 + from_kuid_munged(current_user_ns(), current_uid()));
26058 ret = read_ldt(ptr, bytecount);
26059 diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
26060 index ff3c3101d..d7c0cd8 100644
26061 --- a/arch/x86/kernel/livepatch.c
26062 +++ b/arch/x86/kernel/livepatch.c
26063 @@ -41,9 +41,10 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
26064 int ret, numpages, size = 4;
26067 - unsigned long core = (unsigned long)mod->module_core;
26068 - unsigned long core_ro_size = mod->core_ro_size;
26069 - unsigned long core_size = mod->core_size;
26070 + unsigned long core_rx = (unsigned long)mod->module_core_rx;
26071 + unsigned long core_rw = (unsigned long)mod->module_core_rw;
26072 + unsigned long core_size_rx = mod->core_size_rx;
26073 + unsigned long core_size_rw = mod->core_size_rw;
26076 case R_X86_64_NONE:
26077 @@ -66,11 +67,12 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
26081 - if (loc < core || loc >= core + core_size)
26082 + if ((loc < core_rx || loc >= core_rx + core_size_rx) &&
26083 + (loc < core_rw || loc >= core_rw + core_size_rw))
26084 /* loc does not point to any symbol inside the module */
26087 - if (loc < core + core_ro_size)
26088 + if (loc < core_rx + core_size_rx)
26092 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
26093 index 469b23d..5449cfe 100644
26094 --- a/arch/x86/kernel/machine_kexec_32.c
26095 +++ b/arch/x86/kernel/machine_kexec_32.c
26097 #include <asm/cacheflush.h>
26098 #include <asm/debugreg.h>
26100 -static void set_idt(void *newidt, __u16 limit)
26101 +static void set_idt(struct desc_struct *newidt, __u16 limit)
26103 struct desc_ptr curidt;
26105 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
26109 -static void set_gdt(void *newgdt, __u16 limit)
26110 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
26112 struct desc_ptr curgdt;
26114 @@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
26117 control_page = page_address(image->control_code_page);
26118 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
26119 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
26121 relocate_kernel_ptr = control_page;
26122 page_list[PA_CONTROL_PAGE] = __pa(control_page);
26123 diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
26124 index 94ea120..4154cea 100644
26125 --- a/arch/x86/kernel/mcount_64.S
26126 +++ b/arch/x86/kernel/mcount_64.S
26128 #include <linux/linkage.h>
26129 #include <asm/ptrace.h>
26130 #include <asm/ftrace.h>
26132 +#include <asm/alternative-asm.h>
26135 .section .entry.text, "ax"
26136 @@ -148,8 +148,9 @@
26137 #ifdef CONFIG_DYNAMIC_FTRACE
26139 ENTRY(function_hook)
26140 + pax_force_retaddr
26142 -END(function_hook)
26143 +ENDPROC(function_hook)
26145 ENTRY(ftrace_caller)
26146 /* save_mcount_regs fills in first two parameters */
26147 @@ -181,8 +182,9 @@ GLOBAL(ftrace_graph_call)
26150 GLOBAL(ftrace_stub)
26151 + pax_force_retaddr
26153 -END(ftrace_caller)
26154 +ENDPROC(ftrace_caller)
26156 ENTRY(ftrace_regs_caller)
26157 /* Save the current flags before any operations that can change them */
26158 @@ -253,7 +255,7 @@ GLOBAL(ftrace_regs_caller_end)
26162 -END(ftrace_regs_caller)
26163 +ENDPROC(ftrace_regs_caller)
26166 #else /* ! CONFIG_DYNAMIC_FTRACE */
26167 @@ -272,18 +274,20 @@ fgraph_trace:
26170 GLOBAL(ftrace_stub)
26171 + pax_force_retaddr
26175 /* save_mcount_regs fills in first two parameters */
26178 + pax_force_fptr ftrace_trace_function
26179 call *ftrace_trace_function
26181 restore_mcount_regs
26184 -END(function_hook)
26185 +ENDPROC(function_hook)
26186 #endif /* CONFIG_DYNAMIC_FTRACE */
26187 #endif /* CONFIG_FUNCTION_TRACER */
26189 @@ -305,8 +309,9 @@ ENTRY(ftrace_graph_caller)
26191 restore_mcount_regs
26193 + pax_force_retaddr
26195 -END(ftrace_graph_caller)
26196 +ENDPROC(ftrace_graph_caller)
26198 GLOBAL(return_to_handler)
26200 @@ -322,5 +327,7 @@ GLOBAL(return_to_handler)
26204 + pax_force_fptr %rdi
26206 +ENDPROC(return_to_handler)
26208 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
26209 index 005c03e..2f440cd 100644
26210 --- a/arch/x86/kernel/module.c
26211 +++ b/arch/x86/kernel/module.c
26212 @@ -75,17 +75,17 @@ static unsigned long int get_module_load_offset(void)
26216 -void *module_alloc(unsigned long size)
26217 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
26221 - if (PAGE_ALIGN(size) > MODULES_LEN)
26222 + if (!size || PAGE_ALIGN(size) > MODULES_LEN)
26225 p = __vmalloc_node_range(size, MODULE_ALIGN,
26226 MODULES_VADDR + get_module_load_offset(),
26227 - MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
26228 - PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
26229 + MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
26230 + prot, 0, NUMA_NO_NODE,
26231 __builtin_return_address(0));
26232 if (p && (kasan_module_alloc(p, size) < 0)) {
26234 @@ -95,6 +95,51 @@ void *module_alloc(unsigned long size)
26238 +void *module_alloc(unsigned long size)
26241 +#ifdef CONFIG_PAX_KERNEXEC
26242 + return __module_alloc(size, PAGE_KERNEL);
26244 + return __module_alloc(size, PAGE_KERNEL_EXEC);
26249 +#ifdef CONFIG_PAX_KERNEXEC
26250 +#ifdef CONFIG_X86_32
26251 +void *module_alloc_exec(unsigned long size)
26253 + struct vm_struct *area;
26258 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
26259 +return area ? area->addr : NULL;
26261 +EXPORT_SYMBOL(module_alloc_exec);
26263 +void module_memfree_exec(void *module_region)
26265 + vunmap(module_region);
26267 +EXPORT_SYMBOL(module_memfree_exec);
26269 +void module_memfree_exec(void *module_region)
26271 + module_memfree(module_region);
26273 +EXPORT_SYMBOL(module_memfree_exec);
26275 +void *module_alloc_exec(unsigned long size)
26277 + return __module_alloc(size, PAGE_KERNEL_RX);
26279 +EXPORT_SYMBOL(module_alloc_exec);
26283 #ifdef CONFIG_X86_32
26284 int apply_relocate(Elf32_Shdr *sechdrs,
26285 const char *strtab,
26286 @@ -105,14 +150,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26288 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26290 - uint32_t *location;
26291 + uint32_t *plocation, location;
26293 DEBUGP("Applying relocate section %u to %u\n",
26294 relsec, sechdrs[relsec].sh_info);
26295 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26296 /* This is where to make the change */
26297 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26298 - + rel[i].r_offset;
26299 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26300 + location = (uint32_t)plocation;
26301 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26302 + plocation = ktla_ktva((void *)plocation);
26303 /* This is the symbol it is referring to. Note that all
26304 undefined symbols have been resolved. */
26305 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26306 @@ -121,11 +168,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26307 switch (ELF32_R_TYPE(rel[i].r_info)) {
26309 /* We add the value into the location given */
26310 - *location += sym->st_value;
26311 + pax_open_kernel();
26312 + *plocation += sym->st_value;
26313 + pax_close_kernel();
26316 /* Add the value, subtract its position */
26317 - *location += sym->st_value - (uint32_t)location;
26318 + pax_open_kernel();
26319 + *plocation += sym->st_value - location;
26320 + pax_close_kernel();
26323 pr_err("%s: Unknown relocation: %u\n",
26324 @@ -170,21 +221,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26325 case R_X86_64_NONE:
26328 + pax_open_kernel();
26330 + pax_close_kernel();
26333 + pax_open_kernel();
26335 + pax_close_kernel();
26336 if (val != *(u32 *)loc)
26340 + pax_open_kernel();
26342 + pax_close_kernel();
26343 if ((s64)val != *(s32 *)loc)
26346 case R_X86_64_PC32:
26348 + pax_open_kernel();
26350 + pax_close_kernel();
26353 if ((s64)val != *(s32 *)loc)
26355 diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26356 index 113e707..0a690e1 100644
26357 --- a/arch/x86/kernel/msr.c
26358 +++ b/arch/x86/kernel/msr.c
26360 #include <linux/notifier.h>
26361 #include <linux/uaccess.h>
26362 #include <linux/gfp.h>
26363 +#include <linux/grsecurity.h>
26365 #include <asm/processor.h>
26366 #include <asm/msr.h>
26367 @@ -105,6 +106,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26371 +#ifdef CONFIG_GRKERNSEC_KMEM
26372 + gr_handle_msr_write();
26377 return -EINVAL; /* Invalid chunk size */
26379 @@ -152,6 +158,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26383 +#ifdef CONFIG_GRKERNSEC_KMEM
26384 + gr_handle_msr_write();
26387 if (copy_from_user(®s, uregs, sizeof regs)) {
26390 @@ -235,7 +245,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26391 return notifier_from_errno(err);
26394 -static struct notifier_block __refdata msr_class_cpu_notifier = {
26395 +static struct notifier_block msr_class_cpu_notifier = {
26396 .notifier_call = msr_class_cpu_callback,
26399 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26400 index c3e985d..f690edd 100644
26401 --- a/arch/x86/kernel/nmi.c
26402 +++ b/arch/x86/kernel/nmi.c
26403 @@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26405 static void nmi_max_handler(struct irq_work *w)
26407 - struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26408 + struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26409 int remainder_ns, decimal_msecs;
26410 - u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26411 + u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26413 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26414 decimal_msecs = remainder_ns / 1000;
26416 printk_ratelimited(KERN_INFO
26417 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26418 - a->handler, whole_msecs, decimal_msecs);
26419 + n->action->handler, whole_msecs, decimal_msecs);
26422 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26423 @@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26424 delta = sched_clock() - delta;
26425 trace_nmi_handler(a->handler, (int)delta, thishandled);
26427 - if (delta < nmi_longest_ns || delta < a->max_duration)
26428 + if (delta < nmi_longest_ns || delta < a->work->max_duration)
26431 - a->max_duration = delta;
26432 - irq_work_queue(&a->irq_work);
26433 + a->work->max_duration = delta;
26434 + irq_work_queue(&a->work->irq_work);
26438 @@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26440 NOKPROBE_SYMBOL(nmi_handle);
26442 -int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26443 +int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26445 struct nmi_desc *desc = nmi_to_desc(type);
26446 unsigned long flags;
26447 @@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26448 if (!action->handler)
26451 - init_irq_work(&action->irq_work, nmi_max_handler);
26452 + action->work->action = action;
26453 + init_irq_work(&action->work->irq_work, nmi_max_handler);
26455 spin_lock_irqsave(&desc->lock, flags);
26457 @@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26458 * event confuses some handlers (kdump uses this flag)
26460 if (action->flags & NMI_FLAG_FIRST)
26461 - list_add_rcu(&action->list, &desc->head);
26462 + pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26464 - list_add_tail_rcu(&action->list, &desc->head);
26465 + pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26467 spin_unlock_irqrestore(&desc->lock, flags);
26469 @@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26470 if (!strcmp(n->name, name)) {
26472 "Trying to free NMI (%s) from NMI context!\n", n->name);
26473 - list_del_rcu(&n->list);
26474 + pax_list_del_rcu((struct list_head *)&n->list);
26478 @@ -408,15 +409,15 @@ static void default_do_nmi(struct pt_regs *regs)
26479 NOKPROBE_SYMBOL(default_do_nmi);
26482 - * NMIs can hit breakpoints which will cause it to lose its
26483 - * NMI context with the CPU when the breakpoint does an iret.
26485 -#ifdef CONFIG_X86_32
26487 - * For i386, NMIs use the same stack as the kernel, and we can
26488 - * add a workaround to the iret problem in C (preventing nested
26489 - * NMIs if an NMI takes a trap). Simply have 3 states the NMI
26491 + * NMIs can page fault or hit breakpoints which will cause it to lose
26492 + * its NMI context with the CPU when the breakpoint or page fault does an IRET.
26494 + * As a result, NMIs can nest if NMIs get unmasked due an IRET during
26495 + * NMI processing. On x86_64, the asm glue protects us from nested NMIs
26496 + * if the outer NMI came from kernel mode, but we can still nest if the
26497 + * outer NMI came from user mode.
26499 + * To handle these nested NMIs, we have three states:
26503 @@ -430,15 +431,14 @@ NOKPROBE_SYMBOL(default_do_nmi);
26504 * (Note, the latch is binary, thus multiple NMIs triggering,
26505 * when one is running, are ignored. Only one NMI is restarted.)
26507 - * If an NMI hits a breakpoint that executes an iret, another
26508 - * NMI can preempt it. We do not want to allow this new NMI
26509 - * to run, but we want to execute it when the first one finishes.
26510 - * We set the state to "latched", and the exit of the first NMI will
26511 - * perform a dec_return, if the result is zero (NOT_RUNNING), then
26512 - * it will simply exit the NMI handler. If not, the dec_return
26513 - * would have set the state to NMI_EXECUTING (what we want it to
26514 - * be when we are running). In this case, we simply jump back
26515 - * to rerun the NMI handler again, and restart the 'latched' NMI.
26516 + * If an NMI executes an iret, another NMI can preempt it. We do not
26517 + * want to allow this new NMI to run, but we want to execute it when the
26518 + * first one finishes. We set the state to "latched", and the exit of
26519 + * the first NMI will perform a dec_return, if the result is zero
26520 + * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
26521 + * dec_return would have set the state to NMI_EXECUTING (what we want it
26522 + * to be when we are running). In this case, we simply jump back to
26523 + * rerun the NMI handler again, and restart the 'latched' NMI.
26525 * No trap (breakpoint or page fault) should be hit before nmi_restart,
26526 * thus there is no race between the first check of state for NOT_RUNNING
26527 @@ -461,49 +461,47 @@ enum nmi_states {
26528 static DEFINE_PER_CPU(enum nmi_states, nmi_state);
26529 static DEFINE_PER_CPU(unsigned long, nmi_cr2);
26531 -#define nmi_nesting_preprocess(regs) \
26533 - if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { \
26534 - this_cpu_write(nmi_state, NMI_LATCHED); \
26537 - this_cpu_write(nmi_state, NMI_EXECUTING); \
26538 - this_cpu_write(nmi_cr2, read_cr2()); \
26542 -#define nmi_nesting_postprocess() \
26544 - if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) \
26545 - write_cr2(this_cpu_read(nmi_cr2)); \
26546 - if (this_cpu_dec_return(nmi_state)) \
26547 - goto nmi_restart; \
26549 -#else /* x86_64 */
26550 +#ifdef CONFIG_X86_64
26552 - * In x86_64 things are a bit more difficult. This has the same problem
26553 - * where an NMI hitting a breakpoint that calls iret will remove the
26554 - * NMI context, allowing a nested NMI to enter. What makes this more
26555 - * difficult is that both NMIs and breakpoints have their own stack.
26556 - * When a new NMI or breakpoint is executed, the stack is set to a fixed
26557 - * point. If an NMI is nested, it will have its stack set at that same
26558 - * fixed address that the first NMI had, and will start corrupting the
26559 - * stack. This is handled in entry_64.S, but the same problem exists with
26560 - * the breakpoint stack.
26561 + * In x86_64, we need to handle breakpoint -> NMI -> breakpoint. Without
26562 + * some care, the inner breakpoint will clobber the outer breakpoint's
26565 - * If a breakpoint is being processed, and the debug stack is being used,
26566 - * if an NMI comes in and also hits a breakpoint, the stack pointer
26567 - * will be set to the same fixed address as the breakpoint that was
26568 - * interrupted, causing that stack to be corrupted. To handle this case,
26569 - * check if the stack that was interrupted is the debug stack, and if
26570 - * so, change the IDT so that new breakpoints will use the current stack
26571 - * and not switch to the fixed address. On return of the NMI, switch back
26572 - * to the original IDT.
26573 + * If a breakpoint is being processed, and the debug stack is being
26574 + * used, if an NMI comes in and also hits a breakpoint, the stack
26575 + * pointer will be set to the same fixed address as the breakpoint that
26576 + * was interrupted, causing that stack to be corrupted. To handle this
26577 + * case, check if the stack that was interrupted is the debug stack, and
26578 + * if so, change the IDT so that new breakpoints will use the current
26579 + * stack and not switch to the fixed address. On return of the NMI,
26580 + * switch back to the original IDT.
26582 static DEFINE_PER_CPU(int, update_debug_stack);
26585 -static inline void nmi_nesting_preprocess(struct pt_regs *regs)
26586 +dotraplinkage notrace void
26587 +do_nmi(struct pt_regs *regs, long error_code)
26590 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26591 + if (!user_mode(regs)) {
26592 + unsigned long cs = regs->cs & 0xFFFF;
26593 + unsigned long ip = ktva_ktla(regs->ip);
26595 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26600 + if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
26601 + this_cpu_write(nmi_state, NMI_LATCHED);
26604 + this_cpu_write(nmi_state, NMI_EXECUTING);
26605 + this_cpu_write(nmi_cr2, read_cr2());
26608 +#ifdef CONFIG_X86_64
26610 * If we interrupted a breakpoint, it is possible that
26611 * the nmi handler will have breakpoints too. We need to
26612 @@ -514,22 +512,8 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs)
26613 debug_stack_set_zero();
26614 this_cpu_write(update_debug_stack, 1);
26618 -static inline void nmi_nesting_postprocess(void)
26620 - if (unlikely(this_cpu_read(update_debug_stack))) {
26621 - debug_stack_reset();
26622 - this_cpu_write(update_debug_stack, 0);
26627 -dotraplinkage notrace void
26628 -do_nmi(struct pt_regs *regs, long error_code)
26630 - nmi_nesting_preprocess(regs);
26634 inc_irq_stat(__nmi_count);
26635 @@ -539,8 +523,17 @@ do_nmi(struct pt_regs *regs, long error_code)
26639 - /* On i386, may loop back to preprocess */
26640 - nmi_nesting_postprocess();
26641 +#ifdef CONFIG_X86_64
26642 + if (unlikely(this_cpu_read(update_debug_stack))) {
26643 + debug_stack_reset();
26644 + this_cpu_write(update_debug_stack, 0);
26648 + if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
26649 + write_cr2(this_cpu_read(nmi_cr2));
26650 + if (this_cpu_dec_return(nmi_state))
26651 + goto nmi_restart;
26653 NOKPROBE_SYMBOL(do_nmi);
26655 diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26656 index 6d9582e..f746287 100644
26657 --- a/arch/x86/kernel/nmi_selftest.c
26658 +++ b/arch/x86/kernel/nmi_selftest.c
26659 @@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26661 /* trap all the unknown NMIs we may generate */
26662 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26667 static void __init cleanup_nmi_testsuite(void)
26668 @@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26669 unsigned long timeout;
26671 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26672 - NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26673 + NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26674 nmi_fail = FAILURE;
26677 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26678 index bbb6c73..24a58ef 100644
26679 --- a/arch/x86/kernel/paravirt-spinlocks.c
26680 +++ b/arch/x86/kernel/paravirt-spinlocks.c
26683 #include <asm/paravirt.h>
26685 -struct pv_lock_ops pv_lock_ops = {
26686 +struct pv_lock_ops pv_lock_ops __read_only = {
26688 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26689 .unlock_kick = paravirt_nop,
26690 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26691 index c614dd4..9ad659e 100644
26692 --- a/arch/x86/kernel/paravirt.c
26693 +++ b/arch/x86/kernel/paravirt.c
26694 @@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26698 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26699 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26702 void __init default_banner(void)
26704 @@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26706 if (opfunc == NULL)
26707 /* If there's no function, patch it with a ud2a (BUG) */
26708 - ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26709 - else if (opfunc == _paravirt_nop)
26710 + ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26711 + else if (opfunc == (void *)_paravirt_nop)
26712 /* If the operation is a nop, then nop the callsite */
26713 ret = paravirt_patch_nop();
26715 /* identity functions just return their single argument */
26716 - else if (opfunc == _paravirt_ident_32)
26717 + else if (opfunc == (void *)_paravirt_ident_32)
26718 ret = paravirt_patch_ident_32(insnbuf, len);
26719 - else if (opfunc == _paravirt_ident_64)
26720 + else if (opfunc == (void *)_paravirt_ident_64)
26721 ret = paravirt_patch_ident_64(insnbuf, len);
26722 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26723 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26724 + ret = paravirt_patch_ident_64(insnbuf, len);
26727 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26728 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26729 @@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26730 if (insn_len > len || start == NULL)
26733 - memcpy(insnbuf, start, insn_len);
26734 + memcpy(insnbuf, ktla_ktva(start), insn_len);
26738 @@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26739 return this_cpu_read(paravirt_lazy_mode);
26742 -struct pv_info pv_info = {
26743 +struct pv_info pv_info __read_only = {
26744 .name = "bare hardware",
26745 .paravirt_enabled = 0,
26747 @@ -311,16 +318,16 @@ struct pv_info pv_info = {
26751 -struct pv_init_ops pv_init_ops = {
26752 +struct pv_init_ops pv_init_ops __read_only = {
26753 .patch = native_patch,
26756 -struct pv_time_ops pv_time_ops = {
26757 +struct pv_time_ops pv_time_ops __read_only = {
26758 .sched_clock = native_sched_clock,
26759 .steal_clock = native_steal_clock,
26762 -__visible struct pv_irq_ops pv_irq_ops = {
26763 +__visible struct pv_irq_ops pv_irq_ops __read_only = {
26764 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26765 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26766 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26767 @@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26771 -__visible struct pv_cpu_ops pv_cpu_ops = {
26772 +__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26773 .cpuid = native_cpuid,
26774 .get_debugreg = native_get_debugreg,
26775 .set_debugreg = native_set_debugreg,
26776 @@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
26777 NOKPROBE_SYMBOL(native_set_debugreg);
26778 NOKPROBE_SYMBOL(native_load_idt);
26780 -struct pv_apic_ops pv_apic_ops = {
26781 +struct pv_apic_ops pv_apic_ops __read_only= {
26782 #ifdef CONFIG_X86_LOCAL_APIC
26783 .startup_ipi_hook = paravirt_nop,
26787 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26788 +#ifdef CONFIG_X86_32
26789 +#ifdef CONFIG_X86_PAE
26790 +/* 64-bit pagetable entries */
26791 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26793 /* 32-bit pagetable entries */
26794 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26797 /* 64-bit pagetable entries */
26798 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26801 -struct pv_mmu_ops pv_mmu_ops = {
26802 +struct pv_mmu_ops pv_mmu_ops __read_only = {
26804 .read_cr2 = native_read_cr2,
26805 .write_cr2 = native_write_cr2,
26806 @@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26807 .make_pud = PTE_IDENT,
26809 .set_pgd = native_set_pgd,
26810 + .set_pgd_batched = native_set_pgd_batched,
26812 #endif /* CONFIG_PGTABLE_LEVELS >= 3 */
26814 @@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26817 .set_fixmap = native_set_fixmap,
26819 +#ifdef CONFIG_PAX_KERNEXEC
26820 + .pax_open_kernel = native_pax_open_kernel,
26821 + .pax_close_kernel = native_pax_close_kernel,
26826 EXPORT_SYMBOL_GPL(pv_time_ops);
26827 diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
26828 index a1da673..b6f5831 100644
26829 --- a/arch/x86/kernel/paravirt_patch_64.c
26830 +++ b/arch/x86/kernel/paravirt_patch_64.c
26831 @@ -9,7 +9,11 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
26832 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
26833 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
26834 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
26836 +#ifndef CONFIG_PAX_MEMORY_UDEREF
26837 DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
26840 DEF_NATIVE(pv_cpu_ops, clts, "clts");
26841 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
26843 @@ -57,7 +61,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
26844 PATCH_SITE(pv_mmu_ops, read_cr3);
26845 PATCH_SITE(pv_mmu_ops, write_cr3);
26846 PATCH_SITE(pv_cpu_ops, clts);
26848 +#ifndef CONFIG_PAX_MEMORY_UDEREF
26849 PATCH_SITE(pv_mmu_ops, flush_tlb_single);
26852 PATCH_SITE(pv_cpu_ops, wbinvd);
26855 diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26856 index 0497f71..7186c0d 100644
26857 --- a/arch/x86/kernel/pci-calgary_64.c
26858 +++ b/arch/x86/kernel/pci-calgary_64.c
26859 @@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
26860 tce_space = be64_to_cpu(readq(target));
26861 tce_space = tce_space & TAR_SW_BITS;
26863 - tce_space = tce_space & (~specified_table_size);
26864 + tce_space = tce_space & (~(unsigned long)specified_table_size);
26865 info->tce_space = (u64 *)__va(tce_space);
26868 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26869 index 35ccf75..7a15747 100644
26870 --- a/arch/x86/kernel/pci-iommu_table.c
26871 +++ b/arch/x86/kernel/pci-iommu_table.c
26873 #include <asm/iommu_table.h>
26874 #include <linux/string.h>
26875 #include <linux/kallsyms.h>
26877 +#include <linux/sched.h>
26881 diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26882 index 77dd0ad..9ec4723 100644
26883 --- a/arch/x86/kernel/pci-swiotlb.c
26884 +++ b/arch/x86/kernel/pci-swiotlb.c
26885 @@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26886 struct dma_attrs *attrs)
26888 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
26889 - swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26890 + swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26892 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
26894 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26895 index 6e338e3..82f946e 100644
26896 --- a/arch/x86/kernel/process.c
26897 +++ b/arch/x86/kernel/process.c
26899 * section. Since TSS's are completely CPU-local, we want them
26900 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26902 -__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
26903 +struct tss_struct cpu_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = {
26904 + [0 ... NR_CPUS-1] = {
26906 .sp0 = TOP_OF_INIT_STACK,
26907 #ifdef CONFIG_X86_32
26908 @@ -56,6 +57,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
26910 .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
26914 EXPORT_PER_CPU_SYMBOL(cpu_tss);
26916 @@ -115,7 +117,7 @@ void arch_task_cache_init(void)
26917 task_xstate_cachep =
26918 kmem_cache_create("task_xstate", xstate_size,
26919 __alignof__(union thread_xstate),
26920 - SLAB_PANIC | SLAB_NOTRACK, NULL);
26921 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26922 setup_xstate_comp();
26925 @@ -129,7 +131,7 @@ void exit_thread(void)
26926 unsigned long *bp = t->io_bitmap_ptr;
26929 - struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());
26930 + struct tss_struct *tss = cpu_tss + get_cpu();
26932 t->io_bitmap_ptr = NULL;
26933 clear_thread_flag(TIF_IO_BITMAP);
26934 @@ -149,6 +151,9 @@ void flush_thread(void)
26936 struct task_struct *tsk = current;
26938 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26939 + loadsegment(gs, 0);
26941 flush_ptrace_hw_breakpoint(tsk);
26942 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26944 @@ -302,7 +307,7 @@ static void __exit_idle(void)
26945 void exit_idle(void)
26947 /* idle loop has pid 0 */
26948 - if (current->pid)
26949 + if (task_pid_nr(current))
26953 @@ -355,7 +360,7 @@ bool xen_set_default_idle(void)
26957 -void stop_this_cpu(void *dummy)
26958 +__noreturn void stop_this_cpu(void *dummy)
26960 local_irq_disable();
26962 @@ -531,16 +536,43 @@ static int __init idle_setup(char *str)
26964 early_param("idle", idle_setup);
26966 -unsigned long arch_align_stack(unsigned long sp)
26968 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26969 - sp -= get_random_int() % 8192;
26970 - return sp & ~0xf;
26973 unsigned long arch_randomize_brk(struct mm_struct *mm)
26975 unsigned long range_end = mm->brk + 0x02000000;
26976 return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26979 +#ifdef CONFIG_PAX_RANDKSTACK
26980 +void pax_randomize_kstack(struct pt_regs *regs)
26982 + struct thread_struct *thread = ¤t->thread;
26983 + unsigned long time;
26985 + if (!randomize_va_space)
26988 + if (v8086_mode(regs))
26993 + /* P4 seems to return a 0 LSB, ignore it */
26994 +#ifdef CONFIG_MPENTIUM4
26997 +#elif defined(CONFIG_X86_64)
27005 + thread->sp0 ^= time;
27006 + load_sp0(cpu_tss + smp_processor_id(), thread);
27008 +#ifdef CONFIG_X86_64
27009 + this_cpu_write(kernel_stack, thread->sp0);
27013 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
27014 index 8ed2106..1345704 100644
27015 --- a/arch/x86/kernel/process_32.c
27016 +++ b/arch/x86/kernel/process_32.c
27017 @@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
27018 unsigned long thread_saved_pc(struct task_struct *tsk)
27020 return ((unsigned long *)tsk->thread.sp)[3];
27021 +//XXX return tsk->thread.eip;
27024 void __show_regs(struct pt_regs *regs, int all)
27025 @@ -76,16 +77,15 @@ void __show_regs(struct pt_regs *regs, int all)
27026 if (user_mode(regs)) {
27028 ss = regs->ss & 0xffff;
27029 - gs = get_user_gs(regs);
27031 sp = kernel_stack_pointer(regs);
27032 savesegment(ss, ss);
27033 - savesegment(gs, gs);
27035 + gs = get_user_gs(regs);
27037 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
27038 (u16)regs->cs, regs->ip, regs->flags,
27039 - smp_processor_id());
27040 + raw_smp_processor_id());
27041 print_symbol("EIP is at %s\n", regs->ip);
27043 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
27044 @@ -132,21 +132,22 @@ void release_thread(struct task_struct *dead_task)
27045 int copy_thread(unsigned long clone_flags, unsigned long sp,
27046 unsigned long arg, struct task_struct *p)
27048 - struct pt_regs *childregs = task_pt_regs(p);
27049 + struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
27050 struct task_struct *tsk;
27053 p->thread.sp = (unsigned long) childregs;
27054 p->thread.sp0 = (unsigned long) (childregs+1);
27055 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
27056 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
27058 if (unlikely(p->flags & PF_KTHREAD)) {
27059 /* kernel thread */
27060 memset(childregs, 0, sizeof(struct pt_regs));
27061 p->thread.ip = (unsigned long) ret_from_kernel_thread;
27062 - task_user_gs(p) = __KERNEL_STACK_CANARY;
27063 - childregs->ds = __USER_DS;
27064 - childregs->es = __USER_DS;
27065 + savesegment(gs, childregs->gs);
27066 + childregs->ds = __KERNEL_DS;
27067 + childregs->es = __KERNEL_DS;
27068 childregs->fs = __KERNEL_PERCPU;
27069 childregs->bx = sp; /* function */
27070 childregs->bp = arg;
27071 @@ -244,7 +245,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27072 struct thread_struct *prev = &prev_p->thread,
27073 *next = &next_p->thread;
27074 int cpu = smp_processor_id();
27075 - struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
27076 + struct tss_struct *tss = cpu_tss + cpu;
27079 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
27080 @@ -263,6 +264,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27082 lazy_save_gs(prev->gs);
27084 +#ifdef CONFIG_PAX_MEMORY_UDEREF
27085 + __set_fs(task_thread_info(next_p)->addr_limit);
27089 * Load the per-thread Thread-Local Storage descriptor.
27091 @@ -306,12 +311,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27092 * current_thread_info().
27094 load_sp0(tss, next);
27095 - this_cpu_write(kernel_stack,
27096 - (unsigned long)task_stack_page(next_p) +
27098 - this_cpu_write(cpu_current_top_of_stack,
27099 - (unsigned long)task_stack_page(next_p) +
27101 + this_cpu_write(current_task, next_p);
27102 + this_cpu_write(current_tinfo, &next_p->tinfo);
27103 + this_cpu_write(kernel_stack, next->sp0);
27104 + this_cpu_write(cpu_current_top_of_stack, next->sp0);
27107 * Restore %gs if needed (which is common)
27108 @@ -321,8 +324,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27110 switch_fpu_finish(next_p, fpu);
27112 - this_cpu_write(current_task, next_p);
27117 @@ -352,4 +353,3 @@ unsigned long get_wchan(struct task_struct *p)
27118 } while (count++ < 16);
27122 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
27123 index ddfdbf7..625417c 100644
27124 --- a/arch/x86/kernel/process_64.c
27125 +++ b/arch/x86/kernel/process_64.c
27126 @@ -158,9 +158,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
27127 struct pt_regs *childregs;
27128 struct task_struct *me = current;
27130 - p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
27131 + p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
27132 childregs = task_pt_regs(p);
27133 p->thread.sp = (unsigned long) childregs;
27134 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
27135 set_tsk_thread_flag(p, TIF_FORK);
27136 p->thread.io_bitmap_ptr = NULL;
27138 @@ -170,6 +171,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
27139 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
27140 savesegment(es, p->thread.es);
27141 savesegment(ds, p->thread.ds);
27142 + savesegment(ss, p->thread.ss);
27143 + BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
27144 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
27146 if (unlikely(p->flags & PF_KTHREAD)) {
27147 @@ -275,7 +278,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27148 struct thread_struct *prev = &prev_p->thread;
27149 struct thread_struct *next = &next_p->thread;
27150 int cpu = smp_processor_id();
27151 - struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
27152 + struct tss_struct *tss = cpu_tss + cpu;
27153 unsigned fsindex, gsindex;
27156 @@ -326,6 +329,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27157 if (unlikely(next->ds | prev->ds))
27158 loadsegment(ds, next->ds);
27160 + savesegment(ss, prev->ss);
27161 + if (unlikely(next->ss != prev->ss))
27162 + loadsegment(ss, next->ss);
27165 * Switch FS and GS.
27167 @@ -397,6 +404,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27168 * Switch the PDA and FPU contexts.
27170 this_cpu_write(current_task, next_p);
27171 + this_cpu_write(current_tinfo, &next_p->tinfo);
27174 * If it were not for PREEMPT_ACTIVE we could guarantee that the
27175 @@ -409,8 +417,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27176 /* Reload esp0 and ss1. This changes current_thread_info(). */
27177 load_sp0(tss, next);
27179 - this_cpu_write(kernel_stack,
27180 - (unsigned long)task_stack_page(next_p) + THREAD_SIZE);
27181 + this_cpu_write(kernel_stack, next->sp0);
27184 * Now maybe reload the debug registers and handle I/O bitmaps
27185 @@ -508,12 +515,11 @@ unsigned long get_wchan(struct task_struct *p)
27186 if (!p || p == current || p->state == TASK_RUNNING)
27188 stack = (unsigned long)task_stack_page(p);
27189 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
27190 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
27192 fp = *(u64 *)(p->thread.sp);
27194 - if (fp < (unsigned long)stack ||
27195 - fp >= (unsigned long)stack+THREAD_SIZE)
27196 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
27198 ip = *(u64 *)(fp+8);
27199 if (!in_sched_functions(ip))
27200 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
27201 index a7bc794..094ee8e 100644
27202 --- a/arch/x86/kernel/ptrace.c
27203 +++ b/arch/x86/kernel/ptrace.c
27204 @@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
27205 unsigned long sp = (unsigned long)®s->sp;
27208 - if (context == (sp & ~(THREAD_SIZE - 1)))
27209 + if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
27212 - prev_esp = (u32 *)(context);
27213 + prev_esp = *(u32 **)(context);
27215 return (unsigned long)prev_esp;
27217 @@ -446,6 +446,20 @@ static int putreg(struct task_struct *child,
27218 if (child->thread.gs != value)
27219 return do_arch_prctl(child, ARCH_SET_GS, value);
27222 + case offsetof(struct user_regs_struct,ip):
27224 + * Protect against any attempt to set ip to an
27225 + * impossible address. There are dragons lurking if the
27226 + * address is noncanonical. (This explicitly allows
27227 + * setting ip to TASK_SIZE_MAX, because user code can do
27228 + * that all by itself by running off the end of its
27231 + if (value > TASK_SIZE_MAX)
27238 @@ -582,7 +596,7 @@ static void ptrace_triggered(struct perf_event *bp,
27239 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
27243 + unsigned long dr7 = 0;
27244 struct arch_hw_breakpoint *info;
27246 for (i = 0; i < HBP_NUM; i++) {
27247 @@ -816,7 +830,7 @@ long arch_ptrace(struct task_struct *child, long request,
27248 unsigned long addr, unsigned long data)
27251 - unsigned long __user *datap = (unsigned long __user *)data;
27252 + unsigned long __user *datap = (__force unsigned long __user *)data;
27255 /* read the word at location addr in the USER area. */
27256 @@ -901,14 +915,14 @@ long arch_ptrace(struct task_struct *child, long request,
27257 if ((int) addr < 0)
27259 ret = do_get_thread_area(child, addr,
27260 - (struct user_desc __user *)data);
27261 + (__force struct user_desc __user *) data);
27264 case PTRACE_SET_THREAD_AREA:
27265 if ((int) addr < 0)
27267 ret = do_set_thread_area(child, addr,
27268 - (struct user_desc __user *)data, 0);
27269 + (__force struct user_desc __user *) data, 0);
27273 @@ -1286,7 +1300,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
27275 #ifdef CONFIG_X86_64
27277 -static struct user_regset x86_64_regsets[] __read_mostly = {
27278 +static user_regset_no_const x86_64_regsets[] __read_only = {
27279 [REGSET_GENERAL] = {
27280 .core_note_type = NT_PRSTATUS,
27281 .n = sizeof(struct user_regs_struct) / sizeof(long),
27282 @@ -1327,7 +1341,7 @@ static const struct user_regset_view user_x86_64_view = {
27283 #endif /* CONFIG_X86_64 */
27285 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
27286 -static struct user_regset x86_32_regsets[] __read_mostly = {
27287 +static user_regset_no_const x86_32_regsets[] __read_only = {
27288 [REGSET_GENERAL] = {
27289 .core_note_type = NT_PRSTATUS,
27290 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
27291 @@ -1380,7 +1394,7 @@ static const struct user_regset_view user_x86_32_view = {
27293 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
27295 -void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
27296 +void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
27298 #ifdef CONFIG_X86_64
27299 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
27300 @@ -1415,7 +1429,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
27301 memset(info, 0, sizeof(*info));
27302 info->si_signo = SIGTRAP;
27303 info->si_code = si_code;
27304 - info->si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL;
27305 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
27308 void user_single_step_siginfo(struct task_struct *tsk,
27309 @@ -1449,6 +1463,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
27313 +#ifdef CONFIG_GRKERNSEC_SETXID
27314 +extern void gr_delayed_cred_worker(void);
27318 * We can return 0 to resume the syscall or anything else to go to phase
27319 * 2. If we resume the syscall, we need to put something appropriate in
27320 @@ -1556,6 +1574,11 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
27322 BUG_ON(regs != task_pt_regs(current));
27324 +#ifdef CONFIG_GRKERNSEC_SETXID
27325 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
27326 + gr_delayed_cred_worker();
27330 * If we stepped into a sysenter/syscall insn, it trapped in
27331 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
27332 @@ -1614,6 +1637,11 @@ void syscall_trace_leave(struct pt_regs *regs)
27336 +#ifdef CONFIG_GRKERNSEC_SETXID
27337 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
27338 + gr_delayed_cred_worker();
27341 audit_syscall_exit(regs);
27343 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
27344 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
27345 index 2f355d2..e75ed0a 100644
27346 --- a/arch/x86/kernel/pvclock.c
27347 +++ b/arch/x86/kernel/pvclock.c
27348 @@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
27349 reset_hung_task_detector();
27352 -static atomic64_t last_value = ATOMIC64_INIT(0);
27353 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
27355 void pvclock_resume(void)
27357 - atomic64_set(&last_value, 0);
27358 + atomic64_set_unchecked(&last_value, 0);
27361 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
27362 @@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
27363 * updating at the same time, and one of them could be slightly behind,
27364 * making the assumption that last_value always go forward fail to hold.
27366 - last = atomic64_read(&last_value);
27367 + last = atomic64_read_unchecked(&last_value);
27371 - last = atomic64_cmpxchg(&last_value, last, ret);
27372 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
27373 } while (unlikely(last != ret));
27376 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
27377 index 86db4bc..a50a54a 100644
27378 --- a/arch/x86/kernel/reboot.c
27379 +++ b/arch/x86/kernel/reboot.c
27380 @@ -70,6 +70,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
27382 void __noreturn machine_real_restart(unsigned int type)
27385 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
27386 + struct desc_struct *gdt;
27389 local_irq_disable();
27392 @@ -97,7 +102,29 @@ void __noreturn machine_real_restart(unsigned int type)
27394 /* Jump to the identity-mapped low memory code */
27395 #ifdef CONFIG_X86_32
27396 - asm volatile("jmpl *%0" : :
27398 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
27399 + gdt = get_cpu_gdt_table(smp_processor_id());
27400 + pax_open_kernel();
27401 +#ifdef CONFIG_PAX_MEMORY_UDEREF
27402 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
27403 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
27404 + loadsegment(ds, __KERNEL_DS);
27405 + loadsegment(es, __KERNEL_DS);
27406 + loadsegment(ss, __KERNEL_DS);
27408 +#ifdef CONFIG_PAX_KERNEXEC
27409 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
27410 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
27411 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
27412 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
27413 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
27414 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
27416 + pax_close_kernel();
27419 + asm volatile("ljmpl *%0" : :
27420 "rm" (real_mode_header->machine_real_restart_asm),
27423 @@ -137,7 +164,7 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d)
27425 * This is a single dmi_table handling all reboot quirks.
27427 -static struct dmi_system_id __initdata reboot_dmi_table[] = {
27428 +static const struct dmi_system_id __initconst reboot_dmi_table[] = {
27431 { /* Handle reboot issue on Acer Aspire one */
27432 @@ -511,7 +538,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
27433 * This means that this function can never return, it can misbehave
27434 * by not rebooting properly and hanging.
27436 -static void native_machine_emergency_restart(void)
27437 +static void __noreturn native_machine_emergency_restart(void)
27441 @@ -631,13 +658,13 @@ void native_machine_shutdown(void)
27445 -static void __machine_emergency_restart(int emergency)
27446 +static void __noreturn __machine_emergency_restart(int emergency)
27448 reboot_emergency = emergency;
27449 machine_ops.emergency_restart();
27452 -static void native_machine_restart(char *__unused)
27453 +static void __noreturn native_machine_restart(char *__unused)
27455 pr_notice("machine restart\n");
27457 @@ -646,7 +673,7 @@ static void native_machine_restart(char *__unused)
27458 __machine_emergency_restart(0);
27461 -static void native_machine_halt(void)
27462 +static void __noreturn native_machine_halt(void)
27464 /* Stop other cpus and apics */
27465 machine_shutdown();
27466 @@ -656,7 +683,7 @@ static void native_machine_halt(void)
27467 stop_this_cpu(NULL);
27470 -static void native_machine_power_off(void)
27471 +static void __noreturn native_machine_power_off(void)
27473 if (pm_power_off) {
27475 @@ -665,9 +692,10 @@ static void native_machine_power_off(void)
27477 /* A fallback in case there is no PM info available */
27478 tboot_shutdown(TB_SHUTDOWN_HALT);
27482 -struct machine_ops machine_ops = {
27483 +struct machine_ops machine_ops __read_only = {
27484 .power_off = native_machine_power_off,
27485 .shutdown = native_machine_shutdown,
27486 .emergency_restart = native_machine_emergency_restart,
27487 diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27488 index c8e41e9..64049ef 100644
27489 --- a/arch/x86/kernel/reboot_fixups_32.c
27490 +++ b/arch/x86/kernel/reboot_fixups_32.c
27491 @@ -57,7 +57,7 @@ struct device_fixup {
27492 unsigned int vendor;
27493 unsigned int device;
27494 void (*reboot_fixup)(struct pci_dev *);
27499 * PCI ids solely used for fixups_table go here
27500 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27501 index 98111b3..73ca125 100644
27502 --- a/arch/x86/kernel/relocate_kernel_64.S
27503 +++ b/arch/x86/kernel/relocate_kernel_64.S
27504 @@ -96,8 +96,7 @@ relocate_kernel:
27506 /* jump to identity mapped page */
27507 addq $(identity_mapped - relocate_kernel), %r8
27513 /* set return address to 0 if not preserving context */
27514 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27515 index d74ac33..d9efe04 100644
27516 --- a/arch/x86/kernel/setup.c
27517 +++ b/arch/x86/kernel/setup.c
27518 @@ -111,6 +111,7 @@
27519 #include <asm/mce.h>
27520 #include <asm/alternative.h>
27521 #include <asm/prom.h>
27522 +#include <asm/boot.h>
27525 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27526 @@ -206,10 +207,12 @@ EXPORT_SYMBOL(boot_cpu_data);
27530 -#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27531 -__visible unsigned long mmu_cr4_features;
27532 +#ifdef CONFIG_X86_64
27533 +__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27534 +#elif defined(CONFIG_X86_PAE)
27535 +__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27537 -__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27538 +__visible unsigned long mmu_cr4_features __read_only;
27541 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27542 @@ -771,7 +774,7 @@ static void __init trim_bios_range(void)
27543 * area (640->1Mb) as ram even though it is not.
27546 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27547 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27549 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27551 @@ -779,7 +782,7 @@ static void __init trim_bios_range(void)
27552 /* called before trim_bios_range() to spare extra sanitize */
27553 static void __init e820_add_kernel_range(void)
27555 - u64 start = __pa_symbol(_text);
27556 + u64 start = __pa_symbol(ktla_ktva(_text));
27557 u64 size = __pa_symbol(_end) - start;
27560 @@ -860,8 +863,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27562 void __init setup_arch(char **cmdline_p)
27564 +#ifdef CONFIG_X86_32
27565 + memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27567 memblock_reserve(__pa_symbol(_text),
27568 (unsigned long)__bss_stop - (unsigned long)_text);
27571 early_reserve_initrd();
27573 @@ -959,16 +966,16 @@ void __init setup_arch(char **cmdline_p)
27575 if (!boot_params.hdr.root_flags)
27576 root_mountflags &= ~MS_RDONLY;
27577 - init_mm.start_code = (unsigned long) _text;
27578 - init_mm.end_code = (unsigned long) _etext;
27579 + init_mm.start_code = ktla_ktva((unsigned long) _text);
27580 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
27581 init_mm.end_data = (unsigned long) _edata;
27582 init_mm.brk = _brk_end;
27584 mpx_mm_init(&init_mm);
27586 - code_resource.start = __pa_symbol(_text);
27587 - code_resource.end = __pa_symbol(_etext)-1;
27588 - data_resource.start = __pa_symbol(_etext);
27589 + code_resource.start = __pa_symbol(ktla_ktva(_text));
27590 + code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27591 + data_resource.start = __pa_symbol(_sdata);
27592 data_resource.end = __pa_symbol(_edata)-1;
27593 bss_resource.start = __pa_symbol(__bss_start);
27594 bss_resource.end = __pa_symbol(__bss_stop)-1;
27595 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27596 index e4fcb87..9c06c55 100644
27597 --- a/arch/x86/kernel/setup_percpu.c
27598 +++ b/arch/x86/kernel/setup_percpu.c
27599 @@ -21,19 +21,17 @@
27600 #include <asm/cpu.h>
27601 #include <asm/stackprotector.h>
27603 -DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27605 +DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27606 EXPORT_PER_CPU_SYMBOL(cpu_number);
27609 -#ifdef CONFIG_X86_64
27610 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27612 -#define BOOT_PERCPU_OFFSET 0
27615 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27616 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27618 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27619 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27620 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27622 EXPORT_SYMBOL(__per_cpu_offset);
27623 @@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27625 #ifdef CONFIG_NEED_MULTIPLE_NODES
27626 pg_data_t *last = NULL;
27627 - unsigned int cpu;
27630 for_each_possible_cpu(cpu) {
27631 int node = early_cpu_to_node(cpu);
27632 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27634 #ifdef CONFIG_X86_32
27635 struct desc_struct gdt;
27636 + unsigned long base = per_cpu_offset(cpu);
27638 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27639 - 0x2 | DESCTYPE_S, 0x8);
27641 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27642 + 0x83 | DESCTYPE_S, 0xC);
27643 write_gdt_entry(get_cpu_gdt_table(cpu),
27644 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27646 @@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27647 /* alrighty, percpu areas up and running */
27648 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27649 for_each_possible_cpu(cpu) {
27650 +#ifdef CONFIG_CC_STACKPROTECTOR
27651 +#ifdef CONFIG_X86_32
27652 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
27655 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27656 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27657 per_cpu(cpu_number, cpu) = cpu;
27658 @@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27660 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27662 +#ifdef CONFIG_CC_STACKPROTECTOR
27663 +#ifdef CONFIG_X86_32
27665 + per_cpu(stack_canary.canary, cpu) = canary;
27669 * Up to this point, the boot CPU has been using .init.data
27670 * area. Reload any changed state for the boot CPU.
27671 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27672 index 1ea14fd..b16147f 100644
27673 --- a/arch/x86/kernel/signal.c
27674 +++ b/arch/x86/kernel/signal.c
27675 @@ -183,7 +183,7 @@ static unsigned long align_sigframe(unsigned long sp)
27676 * Align the stack pointer according to the i386 ABI,
27677 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27679 - sp = ((sp + 4) & -16ul) - 4;
27680 + sp = ((sp - 12) & -16ul) - 4;
27681 #else /* !CONFIG_X86_32 */
27682 sp = round_down(sp, 16) - 8;
27684 @@ -291,10 +291,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27687 if (current->mm->context.vdso)
27688 - restorer = current->mm->context.vdso +
27689 - selected_vdso32->sym___kernel_sigreturn;
27690 + restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27692 - restorer = &frame->retcode;
27693 + restorer = (void __user *)&frame->retcode;
27694 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27695 restorer = ksig->ka.sa.sa_restorer;
27697 @@ -308,7 +307,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27698 * reasons and because gdb uses it as a signature to notice
27699 * signal handler stack frames.
27701 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27702 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27706 @@ -355,8 +354,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27707 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27709 /* Set up to return from userspace. */
27710 - restorer = current->mm->context.vdso +
27711 - selected_vdso32->sym___kernel_rt_sigreturn;
27712 + if (current->mm->context.vdso)
27713 + restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27715 + restorer = (void __user *)&frame->retcode;
27716 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27717 restorer = ksig->ka.sa.sa_restorer;
27718 put_user_ex(restorer, &frame->pretcode);
27719 @@ -368,7 +369,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27720 * reasons and because gdb uses it as a signature to notice
27721 * signal handler stack frames.
27723 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27724 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27725 } put_user_catch(err);
27727 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27728 @@ -598,7 +599,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27730 int usig = ksig->sig;
27731 sigset_t *set = sigmask_to_save();
27732 - compat_sigset_t *cset = (compat_sigset_t *) set;
27733 + sigset_t sigcopy;
27734 + compat_sigset_t *cset;
27738 + cset = (compat_sigset_t *) &sigcopy;
27740 /* Set up the stack frame */
27741 if (is_ia32_frame()) {
27742 @@ -609,7 +615,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27743 } else if (is_x32_frame()) {
27744 return x32_setup_rt_frame(ksig, cset, regs);
27746 - return __setup_rt_frame(ksig->sig, ksig, set, regs);
27747 + return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27751 diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27752 index be8e1bd..a3d93fa 100644
27753 --- a/arch/x86/kernel/smp.c
27754 +++ b/arch/x86/kernel/smp.c
27755 @@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27757 __setup("nonmi_ipi", nonmi_ipi_setup);
27759 -struct smp_ops smp_ops = {
27760 +struct smp_ops smp_ops __read_only = {
27761 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27762 .smp_prepare_cpus = native_smp_prepare_cpus,
27763 .smp_cpus_done = native_smp_cpus_done,
27764 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27765 index 50e547e..d59d06a 100644
27766 --- a/arch/x86/kernel/smpboot.c
27767 +++ b/arch/x86/kernel/smpboot.c
27768 @@ -226,14 +226,17 @@ static void notrace start_secondary(void *unused)
27770 enable_start_cpu0 = 0;
27772 -#ifdef CONFIG_X86_32
27773 + /* otherwise gcc will move up smp_processor_id before the cpu_init */
27776 /* switch away from the initial page table */
27777 +#ifdef CONFIG_PAX_PER_CPU_PGD
27778 + load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27780 load_cr3(swapper_pg_dir);
27785 - /* otherwise gcc will move up smp_processor_id before the cpu_init */
27788 * Check TSC synchronization with the BP:
27790 @@ -782,18 +785,17 @@ void common_cpu_up(unsigned int cpu, struct task_struct *idle)
27791 alternatives_enable_smp();
27793 per_cpu(current_task, cpu) = idle;
27794 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
27796 #ifdef CONFIG_X86_32
27797 /* Stack for startup_32 can be just as for start_secondary onwards */
27799 - per_cpu(cpu_current_top_of_stack, cpu) =
27800 - (unsigned long)task_stack_page(idle) + THREAD_SIZE;
27801 + per_cpu(cpu_current_top_of_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27803 clear_tsk_thread_flag(idle, TIF_FORK);
27804 initial_gs = per_cpu_offset(cpu);
27806 - per_cpu(kernel_stack, cpu) =
27807 - (unsigned long)task_stack_page(idle) + THREAD_SIZE;
27808 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27812 @@ -814,9 +816,11 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27813 unsigned long timeout;
27815 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27816 - (THREAD_SIZE + task_stack_page(idle))) - 1);
27817 + (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27819 + pax_open_kernel();
27820 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27821 + pax_close_kernel();
27822 initial_code = (unsigned long)start_secondary;
27823 stack_start = idle->thread.sp;
27825 @@ -961,6 +965,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27827 common_cpu_up(cpu, tidle);
27829 +#ifdef CONFIG_PAX_PER_CPU_PGD
27830 + clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27831 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27832 + KERNEL_PGD_PTRS);
27833 + clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27834 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27835 + KERNEL_PGD_PTRS);
27838 err = do_boot_cpu(apicid, cpu, tidle);
27840 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
27841 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27842 index 9b4d51d..5d28b58 100644
27843 --- a/arch/x86/kernel/step.c
27844 +++ b/arch/x86/kernel/step.c
27845 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27846 struct desc_struct *desc;
27847 unsigned long base;
27852 mutex_lock(&child->mm->context.lock);
27853 - if (unlikely((seg >> 3) >= child->mm->context.size))
27854 + if (unlikely(seg >= child->mm->context.size))
27855 addr = -1L; /* bogus selector, access would fault */
27857 desc = child->mm->context.ldt + seg;
27858 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27861 mutex_unlock(&child->mm->context.lock);
27863 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27864 + addr = ktla_ktva(addr);
27868 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27869 unsigned char opcode[15];
27870 unsigned long addr = convert_ip_to_linear(child, regs);
27872 + if (addr == -EINVAL)
27875 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27876 for (i = 0; i < copied; i++) {
27877 switch (opcode[i]) {
27878 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27879 new file mode 100644
27880 index 0000000..5877189
27882 +++ b/arch/x86/kernel/sys_i386_32.c
27885 + * This file contains various random system calls that
27886 + * have a non-standard calling sequence on the Linux/i386
27890 +#include <linux/errno.h>
27891 +#include <linux/sched.h>
27892 +#include <linux/mm.h>
27893 +#include <linux/fs.h>
27894 +#include <linux/smp.h>
27895 +#include <linux/sem.h>
27896 +#include <linux/msg.h>
27897 +#include <linux/shm.h>
27898 +#include <linux/stat.h>
27899 +#include <linux/syscalls.h>
27900 +#include <linux/mman.h>
27901 +#include <linux/file.h>
27902 +#include <linux/utsname.h>
27903 +#include <linux/ipc.h>
27904 +#include <linux/elf.h>
27906 +#include <linux/uaccess.h>
27907 +#include <linux/unistd.h>
27909 +#include <asm/syscalls.h>
27911 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27913 + unsigned long pax_task_size = TASK_SIZE;
27915 +#ifdef CONFIG_PAX_SEGMEXEC
27916 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27917 + pax_task_size = SEGMEXEC_TASK_SIZE;
27920 + if (flags & MAP_FIXED)
27921 + if (len > pax_task_size || addr > pax_task_size - len)
27928 + * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27930 +static unsigned long get_align_mask(void)
27932 + if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27935 + if (!(current->flags & PF_RANDOMIZE))
27938 + return va_align.mask;
27942 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
27943 + unsigned long len, unsigned long pgoff, unsigned long flags)
27945 + struct mm_struct *mm = current->mm;
27946 + struct vm_area_struct *vma;
27947 + unsigned long pax_task_size = TASK_SIZE;
27948 + struct vm_unmapped_area_info info;
27949 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27951 +#ifdef CONFIG_PAX_SEGMEXEC
27952 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
27953 + pax_task_size = SEGMEXEC_TASK_SIZE;
27956 + pax_task_size -= PAGE_SIZE;
27958 + if (len > pax_task_size)
27961 + if (flags & MAP_FIXED)
27964 +#ifdef CONFIG_PAX_RANDMMAP
27965 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27969 + addr = PAGE_ALIGN(addr);
27970 + if (pax_task_size - len >= addr) {
27971 + vma = find_vma(mm, addr);
27972 + if (check_heap_stack_gap(vma, addr, len, offset))
27978 + info.length = len;
27979 + info.align_mask = filp ? get_align_mask() : 0;
27980 + info.align_offset = pgoff << PAGE_SHIFT;
27981 + info.threadstack_offset = offset;
27983 +#ifdef CONFIG_PAX_PAGEEXEC
27984 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27985 + info.low_limit = 0x00110000UL;
27986 + info.high_limit = mm->start_code;
27988 +#ifdef CONFIG_PAX_RANDMMAP
27989 + if (mm->pax_flags & MF_PAX_RANDMMAP)
27990 + info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27993 + if (info.low_limit < info.high_limit) {
27994 + addr = vm_unmapped_area(&info);
27995 + if (!IS_ERR_VALUE(addr))
28001 + info.low_limit = mm->mmap_base;
28002 + info.high_limit = pax_task_size;
28004 + return vm_unmapped_area(&info);
28008 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
28009 + const unsigned long len, const unsigned long pgoff,
28010 + const unsigned long flags)
28012 + struct vm_area_struct *vma;
28013 + struct mm_struct *mm = current->mm;
28014 + unsigned long addr = addr0, pax_task_size = TASK_SIZE;
28015 + struct vm_unmapped_area_info info;
28016 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
28018 +#ifdef CONFIG_PAX_SEGMEXEC
28019 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
28020 + pax_task_size = SEGMEXEC_TASK_SIZE;
28023 + pax_task_size -= PAGE_SIZE;
28025 + /* requested length too big for entire address space */
28026 + if (len > pax_task_size)
28029 + if (flags & MAP_FIXED)
28032 +#ifdef CONFIG_PAX_PAGEEXEC
28033 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
28037 +#ifdef CONFIG_PAX_RANDMMAP
28038 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28041 + /* requesting a specific address */
28043 + addr = PAGE_ALIGN(addr);
28044 + if (pax_task_size - len >= addr) {
28045 + vma = find_vma(mm, addr);
28046 + if (check_heap_stack_gap(vma, addr, len, offset))
28051 + info.flags = VM_UNMAPPED_AREA_TOPDOWN;
28052 + info.length = len;
28053 + info.low_limit = PAGE_SIZE;
28054 + info.high_limit = mm->mmap_base;
28055 + info.align_mask = filp ? get_align_mask() : 0;
28056 + info.align_offset = pgoff << PAGE_SHIFT;
28057 + info.threadstack_offset = offset;
28059 + addr = vm_unmapped_area(&info);
28060 + if (!(addr & ~PAGE_MASK))
28062 + VM_BUG_ON(addr != -ENOMEM);
28066 + * A failed mmap() very likely causes application failure,
28067 + * so fall back to the bottom-up function here. This scenario
28068 + * can happen with large stack limits and large mmap()
28071 + return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
28073 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
28074 index 10e0272..b4bb9a7 100644
28075 --- a/arch/x86/kernel/sys_x86_64.c
28076 +++ b/arch/x86/kernel/sys_x86_64.c
28077 @@ -97,8 +97,8 @@ out:
28081 -static void find_start_end(unsigned long flags, unsigned long *begin,
28082 - unsigned long *end)
28083 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
28084 + unsigned long *begin, unsigned long *end)
28086 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
28087 unsigned long new_begin;
28088 @@ -117,7 +117,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
28089 *begin = new_begin;
28092 - *begin = current->mm->mmap_legacy_base;
28093 + *begin = mm->mmap_legacy_base;
28097 @@ -130,20 +130,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
28098 struct vm_area_struct *vma;
28099 struct vm_unmapped_area_info info;
28100 unsigned long begin, end;
28101 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
28103 if (flags & MAP_FIXED)
28106 - find_start_end(flags, &begin, &end);
28107 + find_start_end(mm, flags, &begin, &end);
28112 +#ifdef CONFIG_PAX_RANDMMAP
28113 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28117 addr = PAGE_ALIGN(addr);
28118 vma = find_vma(mm, addr);
28119 - if (end - len >= addr &&
28120 - (!vma || addr + len <= vma->vm_start))
28121 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
28125 @@ -157,6 +161,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
28126 info.align_mask = get_align_mask();
28127 info.align_offset += get_align_bits();
28129 + info.threadstack_offset = offset;
28130 return vm_unmapped_area(&info);
28133 @@ -169,6 +174,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
28134 struct mm_struct *mm = current->mm;
28135 unsigned long addr = addr0;
28136 struct vm_unmapped_area_info info;
28137 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
28139 /* requested length too big for entire address space */
28140 if (len > TASK_SIZE)
28141 @@ -181,12 +187,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
28142 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
28145 +#ifdef CONFIG_PAX_RANDMMAP
28146 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28149 /* requesting a specific address */
28151 addr = PAGE_ALIGN(addr);
28152 vma = find_vma(mm, addr);
28153 - if (TASK_SIZE - len >= addr &&
28154 - (!vma || addr + len <= vma->vm_start))
28155 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
28159 @@ -200,6 +209,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
28160 info.align_mask = get_align_mask();
28161 info.align_offset += get_align_bits();
28163 + info.threadstack_offset = offset;
28164 addr = vm_unmapped_area(&info);
28165 if (!(addr & ~PAGE_MASK))
28167 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
28168 index 91a4496..42fc304 100644
28169 --- a/arch/x86/kernel/tboot.c
28170 +++ b/arch/x86/kernel/tboot.c
28172 #include <asm/setup.h>
28173 #include <asm/e820.h>
28174 #include <asm/io.h>
28175 +#include <asm/tlbflush.h>
28177 #include "../realmode/rm/wakeup.h"
28179 @@ -221,7 +222,7 @@ static int tboot_setup_sleep(void)
28181 void tboot_shutdown(u32 shutdown_type)
28183 - void (*shutdown)(void);
28184 + void (* __noreturn shutdown)(void);
28186 if (!tboot_enabled())
28188 @@ -242,8 +243,9 @@ void tboot_shutdown(u32 shutdown_type)
28189 tboot->shutdown_type = shutdown_type;
28191 switch_to_tboot_pt();
28192 + cr4_clear_bits(X86_CR4_PCIDE);
28194 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
28195 + shutdown = (void *)(unsigned long)tboot->shutdown_entry;
28198 /* should not reach here */
28199 @@ -310,7 +312,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
28203 -static atomic_t ap_wfs_count;
28204 +static atomic_unchecked_t ap_wfs_count;
28206 static int tboot_wait_for_aps(int num_aps)
28208 @@ -334,9 +336,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
28212 - atomic_inc(&ap_wfs_count);
28213 + atomic_inc_unchecked(&ap_wfs_count);
28214 if (num_online_cpus() == 1)
28215 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
28216 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
28220 @@ -422,7 +424,7 @@ static __init int tboot_late_init(void)
28222 tboot_create_trampoline();
28224 - atomic_set(&ap_wfs_count, 0);
28225 + atomic_set_unchecked(&ap_wfs_count, 0);
28226 register_hotcpu_notifier(&tboot_cpu_notifier);
28228 #ifdef CONFIG_DEBUG_FS
28229 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
28230 index d39c091..1df4349 100644
28231 --- a/arch/x86/kernel/time.c
28232 +++ b/arch/x86/kernel/time.c
28233 @@ -32,7 +32,7 @@ unsigned long profile_pc(struct pt_regs *regs)
28235 if (!user_mode(regs) && in_lock_functions(pc)) {
28236 #ifdef CONFIG_FRAME_POINTER
28237 - return *(unsigned long *)(regs->bp + sizeof(long));
28238 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
28240 unsigned long *sp =
28241 (unsigned long *)kernel_stack_pointer(regs);
28242 @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
28243 * or above a saved flags. Eflags has bits 22-31 zero,
28244 * kernel addresses don't.
28247 +#ifdef CONFIG_PAX_KERNEXEC
28248 + return ktla_ktva(sp[0]);
28260 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
28261 index 7fc5e84..c6e445a 100644
28262 --- a/arch/x86/kernel/tls.c
28263 +++ b/arch/x86/kernel/tls.c
28264 @@ -139,6 +139,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
28265 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
28268 +#ifdef CONFIG_PAX_SEGMEXEC
28269 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
28273 set_tls_desc(p, idx, &info, 1);
28276 @@ -256,7 +261,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
28280 - else if (__copy_from_user(infobuf, ubuf, count))
28281 + else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
28285 diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
28286 index 1c113db..287b42e 100644
28287 --- a/arch/x86/kernel/tracepoint.c
28288 +++ b/arch/x86/kernel/tracepoint.c
28290 #include <linux/atomic.h>
28292 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
28293 -struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
28294 +const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
28295 (unsigned long) trace_idt_table };
28297 /* No need to be aligned, but done to keep all IDTs defined the same way. */
28298 -gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
28299 +gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
28301 static int trace_irq_vector_refcount;
28302 static DEFINE_MUTEX(irq_vector_mutex);
28303 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
28304 index 324ab52..0cfd2d05 100644
28305 --- a/arch/x86/kernel/traps.c
28306 +++ b/arch/x86/kernel/traps.c
28308 #include <asm/proto.h>
28310 /* No need to be aligned, but done to keep all IDTs defined the same way. */
28311 -gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
28312 +gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
28314 #include <asm/processor-flags.h>
28315 #include <asm/setup.h>
28316 @@ -77,7 +77,7 @@ asmlinkage int system_call(void);
28319 /* Must be page-aligned because the real IDT is used in a fixmap. */
28320 -gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
28321 +gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
28323 DECLARE_BITMAP(used_vectors, NR_VECTORS);
28324 EXPORT_SYMBOL_GPL(used_vectors);
28325 @@ -174,7 +174,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
28326 * will catch asm bugs and any attempt to use ist_preempt_enable
28327 * from double_fault.
28329 - BUG_ON((unsigned long)(current_top_of_stack() -
28330 + BUG_ON((unsigned long)(current_top_of_stack(smp_processor_id()) -
28331 current_stack_pointer()) >= THREAD_SIZE);
28333 preempt_count_sub(HARDIRQ_OFFSET);
28334 @@ -191,7 +191,7 @@ void ist_end_non_atomic(void)
28337 static nokprobe_inline int
28338 -do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28339 +do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
28340 struct pt_regs *regs, long error_code)
28342 if (v8086_mode(regs)) {
28343 @@ -211,8 +211,20 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28344 if (!fixup_exception(regs)) {
28345 tsk->thread.error_code = error_code;
28346 tsk->thread.trap_nr = trapnr;
28348 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28349 + if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
28350 + str = "PAX: suspicious stack segment fault";
28353 die(str, regs, error_code);
28356 +#ifdef CONFIG_PAX_REFCOUNT
28357 + if (trapnr == X86_TRAP_OF)
28358 + pax_report_refcount_overflow(regs);
28364 @@ -251,7 +263,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
28368 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28369 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
28370 long error_code, siginfo_t *info)
28372 struct task_struct *tsk = current;
28373 @@ -275,7 +287,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28374 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
28375 printk_ratelimit()) {
28376 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
28377 - tsk->comm, tsk->pid, str,
28378 + tsk->comm, task_pid_nr(tsk), str,
28379 regs->ip, regs->sp, error_code);
28380 print_vma_addr(" in ", regs->ip);
28382 @@ -357,6 +369,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
28383 tsk->thread.error_code = error_code;
28384 tsk->thread.trap_nr = X86_TRAP_DF;
28386 +#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
28387 + if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
28388 + die("grsec: kernel stack overflow detected", regs, error_code);
28391 #ifdef CONFIG_DOUBLEFAULT
28392 df_debug(regs, error_code);
28394 @@ -475,11 +492,35 @@ do_general_protection(struct pt_regs *regs, long error_code)
28395 tsk->thread.error_code = error_code;
28396 tsk->thread.trap_nr = X86_TRAP_GP;
28397 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28398 - X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28399 + X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28401 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28402 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28403 + die("PAX: suspicious general protection fault", regs, error_code);
28407 die("general protection fault", regs, error_code);
28412 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28413 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28414 + struct mm_struct *mm = tsk->mm;
28415 + unsigned long limit;
28417 + down_write(&mm->mmap_sem);
28418 + limit = mm->context.user_cs_limit;
28419 + if (limit < TASK_SIZE) {
28420 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28421 + up_write(&mm->mmap_sem);
28424 + up_write(&mm->mmap_sem);
28428 tsk->thread.error_code = error_code;
28429 tsk->thread.trap_nr = X86_TRAP_GP;
28431 @@ -578,6 +619,9 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
28432 container_of(task_pt_regs(current),
28433 struct bad_iret_stack, regs);
28435 + if ((current->thread.sp0 ^ (unsigned long)s) < THREAD_SIZE)
28438 /* Copy the IRET target to the new stack. */
28439 memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
28441 diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28442 index 5054497..139f8f8 100644
28443 --- a/arch/x86/kernel/tsc.c
28444 +++ b/arch/x86/kernel/tsc.c
28445 @@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28449 - ACCESS_ONCE(c2n->head) = data;
28450 + ACCESS_ONCE_RW(c2n->head) = data;
28454 diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28455 index 0b81ad6..fff670e 100644
28456 --- a/arch/x86/kernel/uprobes.c
28457 +++ b/arch/x86/kernel/uprobes.c
28458 @@ -986,7 +986,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28460 if (nleft != rasize) {
28461 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28462 - "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28463 + "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28465 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28467 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28468 index b9242ba..50c5edd 100644
28469 --- a/arch/x86/kernel/verify_cpu.S
28470 +++ b/arch/x86/kernel/verify_cpu.S
28472 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28473 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28474 * arch/x86/kernel/head_32.S: processor startup
28475 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28477 * verify_cpu, returns the status of longmode and SSE in register %eax.
28478 * 0: Success 1: Failure
28479 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28480 index fc9db6e..2c5865d 100644
28481 --- a/arch/x86/kernel/vm86_32.c
28482 +++ b/arch/x86/kernel/vm86_32.c
28484 #include <linux/ptrace.h>
28485 #include <linux/audit.h>
28486 #include <linux/stddef.h>
28487 +#include <linux/grsecurity.h>
28489 #include <asm/uaccess.h>
28490 #include <asm/io.h>
28491 @@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28495 - tss = &per_cpu(cpu_tss, get_cpu());
28496 + tss = cpu_tss + get_cpu();
28497 current->thread.sp0 = current->thread.saved_sp0;
28498 current->thread.sysenter_cs = __KERNEL_CS;
28499 load_sp0(tss, &current->thread);
28500 @@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28502 if (tsk->thread.saved_sp0)
28505 +#ifdef CONFIG_GRKERNSEC_VM86
28506 + if (!capable(CAP_SYS_RAWIO)) {
28507 + gr_handle_vm86();
28512 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28513 offsetof(struct kernel_vm86_struct, vm86plus) -
28514 sizeof(info.regs));
28515 @@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28517 struct vm86plus_struct __user *v86;
28519 +#ifdef CONFIG_GRKERNSEC_VM86
28520 + if (!capable(CAP_SYS_RAWIO)) {
28521 + gr_handle_vm86();
28528 case VM86_REQUEST_IRQ:
28529 @@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28530 tsk->thread.saved_fs = info->regs32->fs;
28531 tsk->thread.saved_gs = get_user_gs(info->regs32);
28533 - tss = &per_cpu(cpu_tss, get_cpu());
28534 + tss = cpu_tss + get_cpu();
28535 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28537 tsk->thread.sysenter_cs = 0;
28538 @@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28539 goto cannot_handle;
28540 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28541 goto cannot_handle;
28542 - intr_ptr = (unsigned long __user *) (i << 2);
28543 + intr_ptr = (__force unsigned long __user *) (i << 2);
28544 if (get_user(segoffs, intr_ptr))
28545 goto cannot_handle;
28546 if ((segoffs >> 16) == BIOSSEG)
28547 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28548 index 00bf300..129df8e 100644
28549 --- a/arch/x86/kernel/vmlinux.lds.S
28550 +++ b/arch/x86/kernel/vmlinux.lds.S
28552 #include <asm/page_types.h>
28553 #include <asm/cache.h>
28554 #include <asm/boot.h>
28555 +#include <asm/segment.h>
28557 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28558 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28560 +#define __KERNEL_TEXT_OFFSET 0
28563 #undef i386 /* in case the preprocessor is a 32bit one */
28565 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28568 text PT_LOAD FLAGS(5); /* R_E */
28569 +#ifdef CONFIG_X86_32
28570 + module PT_LOAD FLAGS(5); /* R_E */
28573 + rodata PT_LOAD FLAGS(5); /* R_E */
28575 + rodata PT_LOAD FLAGS(4); /* R__ */
28577 data PT_LOAD FLAGS(6); /* RW_ */
28578 -#ifdef CONFIG_X86_64
28579 + init.begin PT_LOAD FLAGS(6); /* RW_ */
28581 percpu PT_LOAD FLAGS(6); /* RW_ */
28583 + text.init PT_LOAD FLAGS(5); /* R_E */
28584 + text.exit PT_LOAD FLAGS(5); /* R_E */
28585 init PT_LOAD FLAGS(7); /* RWE */
28587 note PT_NOTE FLAGS(0); /* ___ */
28592 #ifdef CONFIG_X86_32
28593 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28594 - phys_startup_32 = startup_32 - LOAD_OFFSET;
28595 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28597 - . = __START_KERNEL;
28598 - phys_startup_64 = startup_64 - LOAD_OFFSET;
28599 + . = __START_KERNEL;
28602 /* Text and read-only data */
28603 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
28605 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28606 /* bootstrapping code */
28607 +#ifdef CONFIG_X86_32
28608 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28610 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28612 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28617 @@ -104,13 +124,47 @@ SECTIONS
28621 - /* End of text section */
28625 - NOTES :text :note
28626 + . += __KERNEL_TEXT_OFFSET;
28628 - EXCEPTION_TABLE(16) :text = 0x9090
28629 +#ifdef CONFIG_X86_32
28630 + . = ALIGN(PAGE_SIZE);
28631 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28633 +#ifdef CONFIG_PAX_KERNEXEC
28634 + MODULES_EXEC_VADDR = .;
28636 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28637 + . = ALIGN(HPAGE_SIZE) - 1;
28638 + MODULES_EXEC_END = .;
28644 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28645 + /* End of text section */
28647 + _etext = . - __KERNEL_TEXT_OFFSET;
28650 +#ifdef CONFIG_X86_32
28651 + . = ALIGN(PAGE_SIZE);
28652 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28653 + . = ALIGN(PAGE_SIZE);
28654 + *(.empty_zero_page)
28655 + *(.initial_pg_fixmap)
28656 + *(.initial_pg_pmd)
28657 + *(.initial_page_table)
28658 + *(.swapper_pg_dir)
28662 + . = ALIGN(PAGE_SIZE);
28663 + NOTES :rodata :note
28665 + EXCEPTION_TABLE(16) :rodata
28667 #if defined(CONFIG_DEBUG_RODATA)
28668 /* .text should occupy whole number of pages */
28669 @@ -122,16 +176,20 @@ SECTIONS
28672 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28674 +#ifdef CONFIG_PAX_KERNEXEC
28675 + . = ALIGN(HPAGE_SIZE);
28677 + . = ALIGN(PAGE_SIZE);
28680 /* Start of data section */
28684 INIT_TASK_DATA(THREAD_SIZE)
28686 -#ifdef CONFIG_X86_32
28687 - /* 32 bit has nosave before _edata */
28691 PAGE_ALIGNED_DATA(PAGE_SIZE)
28693 @@ -174,12 +232,19 @@ SECTIONS
28694 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28696 /* Init code and data - will be freed after init */
28697 - . = ALIGN(PAGE_SIZE);
28698 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28701 +#ifdef CONFIG_PAX_KERNEXEC
28702 + . = ALIGN(HPAGE_SIZE);
28704 + . = ALIGN(PAGE_SIZE);
28707 __init_begin = .; /* paired with __init_end */
28711 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28714 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28715 * output PHDR, so the next output section - .init.text - should
28716 @@ -190,12 +255,27 @@ SECTIONS
28717 "per-CPU data too large - increase CONFIG_PHYSICAL_START")
28720 - INIT_TEXT_SECTION(PAGE_SIZE)
28721 -#ifdef CONFIG_X86_64
28724 + . = ALIGN(PAGE_SIZE);
28726 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28727 + VMLINUX_SYMBOL(_sinittext) = .;
28729 + . = ALIGN(PAGE_SIZE);
28732 - INIT_DATA_SECTION(16)
28734 + * .exit.text is discard at runtime, not link time, to deal with
28735 + * references from .altinstructions and .eh_frame
28737 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28739 + VMLINUX_SYMBOL(_einittext) = .;
28742 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28744 + . = ALIGN(PAGE_SIZE);
28745 + INIT_DATA_SECTION(16) :init
28747 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28748 __x86_cpu_dev_start = .;
28749 @@ -266,19 +346,12 @@ SECTIONS
28754 - * .exit.text is discard at runtime, not link time, to deal with
28755 - * references from .altinstructions and .eh_frame
28757 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28761 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28765 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28766 +#ifndef CONFIG_SMP
28767 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28770 @@ -297,16 +370,10 @@ SECTIONS
28771 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28774 - . = ALIGN(PAGE_SIZE);
28775 __smp_locks_end = .;
28776 + . = ALIGN(PAGE_SIZE);
28779 -#ifdef CONFIG_X86_64
28780 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28786 . = ALIGN(PAGE_SIZE);
28787 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28788 @@ -322,6 +389,7 @@ SECTIONS
28790 . += 64 * 1024; /* 64k alignment slop space */
28791 *(.brk_reservation) /* areas brk users have reserved */
28792 + . = ALIGN(HPAGE_SIZE);
28796 @@ -348,13 +416,12 @@ SECTIONS
28797 * for the boot processor.
28799 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28800 -INIT_PER_CPU(gdt_page);
28801 INIT_PER_CPU(irq_stack_union);
28804 * Build-time check on the image size:
28806 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28807 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28808 "kernel image bigger than KERNEL_IMAGE_SIZE");
28811 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28812 index 2dcc6ff..082dc7a 100644
28813 --- a/arch/x86/kernel/vsyscall_64.c
28814 +++ b/arch/x86/kernel/vsyscall_64.c
28815 @@ -38,15 +38,13 @@
28816 #define CREATE_TRACE_POINTS
28817 #include "vsyscall_trace.h"
28819 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28820 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28822 static int __init vsyscall_setup(char *str)
28825 if (!strcmp("emulate", str))
28826 vsyscall_mode = EMULATE;
28827 - else if (!strcmp("native", str))
28828 - vsyscall_mode = NATIVE;
28829 else if (!strcmp("none", str))
28830 vsyscall_mode = NONE;
28832 @@ -264,8 +262,7 @@ do_ret:
28836 - force_sig(SIGSEGV, current);
28838 + do_group_exit(SIGKILL);
28842 @@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
28843 static struct vm_area_struct gate_vma = {
28844 .vm_start = VSYSCALL_ADDR,
28845 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
28846 - .vm_page_prot = PAGE_READONLY_EXEC,
28847 - .vm_flags = VM_READ | VM_EXEC,
28848 + .vm_page_prot = PAGE_READONLY,
28849 + .vm_flags = VM_READ,
28850 .vm_ops = &gate_vma_ops,
28853 @@ -325,10 +322,7 @@ void __init map_vsyscall(void)
28854 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
28856 if (vsyscall_mode != NONE)
28857 - __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
28858 - vsyscall_mode == NATIVE
28859 - ? PAGE_KERNEL_VSYSCALL
28860 - : PAGE_KERNEL_VVAR);
28861 + __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28863 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
28864 (unsigned long)VSYSCALL_ADDR);
28865 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28866 index 37d8fa4..66e319a 100644
28867 --- a/arch/x86/kernel/x8664_ksyms_64.c
28868 +++ b/arch/x86/kernel/x8664_ksyms_64.c
28869 @@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28870 EXPORT_SYMBOL(copy_user_generic_unrolled);
28871 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28872 EXPORT_SYMBOL(__copy_user_nocache);
28873 -EXPORT_SYMBOL(_copy_from_user);
28874 -EXPORT_SYMBOL(_copy_to_user);
28876 EXPORT_SYMBOL(copy_page);
28877 EXPORT_SYMBOL(clear_page);
28878 @@ -79,3 +77,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28879 EXPORT_SYMBOL(___preempt_schedule_context);
28883 +#ifdef CONFIG_PAX_PER_CPU_PGD
28884 +EXPORT_SYMBOL(cpu_pgd);
28886 diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28887 index 234b072..b7ab191 100644
28888 --- a/arch/x86/kernel/x86_init.c
28889 +++ b/arch/x86/kernel/x86_init.c
28890 @@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28891 static void default_nmi_init(void) { };
28892 static int default_i8042_detect(void) { return 1; };
28894 -struct x86_platform_ops x86_platform = {
28895 +struct x86_platform_ops x86_platform __read_only = {
28896 .calibrate_tsc = native_calibrate_tsc,
28897 .get_wallclock = mach_get_cmos_time,
28898 .set_wallclock = mach_set_rtc_mmss,
28899 @@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28900 EXPORT_SYMBOL_GPL(x86_platform);
28902 #if defined(CONFIG_PCI_MSI)
28903 -struct x86_msi_ops x86_msi = {
28904 +struct x86_msi_ops x86_msi __read_only = {
28905 .setup_msi_irqs = native_setup_msi_irqs,
28906 .compose_msi_msg = native_compose_msi_msg,
28907 .teardown_msi_irq = native_teardown_msi_irq,
28908 @@ -140,7 +140,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
28912 -struct x86_io_apic_ops x86_io_apic_ops = {
28913 +struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28914 .init = native_io_apic_init_mappings,
28915 .read = native_io_apic_read,
28916 .write = native_io_apic_write,
28917 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28918 index 87a815b..727dbe6 100644
28919 --- a/arch/x86/kernel/xsave.c
28920 +++ b/arch/x86/kernel/xsave.c
28921 @@ -168,18 +168,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28923 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28924 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28925 - err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28926 + err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28931 - err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28932 + err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28935 * Read the xstate_bv which we copied (directly from the cpu or
28936 * from the state in task struct) to the user buffers.
28938 - err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28939 + err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28942 * For legacy compatible, we always set FP/SSE bits in the bit
28943 @@ -194,7 +194,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28945 xstate_bv |= XSTATE_FPSSE;
28947 - err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28948 + err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28952 @@ -203,6 +203,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28956 + buf = (struct xsave_struct __user *)____m(buf);
28958 err = xsave_user(buf);
28959 else if (use_fxsr())
28960 @@ -313,6 +314,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28962 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28964 + buf = (void __user *)____m(buf);
28966 if ((unsigned long)buf % 64 || fx_only) {
28967 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
28968 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28969 index 1d08ad3..c6a4faf 100644
28970 --- a/arch/x86/kvm/cpuid.c
28971 +++ b/arch/x86/kvm/cpuid.c
28972 @@ -204,15 +204,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28973 struct kvm_cpuid2 *cpuid,
28974 struct kvm_cpuid_entry2 __user *entries)
28980 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28983 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28984 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28985 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28987 + for (i = 0; i < cpuid->nent; ++i) {
28988 + struct kvm_cpuid_entry2 cpuid_entry;
28989 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28991 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
28993 vcpu->arch.cpuid_nent = cpuid->nent;
28994 kvm_apic_set_version(vcpu);
28995 kvm_x86_ops->cpuid_update(vcpu);
28996 @@ -225,15 +230,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28997 struct kvm_cpuid2 *cpuid,
28998 struct kvm_cpuid_entry2 __user *entries)
29004 if (cpuid->nent < vcpu->arch.cpuid_nent)
29007 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
29008 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
29009 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
29011 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
29012 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
29013 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
29019 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
29020 index 630bcb0..a7f6d9e 100644
29021 --- a/arch/x86/kvm/emulate.c
29022 +++ b/arch/x86/kvm/emulate.c
29023 @@ -3569,7 +3569,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
29024 int cr = ctxt->modrm_reg;
29027 - static u64 cr_reserved_bits[] = {
29028 + static const u64 cr_reserved_bits[] = {
29029 0xffffffff00000000ULL,
29030 0, 0, 0, /* CR3 checked later */
29032 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
29033 index 67d07e0..10769d5 100644
29034 --- a/arch/x86/kvm/lapic.c
29035 +++ b/arch/x86/kvm/lapic.c
29037 #define APIC_BUS_CYCLE_NS 1
29039 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
29040 -#define apic_debug(fmt, arg...)
29041 +#define apic_debug(fmt, arg...) do {} while (0)
29043 #define APIC_LVT_NUM 6
29044 /* 14 is the version for Xeon and Pentium 8.4.8*/
29045 diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
29046 index 9d28383..c4ea87e 100644
29047 --- a/arch/x86/kvm/lapic.h
29048 +++ b/arch/x86/kvm/lapic.h
29049 @@ -150,7 +150,7 @@ static inline bool kvm_apic_vid_enabled(struct kvm *kvm)
29051 static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
29053 - return vcpu->arch.apic->pending_events;
29054 + return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events;
29057 bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
29058 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
29059 index 6e6d115..43fecbf 100644
29060 --- a/arch/x86/kvm/paging_tmpl.h
29061 +++ b/arch/x86/kvm/paging_tmpl.h
29062 @@ -343,7 +343,7 @@ retry_walk:
29063 if (unlikely(kvm_is_error_hva(host_addr)))
29066 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
29067 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
29068 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
29070 walker->ptep_user[walker->level - 1] = ptep_user;
29071 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
29072 index 4911bf1..e7d3ed2 100644
29073 --- a/arch/x86/kvm/svm.c
29074 +++ b/arch/x86/kvm/svm.c
29075 @@ -3577,7 +3577,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
29076 int cpu = raw_smp_processor_id();
29078 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
29080 + pax_open_kernel();
29081 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
29082 + pax_close_kernel();
29087 @@ -3973,6 +3977,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
29091 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
29092 + __set_fs(current_thread_info()->addr_limit);
29097 local_irq_disable();
29098 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
29099 index 2d73807..84a0e59 100644
29100 --- a/arch/x86/kvm/vmx.c
29101 +++ b/arch/x86/kvm/vmx.c
29102 @@ -1440,12 +1440,12 @@ static void vmcs_write64(unsigned long field, u64 value)
29106 -static void vmcs_clear_bits(unsigned long field, u32 mask)
29107 +static void vmcs_clear_bits(unsigned long field, unsigned long mask)
29109 vmcs_writel(field, vmcs_readl(field) & ~mask);
29112 -static void vmcs_set_bits(unsigned long field, u32 mask)
29113 +static void vmcs_set_bits(unsigned long field, unsigned long mask)
29115 vmcs_writel(field, vmcs_readl(field) | mask);
29117 @@ -1705,7 +1705,11 @@ static void reload_tss(void)
29118 struct desc_struct *descs;
29120 descs = (void *)gdt->address;
29122 + pax_open_kernel();
29123 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
29124 + pax_close_kernel();
29129 @@ -1941,6 +1945,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
29130 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
29131 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
29133 +#ifdef CONFIG_PAX_PER_CPU_PGD
29134 + vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
29137 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
29138 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
29139 vmx->loaded_vmcs->cpu = cpu;
29140 @@ -2233,7 +2241,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
29141 * reads and returns guest's timestamp counter "register"
29142 * guest_tsc = host_tsc + tsc_offset -- 21.3
29144 -static u64 guest_read_tsc(void)
29145 +static u64 __intentional_overflow(-1) guest_read_tsc(void)
29147 u64 host_tsc, tsc_offset;
29149 @@ -4467,7 +4475,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
29152 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
29154 +#ifndef CONFIG_PAX_PER_CPU_PGD
29155 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
29158 /* Save the most likely value for this task's CR4 in the VMCS. */
29159 cr4 = cr4_read_shadow();
29160 @@ -4494,7 +4505,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
29161 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
29162 vmx->host_idt_base = dt.address;
29164 - vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
29165 + vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
29167 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
29168 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
29169 @@ -6107,11 +6118,17 @@ static __init int hardware_setup(void)
29170 * page upon invalidation. No need to do anything if not
29171 * using the APIC_ACCESS_ADDR VMCS field.
29173 - if (!flexpriority_enabled)
29174 - kvm_x86_ops->set_apic_access_page_addr = NULL;
29175 + if (!flexpriority_enabled) {
29176 + pax_open_kernel();
29177 + *(void **)&kvm_x86_ops->set_apic_access_page_addr = NULL;
29178 + pax_close_kernel();
29181 - if (!cpu_has_vmx_tpr_shadow())
29182 - kvm_x86_ops->update_cr8_intercept = NULL;
29183 + if (!cpu_has_vmx_tpr_shadow()) {
29184 + pax_open_kernel();
29185 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
29186 + pax_close_kernel();
29189 if (enable_ept && !cpu_has_vmx_ept_2m_page())
29190 kvm_disable_largepages();
29191 @@ -6122,14 +6139,16 @@ static __init int hardware_setup(void)
29192 if (!cpu_has_vmx_apicv())
29195 + pax_open_kernel();
29197 - kvm_x86_ops->update_cr8_intercept = NULL;
29198 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
29200 - kvm_x86_ops->hwapic_irr_update = NULL;
29201 - kvm_x86_ops->hwapic_isr_update = NULL;
29202 - kvm_x86_ops->deliver_posted_interrupt = NULL;
29203 - kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
29204 + *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
29205 + *(void **)&kvm_x86_ops->hwapic_isr_update = NULL;
29206 + *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
29207 + *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
29209 + pax_close_kernel();
29211 vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
29212 vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
29213 @@ -6182,10 +6201,12 @@ static __init int hardware_setup(void)
29217 - kvm_x86_ops->slot_enable_log_dirty = NULL;
29218 - kvm_x86_ops->slot_disable_log_dirty = NULL;
29219 - kvm_x86_ops->flush_log_dirty = NULL;
29220 - kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
29221 + pax_open_kernel();
29222 + *(void **)&kvm_x86_ops->slot_enable_log_dirty = NULL;
29223 + *(void **)&kvm_x86_ops->slot_disable_log_dirty = NULL;
29224 + *(void **)&kvm_x86_ops->flush_log_dirty = NULL;
29225 + *(void **)&kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
29226 + pax_close_kernel();
29229 return alloc_kvm_area();
29230 @@ -8230,6 +8251,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
29232 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
29235 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29236 + "ljmp %[cs],$3f\n\t"
29240 /* Save guest registers, load host registers, keep flags */
29241 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
29243 @@ -8282,6 +8309,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
29245 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
29246 [wordsize]"i"(sizeof(ulong))
29248 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29249 + ,[cs]"i"(__KERNEL_CS)
29253 #ifdef CONFIG_X86_64
29254 , "rax", "rbx", "rdi", "rsi"
29255 @@ -8295,7 +8327,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
29257 update_debugctlmsr(debugctlmsr);
29259 -#ifndef CONFIG_X86_64
29260 +#ifdef CONFIG_X86_32
29262 * The sysexit path does not restore ds/es, so we must set them to
29263 * a reasonable value ourselves.
29264 @@ -8304,8 +8336,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
29265 * may be executed in interrupt context, which saves and restore segments
29266 * around it, nullifying its effect.
29268 - loadsegment(ds, __USER_DS);
29269 - loadsegment(es, __USER_DS);
29270 + loadsegment(ds, __KERNEL_DS);
29271 + loadsegment(es, __KERNEL_DS);
29272 + loadsegment(ss, __KERNEL_DS);
29274 +#ifdef CONFIG_PAX_KERNEXEC
29275 + loadsegment(fs, __KERNEL_PERCPU);
29278 +#ifdef CONFIG_PAX_MEMORY_UDEREF
29279 + __set_fs(current_thread_info()->addr_limit);
29284 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
29285 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
29286 index ea306ad..669f42d 100644
29287 --- a/arch/x86/kvm/x86.c
29288 +++ b/arch/x86/kvm/x86.c
29289 @@ -1929,8 +1929,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
29291 struct kvm *kvm = vcpu->kvm;
29292 int lm = is_long_mode(vcpu);
29293 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
29294 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
29295 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
29296 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
29297 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
29298 : kvm->arch.xen_hvm_config.blob_size_32;
29299 u32 page_num = data & ~PAGE_MASK;
29300 @@ -2867,6 +2867,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
29301 if (n < msr_list.nmsrs)
29304 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
29306 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
29307 num_msrs_to_save * sizeof(u32)))
29309 @@ -5784,7 +5786,7 @@ static struct notifier_block pvclock_gtod_notifier = {
29313 -int kvm_arch_init(void *opaque)
29314 +int kvm_arch_init(const void *opaque)
29317 struct kvm_x86_ops *ops = opaque;
29318 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
29319 index 8f9a133..3c7694b 100644
29320 --- a/arch/x86/lguest/boot.c
29321 +++ b/arch/x86/lguest/boot.c
29322 @@ -1341,9 +1341,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
29323 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
29324 * Launcher to reboot us.
29326 -static void lguest_restart(char *reason)
29327 +static __noreturn void lguest_restart(char *reason)
29329 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
29334 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
29335 index 00933d5..3a64af9 100644
29336 --- a/arch/x86/lib/atomic64_386_32.S
29337 +++ b/arch/x86/lib/atomic64_386_32.S
29338 @@ -48,6 +48,10 @@ BEGIN(read)
29342 +BEGIN(read_unchecked)
29349 @@ -55,6 +59,10 @@ BEGIN(set)
29353 +BEGIN(set_unchecked)
29360 @@ -70,6 +78,20 @@ RET_ENDP
29365 +#ifdef CONFIG_PAX_REFCOUNT
29371 + _ASM_EXTABLE(0b, 0b)
29375 +BEGIN(add_unchecked)
29381 @@ -77,6 +99,24 @@ RET_ENDP
29386 +#ifdef CONFIG_PAX_REFCOUNT
29389 + _ASM_EXTABLE(1234b, 2f)
29395 +#ifdef CONFIG_PAX_REFCOUNT
29400 +BEGIN(add_return_unchecked)
29406 @@ -86,6 +126,20 @@ RET_ENDP
29411 +#ifdef CONFIG_PAX_REFCOUNT
29417 + _ASM_EXTABLE(0b, 0b)
29421 +BEGIN(sub_unchecked)
29427 @@ -96,6 +150,27 @@ BEGIN(sub_return)
29432 +#ifdef CONFIG_PAX_REFCOUNT
29435 + _ASM_EXTABLE(1234b, 2f)
29441 +#ifdef CONFIG_PAX_REFCOUNT
29446 +BEGIN(sub_return_unchecked)
29455 @@ -105,6 +180,20 @@ RET_ENDP
29460 +#ifdef CONFIG_PAX_REFCOUNT
29466 + _ASM_EXTABLE(0b, 0b)
29470 +BEGIN(inc_unchecked)
29476 @@ -114,6 +203,26 @@ BEGIN(inc_return)
29481 +#ifdef CONFIG_PAX_REFCOUNT
29484 + _ASM_EXTABLE(1234b, 2f)
29490 +#ifdef CONFIG_PAX_REFCOUNT
29495 +BEGIN(inc_return_unchecked)
29503 @@ -123,6 +232,20 @@ RET_ENDP
29508 +#ifdef CONFIG_PAX_REFCOUNT
29514 + _ASM_EXTABLE(0b, 0b)
29518 +BEGIN(dec_unchecked)
29524 @@ -132,6 +255,26 @@ BEGIN(dec_return)
29529 +#ifdef CONFIG_PAX_REFCOUNT
29532 + _ASM_EXTABLE(1234b, 2f)
29538 +#ifdef CONFIG_PAX_REFCOUNT
29543 +BEGIN(dec_return_unchecked)
29551 @@ -143,6 +286,13 @@ BEGIN(add_unless)
29556 +#ifdef CONFIG_PAX_REFCOUNT
29559 + _ASM_EXTABLE(1234b, 2f)
29565 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29570 +#ifdef CONFIG_PAX_REFCOUNT
29573 + _ASM_EXTABLE(1234b, 2f)
29579 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29584 +#ifdef CONFIG_PAX_REFCOUNT
29587 + _ASM_EXTABLE(1234b, 1f)
29593 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29594 index 082a851..6a963bc 100644
29595 --- a/arch/x86/lib/atomic64_cx8_32.S
29596 +++ b/arch/x86/lib/atomic64_cx8_32.S
29597 @@ -25,10 +25,20 @@ ENTRY(atomic64_read_cx8)
29601 + pax_force_retaddr
29604 ENDPROC(atomic64_read_cx8)
29606 +ENTRY(atomic64_read_unchecked_cx8)
29610 + pax_force_retaddr
29613 +ENDPROC(atomic64_read_unchecked_cx8)
29615 ENTRY(atomic64_set_cx8)
29618 @@ -38,10 +48,25 @@ ENTRY(atomic64_set_cx8)
29622 + pax_force_retaddr
29625 ENDPROC(atomic64_set_cx8)
29627 +ENTRY(atomic64_set_unchecked_cx8)
29631 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
29632 + * are atomic on 586 and newer */
29636 + pax_force_retaddr
29639 +ENDPROC(atomic64_set_unchecked_cx8)
29641 ENTRY(atomic64_xchg_cx8)
29644 @@ -50,12 +75,13 @@ ENTRY(atomic64_xchg_cx8)
29648 + pax_force_retaddr
29651 ENDPROC(atomic64_xchg_cx8)
29653 -.macro addsub_return func ins insc
29654 -ENTRY(atomic64_\func\()_return_cx8)
29655 +.macro addsub_return func ins insc unchecked=""
29656 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29660 @@ -72,27 +98,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29662 \ins\()l %esi, %ebx
29663 \insc\()l %edi, %ecx
29666 +#ifdef CONFIG_PAX_REFCOUNT
29669 + _ASM_EXTABLE(2b, 3f)
29682 +#ifdef CONFIG_PAX_REFCOUNT
29691 + pax_force_retaddr
29694 -ENDPROC(atomic64_\func\()_return_cx8)
29695 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29698 addsub_return add add adc
29699 addsub_return sub sub sbb
29700 +addsub_return add add adc _unchecked
29701 +addsub_return sub sub sbb _unchecked
29703 -.macro incdec_return func ins insc
29704 -ENTRY(atomic64_\func\()_return_cx8)
29705 +.macro incdec_return func ins insc unchecked=""
29706 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29710 @@ -102,21 +145,38 @@ ENTRY(atomic64_\func\()_return_cx8)
29716 +#ifdef CONFIG_PAX_REFCOUNT
29719 + _ASM_EXTABLE(2b, 3f)
29732 +#ifdef CONFIG_PAX_REFCOUNT
29738 + pax_force_retaddr
29741 -ENDPROC(atomic64_\func\()_return_cx8)
29742 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29745 incdec_return inc add adc
29746 incdec_return dec sub sbb
29747 +incdec_return inc add adc _unchecked
29748 +incdec_return dec sub sbb _unchecked
29750 ENTRY(atomic64_dec_if_positive_cx8)
29752 @@ -128,6 +188,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29757 +#ifdef CONFIG_PAX_REFCOUNT
29760 + _ASM_EXTABLE(1234b, 2f)
29766 @@ -137,6 +204,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29770 + pax_force_retaddr
29773 ENDPROC(atomic64_dec_if_positive_cx8)
29774 @@ -161,6 +229,13 @@ ENTRY(atomic64_add_unless_cx8)
29779 +#ifdef CONFIG_PAX_REFCOUNT
29782 + _ASM_EXTABLE(1234b, 3f)
29788 @@ -171,6 +246,7 @@ ENTRY(atomic64_add_unless_cx8)
29789 CFI_ADJUST_CFA_OFFSET -8
29792 + pax_force_retaddr
29796 @@ -193,6 +269,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29801 +#ifdef CONFIG_PAX_REFCOUNT
29804 + _ASM_EXTABLE(1234b, 3f)
29810 @@ -200,6 +283,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29814 + pax_force_retaddr
29817 ENDPROC(atomic64_inc_not_zero_cx8)
29818 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29819 index 9bc944a..e52be6c 100644
29820 --- a/arch/x86/lib/checksum_32.S
29821 +++ b/arch/x86/lib/checksum_32.S
29823 #include <asm/dwarf2.h>
29824 #include <asm/errno.h>
29825 #include <asm/asm.h>
29827 +#include <asm/segment.h>
29830 * computes a partial checksum, e.g. for TCP/UDP fragments
29832 @@ -285,9 +286,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29837 -ENTRY(csum_partial_copy_generic)
29839 +ENTRY(csum_partial_copy_generic_to_user)
29842 +#ifdef CONFIG_PAX_MEMORY_UDEREF
29845 + jmp csum_partial_copy_generic
29848 +ENTRY(csum_partial_copy_generic_from_user)
29850 +#ifdef CONFIG_PAX_MEMORY_UDEREF
29855 +ENTRY(csum_partial_copy_generic)
29857 CFI_ADJUST_CFA_OFFSET 4
29859 @@ -306,7 +322,7 @@ ENTRY(csum_partial_copy_generic)
29861 SRC(1: movw (%esi), %bx )
29863 -DST( movw %bx, (%edi) )
29864 +DST( movw %bx, %es:(%edi) )
29868 @@ -318,30 +334,30 @@ DST( movw %bx, (%edi) )
29869 SRC(1: movl (%esi), %ebx )
29870 SRC( movl 4(%esi), %edx )
29872 -DST( movl %ebx, (%edi) )
29873 +DST( movl %ebx, %es:(%edi) )
29875 -DST( movl %edx, 4(%edi) )
29876 +DST( movl %edx, %es:4(%edi) )
29878 SRC( movl 8(%esi), %ebx )
29879 SRC( movl 12(%esi), %edx )
29881 -DST( movl %ebx, 8(%edi) )
29882 +DST( movl %ebx, %es:8(%edi) )
29884 -DST( movl %edx, 12(%edi) )
29885 +DST( movl %edx, %es:12(%edi) )
29887 SRC( movl 16(%esi), %ebx )
29888 SRC( movl 20(%esi), %edx )
29890 -DST( movl %ebx, 16(%edi) )
29891 +DST( movl %ebx, %es:16(%edi) )
29893 -DST( movl %edx, 20(%edi) )
29894 +DST( movl %edx, %es:20(%edi) )
29896 SRC( movl 24(%esi), %ebx )
29897 SRC( movl 28(%esi), %edx )
29899 -DST( movl %ebx, 24(%edi) )
29900 +DST( movl %ebx, %es:24(%edi) )
29902 -DST( movl %edx, 28(%edi) )
29903 +DST( movl %edx, %es:28(%edi) )
29907 @@ -355,7 +371,7 @@ DST( movl %edx, 28(%edi) )
29908 shrl $2, %edx # This clears CF
29909 SRC(3: movl (%esi), %ebx )
29911 -DST( movl %ebx, (%edi) )
29912 +DST( movl %ebx, %es:(%edi) )
29916 @@ -367,12 +383,12 @@ DST( movl %ebx, (%edi) )
29918 SRC( movw (%esi), %cx )
29920 -DST( movw %cx, (%edi) )
29921 +DST( movw %cx, %es:(%edi) )
29925 SRC(5: movb (%esi), %cl )
29926 -DST( movb %cl, (%edi) )
29927 +DST( movb %cl, %es:(%edi) )
29931 @@ -383,7 +399,7 @@ DST( movb %cl, (%edi) )
29934 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29935 - movl $-EFAULT, (%ebx)
29936 + movl $-EFAULT, %ss:(%ebx)
29938 # zero the complete destination - computing the rest
29940 @@ -396,37 +412,58 @@ DST( movb %cl, (%edi) )
29943 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29944 - movl $-EFAULT,(%ebx)
29945 + movl $-EFAULT,%ss:(%ebx)
29957 popl_cfi %ecx # equivalent to addl $4,%esp
29960 -ENDPROC(csum_partial_copy_generic)
29961 +ENDPROC(csum_partial_copy_generic_to_user)
29965 /* Version for PentiumII/PPro */
29967 #define ROUND1(x) \
29969 SRC(movl x(%esi), %ebx ) ; \
29970 addl %ebx, %eax ; \
29971 - DST(movl %ebx, x(%edi) ) ;
29972 + DST(movl %ebx, %es:x(%edi)) ;
29976 SRC(movl x(%esi), %ebx ) ; \
29977 adcl %ebx, %eax ; \
29978 - DST(movl %ebx, x(%edi) ) ;
29979 + DST(movl %ebx, %es:x(%edi)) ;
29983 -ENTRY(csum_partial_copy_generic)
29985 +ENTRY(csum_partial_copy_generic_to_user)
29988 +#ifdef CONFIG_PAX_MEMORY_UDEREF
29991 + jmp csum_partial_copy_generic
29994 +ENTRY(csum_partial_copy_generic_from_user)
29996 +#ifdef CONFIG_PAX_MEMORY_UDEREF
30001 +ENTRY(csum_partial_copy_generic)
30005 @@ -444,7 +481,7 @@ ENTRY(csum_partial_copy_generic)
30009 - lea 3f(%ebx,%ebx), %ebx
30010 + lea 3f(%ebx,%ebx,2), %ebx
30014 @@ -465,19 +502,19 @@ ENTRY(csum_partial_copy_generic)
30016 SRC( movw (%esi), %dx )
30018 -DST( movw %dx, (%edi) )
30019 +DST( movw %dx, %es:(%edi) )
30024 SRC( movb (%esi), %dl )
30025 -DST( movb %dl, (%edi) )
30026 +DST( movb %dl, %es:(%edi) )
30030 .section .fixup, "ax"
30031 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
30032 - movl $-EFAULT, (%ebx)
30033 + movl $-EFAULT, %ss:(%ebx)
30034 # zero the complete destination (computing the rest is too much work)
30035 movl ARGBASE+8(%esp),%edi # dst
30036 movl ARGBASE+12(%esp),%ecx # len
30037 @@ -485,16 +522,23 @@ DST( movb %dl, (%edi) )
30040 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
30041 - movl $-EFAULT, (%ebx)
30042 + movl $-EFAULT, %ss:(%ebx)
30046 +#ifdef CONFIG_PAX_MEMORY_UDEREF
30058 -ENDPROC(csum_partial_copy_generic)
30059 +ENDPROC(csum_partial_copy_generic_to_user)
30063 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
30064 index e67e579..4782449 100644
30065 --- a/arch/x86/lib/clear_page_64.S
30066 +++ b/arch/x86/lib/clear_page_64.S
30067 @@ -23,6 +23,7 @@ ENTRY(clear_page)
30071 + pax_force_retaddr
30074 ENDPROC(clear_page)
30075 @@ -47,6 +48,7 @@ ENTRY(clear_page_orig)
30079 + pax_force_retaddr
30082 ENDPROC(clear_page_orig)
30083 @@ -56,6 +58,7 @@ ENTRY(clear_page_c_e)
30087 + pax_force_retaddr
30090 ENDPROC(clear_page_c_e)
30091 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
30092 index 40a1725..5d12ac4 100644
30093 --- a/arch/x86/lib/cmpxchg16b_emu.S
30094 +++ b/arch/x86/lib/cmpxchg16b_emu.S
30096 #include <linux/linkage.h>
30097 #include <asm/dwarf2.h>
30098 #include <asm/percpu.h>
30099 +#include <asm/alternative-asm.h>
30103 @@ -46,12 +47,14 @@ CFI_STARTPROC
30107 + pax_force_retaddr
30114 + pax_force_retaddr
30118 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
30119 index 8239dbc..e714d2a 100644
30120 --- a/arch/x86/lib/copy_page_64.S
30121 +++ b/arch/x86/lib/copy_page_64.S
30122 @@ -17,6 +17,7 @@ ENTRY(copy_page)
30123 ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
30126 + pax_force_retaddr
30130 @@ -27,8 +28,8 @@ ENTRY(copy_page_regs)
30131 CFI_ADJUST_CFA_OFFSET 2*8
30133 CFI_REL_OFFSET rbx, 0
30134 - movq %r12, 1*8(%rsp)
30135 - CFI_REL_OFFSET r12, 1*8
30136 + movq %r13, 1*8(%rsp)
30137 + CFI_REL_OFFSET r13, 1*8
30139 movl $(4096/64)-5, %ecx
30141 @@ -41,7 +42,7 @@ ENTRY(copy_page_regs)
30142 movq 0x8*4(%rsi), %r9
30143 movq 0x8*5(%rsi), %r10
30144 movq 0x8*6(%rsi), %r11
30145 - movq 0x8*7(%rsi), %r12
30146 + movq 0x8*7(%rsi), %r13
30148 prefetcht0 5*64(%rsi)
30150 @@ -52,7 +53,7 @@ ENTRY(copy_page_regs)
30151 movq %r9, 0x8*4(%rdi)
30152 movq %r10, 0x8*5(%rdi)
30153 movq %r11, 0x8*6(%rdi)
30154 - movq %r12, 0x8*7(%rdi)
30155 + movq %r13, 0x8*7(%rdi)
30157 leaq 64 (%rsi), %rsi
30158 leaq 64 (%rdi), %rdi
30159 @@ -71,7 +72,7 @@ ENTRY(copy_page_regs)
30160 movq 0x8*4(%rsi), %r9
30161 movq 0x8*5(%rsi), %r10
30162 movq 0x8*6(%rsi), %r11
30163 - movq 0x8*7(%rsi), %r12
30164 + movq 0x8*7(%rsi), %r13
30166 movq %rax, 0x8*0(%rdi)
30167 movq %rbx, 0x8*1(%rdi)
30168 @@ -80,7 +81,7 @@ ENTRY(copy_page_regs)
30169 movq %r9, 0x8*4(%rdi)
30170 movq %r10, 0x8*5(%rdi)
30171 movq %r11, 0x8*6(%rdi)
30172 - movq %r12, 0x8*7(%rdi)
30173 + movq %r13, 0x8*7(%rdi)
30175 leaq 64(%rdi), %rdi
30176 leaq 64(%rsi), %rsi
30177 @@ -88,10 +89,11 @@ ENTRY(copy_page_regs)
30181 - movq 1*8(%rsp), %r12
30183 + movq 1*8(%rsp), %r13
30186 CFI_ADJUST_CFA_OFFSET -2*8
30187 + pax_force_retaddr
30190 ENDPROC(copy_page_regs)
30191 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
30192 index fa997df..060ab18 100644
30193 --- a/arch/x86/lib/copy_user_64.S
30194 +++ b/arch/x86/lib/copy_user_64.S
30196 #include <asm/alternative-asm.h>
30197 #include <asm/asm.h>
30198 #include <asm/smap.h>
30199 +#include <asm/pgtable.h>
30201 .macro ALIGN_DESTINATION
30202 /* check for bad alignment of destination */
30204 _ASM_EXTABLE(101b,103b)
30207 -/* Standard copy_to_user with segment limit checking */
30208 -ENTRY(_copy_to_user)
30210 - GET_THREAD_INFO(%rax)
30214 - cmpq TI_addr_limit(%rax),%rcx
30216 - ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \
30217 - "jmp copy_user_generic_string", \
30218 - X86_FEATURE_REP_GOOD, \
30219 - "jmp copy_user_enhanced_fast_string", \
30222 -ENDPROC(_copy_to_user)
30224 -/* Standard copy_from_user with segment limit checking */
30225 -ENTRY(_copy_from_user)
30227 - GET_THREAD_INFO(%rax)
30231 - cmpq TI_addr_limit(%rax),%rcx
30233 - ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \
30234 - "jmp copy_user_generic_string", \
30235 - X86_FEATURE_REP_GOOD, \
30236 - "jmp copy_user_enhanced_fast_string", \
30239 -ENDPROC(_copy_from_user)
30241 - .section .fixup,"ax"
30242 - /* must zero dest */
30243 -ENTRY(bad_from_user)
30254 -ENDPROC(bad_from_user)
30258 * copy_user_generic_unrolled - memory copy with exception handling.
30259 * This version is for CPUs like P4 that don't have efficient micro
30260 @@ -105,6 +56,7 @@ ENDPROC(bad_from_user)
30262 ENTRY(copy_user_generic_unrolled)
30264 + ASM_PAX_OPEN_USERLAND
30267 jb 20f /* less then 8 bytes, go to byte copy loop */
30268 @@ -154,6 +106,8 @@ ENTRY(copy_user_generic_unrolled)
30272 + ASM_PAX_CLOSE_USERLAND
30273 + pax_force_retaddr
30276 .section .fixup,"ax"
30277 @@ -209,6 +163,7 @@ ENDPROC(copy_user_generic_unrolled)
30279 ENTRY(copy_user_generic_string)
30281 + ASM_PAX_OPEN_USERLAND
30284 jb 2f /* less than 8 bytes, go to byte copy loop */
30285 @@ -223,6 +178,8 @@ ENTRY(copy_user_generic_string)
30289 + ASM_PAX_CLOSE_USERLAND
30290 + pax_force_retaddr
30293 .section .fixup,"ax"
30294 @@ -250,12 +207,15 @@ ENDPROC(copy_user_generic_string)
30296 ENTRY(copy_user_enhanced_fast_string)
30298 + ASM_PAX_OPEN_USERLAND
30305 + ASM_PAX_CLOSE_USERLAND
30306 + pax_force_retaddr
30309 .section .fixup,"ax"
30310 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
30311 index 6a4f43c..c70fb52 100644
30312 --- a/arch/x86/lib/copy_user_nocache_64.S
30313 +++ b/arch/x86/lib/copy_user_nocache_64.S
30316 #include <linux/linkage.h>
30317 #include <asm/dwarf2.h>
30318 +#include <asm/alternative-asm.h>
30320 #define FIX_ALIGNMENT 1
30323 #include <asm/thread_info.h>
30324 #include <asm/asm.h>
30325 #include <asm/smap.h>
30326 +#include <asm/pgtable.h>
30328 .macro ALIGN_DESTINATION
30329 #ifdef FIX_ALIGNMENT
30332 ENTRY(__copy_user_nocache)
30335 +#ifdef CONFIG_PAX_MEMORY_UDEREF
30336 + mov pax_user_shadow_base,%rcx
30343 + ASM_PAX_OPEN_USERLAND
30346 jb 20f /* less then 8 bytes, go to byte copy loop */
30347 @@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
30351 + ASM_PAX_CLOSE_USERLAND
30353 + pax_force_retaddr
30356 .section .fixup,"ax"
30357 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30358 index 9734182..dbee61c 100644
30359 --- a/arch/x86/lib/csum-copy_64.S
30360 +++ b/arch/x86/lib/csum-copy_64.S
30362 #include <asm/dwarf2.h>
30363 #include <asm/errno.h>
30364 #include <asm/asm.h>
30365 +#include <asm/alternative-asm.h>
30368 * Checksum copy with exception handling.
30369 @@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30370 CFI_ADJUST_CFA_OFFSET 7*8
30371 movq %rbx, 2*8(%rsp)
30372 CFI_REL_OFFSET rbx, 2*8
30373 - movq %r12, 3*8(%rsp)
30374 - CFI_REL_OFFSET r12, 3*8
30375 + movq %r15, 3*8(%rsp)
30376 + CFI_REL_OFFSET r15, 3*8
30377 movq %r14, 4*8(%rsp)
30378 CFI_REL_OFFSET r14, 4*8
30379 movq %r13, 5*8(%rsp)
30380 @@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30389 jz .Lhandle_tail /* < 64 */
30393 /* main loop. clear in 64 byte blocks */
30394 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30395 - /* r11: temp3, rdx: temp4, r12 loopcnt */
30396 + /* r11: temp3, rdx: temp4, r15 loopcnt */
30397 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30400 @@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30409 @@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30411 movq 2*8(%rsp), %rbx
30413 - movq 3*8(%rsp), %r12
30415 + movq 3*8(%rsp), %r15
30417 movq 4*8(%rsp), %r14
30419 movq 5*8(%rsp), %r13
30420 @@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30423 CFI_ADJUST_CFA_OFFSET -7*8
30424 + pax_force_retaddr
30428 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30429 index 1318f75..44c30fd 100644
30430 --- a/arch/x86/lib/csum-wrappers_64.c
30431 +++ b/arch/x86/lib/csum-wrappers_64.c
30432 @@ -52,10 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30436 + pax_open_userland();
30438 - isum = csum_partial_copy_generic((__force const void *)src,
30439 + isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30440 dst, len, isum, errp, NULL);
30442 + pax_close_userland();
30443 if (unlikely(*errp))
30446 @@ -109,10 +111,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30450 + pax_open_userland();
30452 - ret = csum_partial_copy_generic(src, (void __force *)dst,
30453 + ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30454 len, isum, NULL, errp);
30456 + pax_close_userland();
30459 EXPORT_SYMBOL(csum_partial_copy_to_user);
30460 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30461 index a451235..a74bfa3 100644
30462 --- a/arch/x86/lib/getuser.S
30463 +++ b/arch/x86/lib/getuser.S
30464 @@ -33,17 +33,40 @@
30465 #include <asm/thread_info.h>
30466 #include <asm/asm.h>
30467 #include <asm/smap.h>
30468 +#include <asm/segment.h>
30469 +#include <asm/pgtable.h>
30470 +#include <asm/alternative-asm.h>
30472 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30473 +#define __copyuser_seg gs;
30475 +#define __copyuser_seg
30479 ENTRY(__get_user_1)
30482 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30483 GET_THREAD_INFO(%_ASM_DX)
30484 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30487 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30488 + mov pax_user_shadow_base,%_ASM_DX
30489 + cmp %_ASM_DX,%_ASM_AX
30491 + add %_ASM_DX,%_ASM_AX
30498 -1: movzbl (%_ASM_AX),%edx
30499 +1: __copyuser_seg movzbl (%_ASM_AX),%edx
30502 + pax_force_retaddr
30505 ENDPROC(__get_user_1)
30506 @@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30507 ENTRY(__get_user_2)
30511 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30513 GET_THREAD_INFO(%_ASM_DX)
30514 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30517 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30518 + mov pax_user_shadow_base,%_ASM_DX
30519 + cmp %_ASM_DX,%_ASM_AX
30521 + add %_ASM_DX,%_ASM_AX
30528 -2: movzwl -1(%_ASM_AX),%edx
30529 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30532 + pax_force_retaddr
30535 ENDPROC(__get_user_2)
30536 @@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30537 ENTRY(__get_user_4)
30541 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30543 GET_THREAD_INFO(%_ASM_DX)
30544 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30547 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30548 + mov pax_user_shadow_base,%_ASM_DX
30549 + cmp %_ASM_DX,%_ASM_AX
30551 + add %_ASM_DX,%_ASM_AX
30558 -3: movl -3(%_ASM_AX),%edx
30559 +3: __copyuser_seg movl -3(%_ASM_AX),%edx
30562 + pax_force_retaddr
30565 ENDPROC(__get_user_4)
30566 @@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30567 GET_THREAD_INFO(%_ASM_DX)
30568 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30571 +#ifdef CONFIG_PAX_MEMORY_UDEREF
30572 + mov pax_user_shadow_base,%_ASM_DX
30573 + cmp %_ASM_DX,%_ASM_AX
30575 + add %_ASM_DX,%_ASM_AX
30580 4: movq -7(%_ASM_AX),%rdx
30583 + pax_force_retaddr
30587 @@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30588 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30591 -4: movl -7(%_ASM_AX),%edx
30592 -5: movl -3(%_ASM_AX),%ecx
30593 +4: __copyuser_seg movl -7(%_ASM_AX),%edx
30594 +5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30597 + pax_force_retaddr
30601 @@ -113,6 +175,7 @@ bad_get_user:
30603 mov $(-EFAULT),%_ASM_AX
30605 + pax_force_retaddr
30609 @@ -124,6 +187,7 @@ bad_get_user_8:
30611 mov $(-EFAULT),%_ASM_AX
30613 + pax_force_retaddr
30616 END(bad_get_user_8)
30617 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30618 index 8f72b33..a43d9969 100644
30619 --- a/arch/x86/lib/insn.c
30620 +++ b/arch/x86/lib/insn.c
30624 #include <linux/string.h>
30625 +#include <asm/pgtable_types.h>
30627 #include <string.h>
30628 +#define ktla_ktva(addr) addr
30630 #include <asm/inat.h>
30631 #include <asm/insn.h>
30632 @@ -60,9 +62,9 @@ void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
30633 buf_len = MAX_INSN_SIZE;
30635 memset(insn, 0, sizeof(*insn));
30636 - insn->kaddr = kaddr;
30637 - insn->end_kaddr = kaddr + buf_len;
30638 - insn->next_byte = kaddr;
30639 + insn->kaddr = ktla_ktva(kaddr);
30640 + insn->end_kaddr = insn->kaddr + buf_len;
30641 + insn->next_byte = insn->kaddr;
30642 insn->x86_64 = x86_64 ? 1 : 0;
30643 insn->opnd_bytes = 4;
30645 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30646 index 05a95e7..326f2fa 100644
30647 --- a/arch/x86/lib/iomap_copy_64.S
30648 +++ b/arch/x86/lib/iomap_copy_64.S
30651 #include <linux/linkage.h>
30652 #include <asm/dwarf2.h>
30653 +#include <asm/alternative-asm.h>
30656 * override generic version in lib/iomap_copy.c
30657 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30661 + pax_force_retaddr
30664 ENDPROC(__iowrite32_copy)
30665 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30666 index b046664..dec9465 100644
30667 --- a/arch/x86/lib/memcpy_64.S
30668 +++ b/arch/x86/lib/memcpy_64.S
30669 @@ -37,6 +37,7 @@ ENTRY(memcpy)
30673 + pax_force_retaddr
30677 @@ -49,6 +50,7 @@ ENTRY(memcpy_erms)
30681 + pax_force_retaddr
30683 ENDPROC(memcpy_erms)
30685 @@ -134,6 +136,7 @@ ENTRY(memcpy_orig)
30686 movq %r9, 1*8(%rdi)
30687 movq %r10, -2*8(%rdi, %rdx)
30688 movq %r11, -1*8(%rdi, %rdx)
30689 + pax_force_retaddr
30693 @@ -146,6 +149,7 @@ ENTRY(memcpy_orig)
30694 movq -1*8(%rsi, %rdx), %r9
30695 movq %r8, 0*8(%rdi)
30696 movq %r9, -1*8(%rdi, %rdx)
30697 + pax_force_retaddr
30701 @@ -159,6 +163,7 @@ ENTRY(memcpy_orig)
30702 movl -4(%rsi, %rdx), %r8d
30704 movl %r8d, -4(%rdi, %rdx)
30705 + pax_force_retaddr
30709 @@ -177,6 +182,7 @@ ENTRY(memcpy_orig)
30713 + pax_force_retaddr
30716 ENDPROC(memcpy_orig)
30717 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30718 index 0f8a0d0..f6e0ea4 100644
30719 --- a/arch/x86/lib/memmove_64.S
30720 +++ b/arch/x86/lib/memmove_64.S
30721 @@ -43,7 +43,7 @@ ENTRY(__memmove)
30724 .Lmemmove_begin_forward:
30725 - ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS
30726 + ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; pax_force_retaddr; retq", X86_FEATURE_ERMS
30729 * movsq instruction have many startup latency
30730 @@ -206,6 +206,7 @@ ENTRY(__memmove)
30734 + pax_force_retaddr
30738 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30739 index 93118fb..386ed2a 100644
30740 --- a/arch/x86/lib/memset_64.S
30741 +++ b/arch/x86/lib/memset_64.S
30742 @@ -41,6 +41,7 @@ ENTRY(__memset)
30746 + pax_force_retaddr
30750 @@ -62,6 +63,7 @@ ENTRY(memset_erms)
30754 + pax_force_retaddr
30756 ENDPROC(memset_erms)
30758 @@ -126,6 +128,7 @@ ENTRY(memset_orig)
30762 + pax_force_retaddr
30766 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30767 index c9f2d9b..e7fd2c0 100644
30768 --- a/arch/x86/lib/mmx_32.c
30769 +++ b/arch/x86/lib/mmx_32.c
30770 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30774 + unsigned long cr0;
30776 if (unlikely(in_interrupt()))
30777 return __memcpy(to, from, len);
30778 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30779 kernel_fpu_begin();
30781 __asm__ __volatile__ (
30782 - "1: prefetch (%0)\n" /* This set is 28 bytes */
30783 - " prefetch 64(%0)\n"
30784 - " prefetch 128(%0)\n"
30785 - " prefetch 192(%0)\n"
30786 - " prefetch 256(%0)\n"
30787 + "1: prefetch (%1)\n" /* This set is 28 bytes */
30788 + " prefetch 64(%1)\n"
30789 + " prefetch 128(%1)\n"
30790 + " prefetch 192(%1)\n"
30791 + " prefetch 256(%1)\n"
30793 ".section .fixup, \"ax\"\n"
30794 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30797 +#ifdef CONFIG_PAX_KERNEXEC
30798 + " movl %%cr0, %0\n"
30799 + " movl %0, %%eax\n"
30800 + " andl $0xFFFEFFFF, %%eax\n"
30801 + " movl %%eax, %%cr0\n"
30804 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30806 +#ifdef CONFIG_PAX_KERNEXEC
30807 + " movl %0, %%cr0\n"
30812 _ASM_EXTABLE(1b, 3b)
30814 + : "=&r" (cr0) : "r" (from) : "ax");
30816 for ( ; i > 5; i--) {
30817 __asm__ __volatile__ (
30818 - "1: prefetch 320(%0)\n"
30819 - "2: movq (%0), %%mm0\n"
30820 - " movq 8(%0), %%mm1\n"
30821 - " movq 16(%0), %%mm2\n"
30822 - " movq 24(%0), %%mm3\n"
30823 - " movq %%mm0, (%1)\n"
30824 - " movq %%mm1, 8(%1)\n"
30825 - " movq %%mm2, 16(%1)\n"
30826 - " movq %%mm3, 24(%1)\n"
30827 - " movq 32(%0), %%mm0\n"
30828 - " movq 40(%0), %%mm1\n"
30829 - " movq 48(%0), %%mm2\n"
30830 - " movq 56(%0), %%mm3\n"
30831 - " movq %%mm0, 32(%1)\n"
30832 - " movq %%mm1, 40(%1)\n"
30833 - " movq %%mm2, 48(%1)\n"
30834 - " movq %%mm3, 56(%1)\n"
30835 + "1: prefetch 320(%1)\n"
30836 + "2: movq (%1), %%mm0\n"
30837 + " movq 8(%1), %%mm1\n"
30838 + " movq 16(%1), %%mm2\n"
30839 + " movq 24(%1), %%mm3\n"
30840 + " movq %%mm0, (%2)\n"
30841 + " movq %%mm1, 8(%2)\n"
30842 + " movq %%mm2, 16(%2)\n"
30843 + " movq %%mm3, 24(%2)\n"
30844 + " movq 32(%1), %%mm0\n"
30845 + " movq 40(%1), %%mm1\n"
30846 + " movq 48(%1), %%mm2\n"
30847 + " movq 56(%1), %%mm3\n"
30848 + " movq %%mm0, 32(%2)\n"
30849 + " movq %%mm1, 40(%2)\n"
30850 + " movq %%mm2, 48(%2)\n"
30851 + " movq %%mm3, 56(%2)\n"
30852 ".section .fixup, \"ax\"\n"
30853 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30856 +#ifdef CONFIG_PAX_KERNEXEC
30857 + " movl %%cr0, %0\n"
30858 + " movl %0, %%eax\n"
30859 + " andl $0xFFFEFFFF, %%eax\n"
30860 + " movl %%eax, %%cr0\n"
30863 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30865 +#ifdef CONFIG_PAX_KERNEXEC
30866 + " movl %0, %%cr0\n"
30871 _ASM_EXTABLE(1b, 3b)
30872 - : : "r" (from), "r" (to) : "memory");
30873 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30877 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30878 static void fast_copy_page(void *to, void *from)
30881 + unsigned long cr0;
30883 kernel_fpu_begin();
30885 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30886 * but that is for later. -AV
30888 __asm__ __volatile__(
30889 - "1: prefetch (%0)\n"
30890 - " prefetch 64(%0)\n"
30891 - " prefetch 128(%0)\n"
30892 - " prefetch 192(%0)\n"
30893 - " prefetch 256(%0)\n"
30894 + "1: prefetch (%1)\n"
30895 + " prefetch 64(%1)\n"
30896 + " prefetch 128(%1)\n"
30897 + " prefetch 192(%1)\n"
30898 + " prefetch 256(%1)\n"
30900 ".section .fixup, \"ax\"\n"
30901 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30904 +#ifdef CONFIG_PAX_KERNEXEC
30905 + " movl %%cr0, %0\n"
30906 + " movl %0, %%eax\n"
30907 + " andl $0xFFFEFFFF, %%eax\n"
30908 + " movl %%eax, %%cr0\n"
30911 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30913 +#ifdef CONFIG_PAX_KERNEXEC
30914 + " movl %0, %%cr0\n"
30919 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
30920 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30922 for (i = 0; i < (4096-320)/64; i++) {
30923 __asm__ __volatile__ (
30924 - "1: prefetch 320(%0)\n"
30925 - "2: movq (%0), %%mm0\n"
30926 - " movntq %%mm0, (%1)\n"
30927 - " movq 8(%0), %%mm1\n"
30928 - " movntq %%mm1, 8(%1)\n"
30929 - " movq 16(%0), %%mm2\n"
30930 - " movntq %%mm2, 16(%1)\n"
30931 - " movq 24(%0), %%mm3\n"
30932 - " movntq %%mm3, 24(%1)\n"
30933 - " movq 32(%0), %%mm4\n"
30934 - " movntq %%mm4, 32(%1)\n"
30935 - " movq 40(%0), %%mm5\n"
30936 - " movntq %%mm5, 40(%1)\n"
30937 - " movq 48(%0), %%mm6\n"
30938 - " movntq %%mm6, 48(%1)\n"
30939 - " movq 56(%0), %%mm7\n"
30940 - " movntq %%mm7, 56(%1)\n"
30941 + "1: prefetch 320(%1)\n"
30942 + "2: movq (%1), %%mm0\n"
30943 + " movntq %%mm0, (%2)\n"
30944 + " movq 8(%1), %%mm1\n"
30945 + " movntq %%mm1, 8(%2)\n"
30946 + " movq 16(%1), %%mm2\n"
30947 + " movntq %%mm2, 16(%2)\n"
30948 + " movq 24(%1), %%mm3\n"
30949 + " movntq %%mm3, 24(%2)\n"
30950 + " movq 32(%1), %%mm4\n"
30951 + " movntq %%mm4, 32(%2)\n"
30952 + " movq 40(%1), %%mm5\n"
30953 + " movntq %%mm5, 40(%2)\n"
30954 + " movq 48(%1), %%mm6\n"
30955 + " movntq %%mm6, 48(%2)\n"
30956 + " movq 56(%1), %%mm7\n"
30957 + " movntq %%mm7, 56(%2)\n"
30958 ".section .fixup, \"ax\"\n"
30959 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30962 +#ifdef CONFIG_PAX_KERNEXEC
30963 + " movl %%cr0, %0\n"
30964 + " movl %0, %%eax\n"
30965 + " andl $0xFFFEFFFF, %%eax\n"
30966 + " movl %%eax, %%cr0\n"
30969 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30971 +#ifdef CONFIG_PAX_KERNEXEC
30972 + " movl %0, %%cr0\n"
30977 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30978 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30982 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30983 static void fast_copy_page(void *to, void *from)
30986 + unsigned long cr0;
30988 kernel_fpu_begin();
30990 __asm__ __volatile__ (
30991 - "1: prefetch (%0)\n"
30992 - " prefetch 64(%0)\n"
30993 - " prefetch 128(%0)\n"
30994 - " prefetch 192(%0)\n"
30995 - " prefetch 256(%0)\n"
30996 + "1: prefetch (%1)\n"
30997 + " prefetch 64(%1)\n"
30998 + " prefetch 128(%1)\n"
30999 + " prefetch 192(%1)\n"
31000 + " prefetch 256(%1)\n"
31002 ".section .fixup, \"ax\"\n"
31003 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31006 +#ifdef CONFIG_PAX_KERNEXEC
31007 + " movl %%cr0, %0\n"
31008 + " movl %0, %%eax\n"
31009 + " andl $0xFFFEFFFF, %%eax\n"
31010 + " movl %%eax, %%cr0\n"
31013 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31015 +#ifdef CONFIG_PAX_KERNEXEC
31016 + " movl %0, %%cr0\n"
31021 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
31022 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
31024 for (i = 0; i < 4096/64; i++) {
31025 __asm__ __volatile__ (
31026 - "1: prefetch 320(%0)\n"
31027 - "2: movq (%0), %%mm0\n"
31028 - " movq 8(%0), %%mm1\n"
31029 - " movq 16(%0), %%mm2\n"
31030 - " movq 24(%0), %%mm3\n"
31031 - " movq %%mm0, (%1)\n"
31032 - " movq %%mm1, 8(%1)\n"
31033 - " movq %%mm2, 16(%1)\n"
31034 - " movq %%mm3, 24(%1)\n"
31035 - " movq 32(%0), %%mm0\n"
31036 - " movq 40(%0), %%mm1\n"
31037 - " movq 48(%0), %%mm2\n"
31038 - " movq 56(%0), %%mm3\n"
31039 - " movq %%mm0, 32(%1)\n"
31040 - " movq %%mm1, 40(%1)\n"
31041 - " movq %%mm2, 48(%1)\n"
31042 - " movq %%mm3, 56(%1)\n"
31043 + "1: prefetch 320(%1)\n"
31044 + "2: movq (%1), %%mm0\n"
31045 + " movq 8(%1), %%mm1\n"
31046 + " movq 16(%1), %%mm2\n"
31047 + " movq 24(%1), %%mm3\n"
31048 + " movq %%mm0, (%2)\n"
31049 + " movq %%mm1, 8(%2)\n"
31050 + " movq %%mm2, 16(%2)\n"
31051 + " movq %%mm3, 24(%2)\n"
31052 + " movq 32(%1), %%mm0\n"
31053 + " movq 40(%1), %%mm1\n"
31054 + " movq 48(%1), %%mm2\n"
31055 + " movq 56(%1), %%mm3\n"
31056 + " movq %%mm0, 32(%2)\n"
31057 + " movq %%mm1, 40(%2)\n"
31058 + " movq %%mm2, 48(%2)\n"
31059 + " movq %%mm3, 56(%2)\n"
31060 ".section .fixup, \"ax\"\n"
31061 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
31064 +#ifdef CONFIG_PAX_KERNEXEC
31065 + " movl %%cr0, %0\n"
31066 + " movl %0, %%eax\n"
31067 + " andl $0xFFFEFFFF, %%eax\n"
31068 + " movl %%eax, %%cr0\n"
31071 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
31073 +#ifdef CONFIG_PAX_KERNEXEC
31074 + " movl %0, %%cr0\n"
31079 _ASM_EXTABLE(1b, 3b)
31080 - : : "r" (from), "r" (to) : "memory");
31081 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
31085 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
31086 index 3ca5218..c2ae6bc 100644
31087 --- a/arch/x86/lib/msr-reg.S
31088 +++ b/arch/x86/lib/msr-reg.S
31090 #include <asm/dwarf2.h>
31091 #include <asm/asm.h>
31092 #include <asm/msr.h>
31093 +#include <asm/alternative-asm.h>
31095 #ifdef CONFIG_X86_64
31097 @@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
31098 movl %edi, 28(%r10)
31101 + pax_force_retaddr
31105 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
31106 index fc6ba17..14ad9a5 100644
31107 --- a/arch/x86/lib/putuser.S
31108 +++ b/arch/x86/lib/putuser.S
31110 #include <asm/errno.h>
31111 #include <asm/asm.h>
31112 #include <asm/smap.h>
31114 +#include <asm/segment.h>
31115 +#include <asm/pgtable.h>
31116 +#include <asm/alternative-asm.h>
31120 @@ -30,57 +32,125 @@
31121 * as they get called from within inline assembly.
31124 -#define ENTER CFI_STARTPROC ; \
31125 - GET_THREAD_INFO(%_ASM_BX)
31126 -#define EXIT ASM_CLAC ; \
31128 +#define ENTER CFI_STARTPROC
31129 +#define EXIT ASM_CLAC ; \
31130 + pax_force_retaddr ; \
31134 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31135 +#define _DEST %_ASM_CX,%_ASM_BX
31137 +#define _DEST %_ASM_CX
31140 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
31141 +#define __copyuser_seg gs;
31143 +#define __copyuser_seg
31147 ENTRY(__put_user_1)
31150 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31151 + GET_THREAD_INFO(%_ASM_BX)
31152 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
31155 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31156 + mov pax_user_shadow_base,%_ASM_BX
31157 + cmp %_ASM_BX,%_ASM_CX
31166 -1: movb %al,(%_ASM_CX)
31167 +1: __copyuser_seg movb %al,(_DEST)
31170 ENDPROC(__put_user_1)
31172 ENTRY(__put_user_2)
31175 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31176 + GET_THREAD_INFO(%_ASM_BX)
31177 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31179 cmp %_ASM_BX,%_ASM_CX
31182 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31183 + mov pax_user_shadow_base,%_ASM_BX
31184 + cmp %_ASM_BX,%_ASM_CX
31193 -2: movw %ax,(%_ASM_CX)
31194 +2: __copyuser_seg movw %ax,(_DEST)
31197 ENDPROC(__put_user_2)
31199 ENTRY(__put_user_4)
31202 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31203 + GET_THREAD_INFO(%_ASM_BX)
31204 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31206 cmp %_ASM_BX,%_ASM_CX
31209 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31210 + mov pax_user_shadow_base,%_ASM_BX
31211 + cmp %_ASM_BX,%_ASM_CX
31220 -3: movl %eax,(%_ASM_CX)
31221 +3: __copyuser_seg movl %eax,(_DEST)
31224 ENDPROC(__put_user_4)
31226 ENTRY(__put_user_8)
31229 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31230 + GET_THREAD_INFO(%_ASM_BX)
31231 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31233 cmp %_ASM_BX,%_ASM_CX
31236 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31237 + mov pax_user_shadow_base,%_ASM_BX
31238 + cmp %_ASM_BX,%_ASM_CX
31247 -4: mov %_ASM_AX,(%_ASM_CX)
31248 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
31249 #ifdef CONFIG_X86_32
31250 -5: movl %edx,4(%_ASM_CX)
31251 +5: __copyuser_seg movl %edx,4(_DEST)
31255 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
31256 index 2322abe..1e78a75 100644
31257 --- a/arch/x86/lib/rwsem.S
31258 +++ b/arch/x86/lib/rwsem.S
31259 @@ -92,6 +92,7 @@ ENTRY(call_rwsem_down_read_failed)
31260 call rwsem_down_read_failed
31261 __ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
31262 restore_common_regs
31263 + pax_force_retaddr
31266 ENDPROC(call_rwsem_down_read_failed)
31267 @@ -102,6 +103,7 @@ ENTRY(call_rwsem_down_write_failed)
31269 call rwsem_down_write_failed
31270 restore_common_regs
31271 + pax_force_retaddr
31274 ENDPROC(call_rwsem_down_write_failed)
31275 @@ -115,7 +117,8 @@ ENTRY(call_rwsem_wake)
31278 restore_common_regs
31280 +1: pax_force_retaddr
31283 ENDPROC(call_rwsem_wake)
31285 @@ -127,6 +130,7 @@ ENTRY(call_rwsem_downgrade_wake)
31286 call rwsem_downgrade_wake
31287 __ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
31288 restore_common_regs
31289 + pax_force_retaddr
31292 ENDPROC(call_rwsem_downgrade_wake)
31293 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
31294 index f89ba4e9..512b2de 100644
31295 --- a/arch/x86/lib/thunk_64.S
31296 +++ b/arch/x86/lib/thunk_64.S
31298 #include <asm/dwarf2.h>
31299 #include <asm/calling.h>
31300 #include <asm/asm.h>
31301 +#include <asm/alternative-asm.h>
31303 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
31304 .macro THUNK name, func, put_ret_addr_in_rdi=0
31305 @@ -69,6 +70,7 @@ restore:
31309 + pax_force_retaddr
31312 _ASM_NOKPROBE(restore)
31313 diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
31314 index ddf9ecb..e342586 100644
31315 --- a/arch/x86/lib/usercopy.c
31316 +++ b/arch/x86/lib/usercopy.c
31317 @@ -20,7 +20,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
31320 if (__range_not_ok(from, n, TASK_SIZE))
31325 * Even though this function is typically called from NMI/IRQ context
31326 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31327 index e2f5e21..4b22130 100644
31328 --- a/arch/x86/lib/usercopy_32.c
31329 +++ b/arch/x86/lib/usercopy_32.c
31330 @@ -42,11 +42,13 @@ do { \
31333 __asm__ __volatile__( \
31334 + __COPYUSER_SET_ES \
31336 "0: rep; stosl\n" \
31338 "1: rep; stosb\n" \
31339 "2: " ASM_CLAC "\n" \
31340 + __COPYUSER_RESTORE_ES \
31341 ".section .fixup,\"ax\"\n" \
31342 "3: lea 0(%2,%0,4),%0\n" \
31344 @@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31346 #ifdef CONFIG_X86_INTEL_USERCOPY
31347 static unsigned long
31348 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
31349 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31352 __asm__ __volatile__(
31353 @@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31355 "3: movl 0(%4), %%eax\n"
31356 "4: movl 4(%4), %%edx\n"
31357 - "5: movl %%eax, 0(%3)\n"
31358 - "6: movl %%edx, 4(%3)\n"
31359 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31360 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31361 "7: movl 8(%4), %%eax\n"
31362 "8: movl 12(%4),%%edx\n"
31363 - "9: movl %%eax, 8(%3)\n"
31364 - "10: movl %%edx, 12(%3)\n"
31365 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31366 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31367 "11: movl 16(%4), %%eax\n"
31368 "12: movl 20(%4), %%edx\n"
31369 - "13: movl %%eax, 16(%3)\n"
31370 - "14: movl %%edx, 20(%3)\n"
31371 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31372 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31373 "15: movl 24(%4), %%eax\n"
31374 "16: movl 28(%4), %%edx\n"
31375 - "17: movl %%eax, 24(%3)\n"
31376 - "18: movl %%edx, 28(%3)\n"
31377 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31378 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31379 "19: movl 32(%4), %%eax\n"
31380 "20: movl 36(%4), %%edx\n"
31381 - "21: movl %%eax, 32(%3)\n"
31382 - "22: movl %%edx, 36(%3)\n"
31383 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31384 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31385 "23: movl 40(%4), %%eax\n"
31386 "24: movl 44(%4), %%edx\n"
31387 - "25: movl %%eax, 40(%3)\n"
31388 - "26: movl %%edx, 44(%3)\n"
31389 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31390 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31391 "27: movl 48(%4), %%eax\n"
31392 "28: movl 52(%4), %%edx\n"
31393 - "29: movl %%eax, 48(%3)\n"
31394 - "30: movl %%edx, 52(%3)\n"
31395 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31396 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31397 "31: movl 56(%4), %%eax\n"
31398 "32: movl 60(%4), %%edx\n"
31399 - "33: movl %%eax, 56(%3)\n"
31400 - "34: movl %%edx, 60(%3)\n"
31401 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31402 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31406 @@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31408 " andl $3, %%eax\n"
31410 + __COPYUSER_SET_ES
31412 "36: movl %%eax, %0\n"
31415 + __COPYUSER_RESTORE_ES
31416 + ".section .fixup,\"ax\"\n"
31417 + "101: lea 0(%%eax,%0,4),%0\n"
31420 + _ASM_EXTABLE(1b,100b)
31421 + _ASM_EXTABLE(2b,100b)
31422 + _ASM_EXTABLE(3b,100b)
31423 + _ASM_EXTABLE(4b,100b)
31424 + _ASM_EXTABLE(5b,100b)
31425 + _ASM_EXTABLE(6b,100b)
31426 + _ASM_EXTABLE(7b,100b)
31427 + _ASM_EXTABLE(8b,100b)
31428 + _ASM_EXTABLE(9b,100b)
31429 + _ASM_EXTABLE(10b,100b)
31430 + _ASM_EXTABLE(11b,100b)
31431 + _ASM_EXTABLE(12b,100b)
31432 + _ASM_EXTABLE(13b,100b)
31433 + _ASM_EXTABLE(14b,100b)
31434 + _ASM_EXTABLE(15b,100b)
31435 + _ASM_EXTABLE(16b,100b)
31436 + _ASM_EXTABLE(17b,100b)
31437 + _ASM_EXTABLE(18b,100b)
31438 + _ASM_EXTABLE(19b,100b)
31439 + _ASM_EXTABLE(20b,100b)
31440 + _ASM_EXTABLE(21b,100b)
31441 + _ASM_EXTABLE(22b,100b)
31442 + _ASM_EXTABLE(23b,100b)
31443 + _ASM_EXTABLE(24b,100b)
31444 + _ASM_EXTABLE(25b,100b)
31445 + _ASM_EXTABLE(26b,100b)
31446 + _ASM_EXTABLE(27b,100b)
31447 + _ASM_EXTABLE(28b,100b)
31448 + _ASM_EXTABLE(29b,100b)
31449 + _ASM_EXTABLE(30b,100b)
31450 + _ASM_EXTABLE(31b,100b)
31451 + _ASM_EXTABLE(32b,100b)
31452 + _ASM_EXTABLE(33b,100b)
31453 + _ASM_EXTABLE(34b,100b)
31454 + _ASM_EXTABLE(35b,100b)
31455 + _ASM_EXTABLE(36b,100b)
31456 + _ASM_EXTABLE(37b,100b)
31457 + _ASM_EXTABLE(99b,101b)
31458 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
31459 + : "1"(to), "2"(from), "0"(size)
31460 + : "eax", "edx", "memory");
31464 +static unsigned long
31465 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31468 + __asm__ __volatile__(
31469 + " .align 2,0x90\n"
31470 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31471 + " cmpl $67, %0\n"
31473 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31474 + " .align 2,0x90\n"
31475 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31476 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31477 + "5: movl %%eax, 0(%3)\n"
31478 + "6: movl %%edx, 4(%3)\n"
31479 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31480 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31481 + "9: movl %%eax, 8(%3)\n"
31482 + "10: movl %%edx, 12(%3)\n"
31483 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31484 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31485 + "13: movl %%eax, 16(%3)\n"
31486 + "14: movl %%edx, 20(%3)\n"
31487 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31488 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31489 + "17: movl %%eax, 24(%3)\n"
31490 + "18: movl %%edx, 28(%3)\n"
31491 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31492 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31493 + "21: movl %%eax, 32(%3)\n"
31494 + "22: movl %%edx, 36(%3)\n"
31495 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31496 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31497 + "25: movl %%eax, 40(%3)\n"
31498 + "26: movl %%edx, 44(%3)\n"
31499 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31500 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31501 + "29: movl %%eax, 48(%3)\n"
31502 + "30: movl %%edx, 52(%3)\n"
31503 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31504 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31505 + "33: movl %%eax, 56(%3)\n"
31506 + "34: movl %%edx, 60(%3)\n"
31507 + " addl $-64, %0\n"
31508 + " addl $64, %4\n"
31509 + " addl $64, %3\n"
31510 + " cmpl $63, %0\n"
31512 + "35: movl %0, %%eax\n"
31514 + " andl $3, %%eax\n"
31516 + "99: rep; "__copyuser_seg" movsl\n"
31517 + "36: movl %%eax, %0\n"
31518 + "37: rep; "__copyuser_seg" movsb\n"
31520 ".section .fixup,\"ax\"\n"
31521 "101: lea 0(%%eax,%0,4),%0\n"
31523 @@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31525 __asm__ __volatile__(
31527 - "0: movl 32(%4), %%eax\n"
31528 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31531 - "1: movl 64(%4), %%eax\n"
31532 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31534 - "2: movl 0(%4), %%eax\n"
31535 - "21: movl 4(%4), %%edx\n"
31536 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31537 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31538 " movl %%eax, 0(%3)\n"
31539 " movl %%edx, 4(%3)\n"
31540 - "3: movl 8(%4), %%eax\n"
31541 - "31: movl 12(%4),%%edx\n"
31542 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31543 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31544 " movl %%eax, 8(%3)\n"
31545 " movl %%edx, 12(%3)\n"
31546 - "4: movl 16(%4), %%eax\n"
31547 - "41: movl 20(%4), %%edx\n"
31548 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31549 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31550 " movl %%eax, 16(%3)\n"
31551 " movl %%edx, 20(%3)\n"
31552 - "10: movl 24(%4), %%eax\n"
31553 - "51: movl 28(%4), %%edx\n"
31554 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31555 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31556 " movl %%eax, 24(%3)\n"
31557 " movl %%edx, 28(%3)\n"
31558 - "11: movl 32(%4), %%eax\n"
31559 - "61: movl 36(%4), %%edx\n"
31560 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31561 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31562 " movl %%eax, 32(%3)\n"
31563 " movl %%edx, 36(%3)\n"
31564 - "12: movl 40(%4), %%eax\n"
31565 - "71: movl 44(%4), %%edx\n"
31566 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31567 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31568 " movl %%eax, 40(%3)\n"
31569 " movl %%edx, 44(%3)\n"
31570 - "13: movl 48(%4), %%eax\n"
31571 - "81: movl 52(%4), %%edx\n"
31572 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31573 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31574 " movl %%eax, 48(%3)\n"
31575 " movl %%edx, 52(%3)\n"
31576 - "14: movl 56(%4), %%eax\n"
31577 - "91: movl 60(%4), %%edx\n"
31578 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31579 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31580 " movl %%eax, 56(%3)\n"
31581 " movl %%edx, 60(%3)\n"
31583 @@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31585 " andl $3, %%eax\n"
31587 - "6: rep; movsl\n"
31588 + "6: rep; "__copyuser_seg" movsl\n"
31590 - "7: rep; movsb\n"
31591 + "7: rep; "__copyuser_seg" movsb\n"
31593 ".section .fixup,\"ax\"\n"
31594 "9: lea 0(%%eax,%0,4),%0\n"
31595 @@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31597 __asm__ __volatile__(
31599 - "0: movl 32(%4), %%eax\n"
31600 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31603 - "1: movl 64(%4), %%eax\n"
31604 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31606 - "2: movl 0(%4), %%eax\n"
31607 - "21: movl 4(%4), %%edx\n"
31608 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31609 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31610 " movnti %%eax, 0(%3)\n"
31611 " movnti %%edx, 4(%3)\n"
31612 - "3: movl 8(%4), %%eax\n"
31613 - "31: movl 12(%4),%%edx\n"
31614 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31615 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31616 " movnti %%eax, 8(%3)\n"
31617 " movnti %%edx, 12(%3)\n"
31618 - "4: movl 16(%4), %%eax\n"
31619 - "41: movl 20(%4), %%edx\n"
31620 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31621 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31622 " movnti %%eax, 16(%3)\n"
31623 " movnti %%edx, 20(%3)\n"
31624 - "10: movl 24(%4), %%eax\n"
31625 - "51: movl 28(%4), %%edx\n"
31626 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31627 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31628 " movnti %%eax, 24(%3)\n"
31629 " movnti %%edx, 28(%3)\n"
31630 - "11: movl 32(%4), %%eax\n"
31631 - "61: movl 36(%4), %%edx\n"
31632 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31633 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31634 " movnti %%eax, 32(%3)\n"
31635 " movnti %%edx, 36(%3)\n"
31636 - "12: movl 40(%4), %%eax\n"
31637 - "71: movl 44(%4), %%edx\n"
31638 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31639 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31640 " movnti %%eax, 40(%3)\n"
31641 " movnti %%edx, 44(%3)\n"
31642 - "13: movl 48(%4), %%eax\n"
31643 - "81: movl 52(%4), %%edx\n"
31644 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31645 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31646 " movnti %%eax, 48(%3)\n"
31647 " movnti %%edx, 52(%3)\n"
31648 - "14: movl 56(%4), %%eax\n"
31649 - "91: movl 60(%4), %%edx\n"
31650 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31651 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31652 " movnti %%eax, 56(%3)\n"
31653 " movnti %%edx, 60(%3)\n"
31655 @@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31657 " andl $3, %%eax\n"
31659 - "6: rep; movsl\n"
31660 + "6: rep; "__copyuser_seg" movsl\n"
31662 - "7: rep; movsb\n"
31663 + "7: rep; "__copyuser_seg" movsb\n"
31665 ".section .fixup,\"ax\"\n"
31666 "9: lea 0(%%eax,%0,4),%0\n"
31667 @@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31669 __asm__ __volatile__(
31671 - "0: movl 32(%4), %%eax\n"
31672 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31675 - "1: movl 64(%4), %%eax\n"
31676 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31678 - "2: movl 0(%4), %%eax\n"
31679 - "21: movl 4(%4), %%edx\n"
31680 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31681 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31682 " movnti %%eax, 0(%3)\n"
31683 " movnti %%edx, 4(%3)\n"
31684 - "3: movl 8(%4), %%eax\n"
31685 - "31: movl 12(%4),%%edx\n"
31686 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31687 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31688 " movnti %%eax, 8(%3)\n"
31689 " movnti %%edx, 12(%3)\n"
31690 - "4: movl 16(%4), %%eax\n"
31691 - "41: movl 20(%4), %%edx\n"
31692 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31693 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31694 " movnti %%eax, 16(%3)\n"
31695 " movnti %%edx, 20(%3)\n"
31696 - "10: movl 24(%4), %%eax\n"
31697 - "51: movl 28(%4), %%edx\n"
31698 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31699 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31700 " movnti %%eax, 24(%3)\n"
31701 " movnti %%edx, 28(%3)\n"
31702 - "11: movl 32(%4), %%eax\n"
31703 - "61: movl 36(%4), %%edx\n"
31704 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31705 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31706 " movnti %%eax, 32(%3)\n"
31707 " movnti %%edx, 36(%3)\n"
31708 - "12: movl 40(%4), %%eax\n"
31709 - "71: movl 44(%4), %%edx\n"
31710 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31711 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31712 " movnti %%eax, 40(%3)\n"
31713 " movnti %%edx, 44(%3)\n"
31714 - "13: movl 48(%4), %%eax\n"
31715 - "81: movl 52(%4), %%edx\n"
31716 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31717 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31718 " movnti %%eax, 48(%3)\n"
31719 " movnti %%edx, 52(%3)\n"
31720 - "14: movl 56(%4), %%eax\n"
31721 - "91: movl 60(%4), %%edx\n"
31722 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31723 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31724 " movnti %%eax, 56(%3)\n"
31725 " movnti %%edx, 60(%3)\n"
31727 @@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31729 " andl $3, %%eax\n"
31731 - "6: rep; movsl\n"
31732 + "6: rep; "__copyuser_seg" movsl\n"
31734 - "7: rep; movsb\n"
31735 + "7: rep; "__copyuser_seg" movsb\n"
31737 ".section .fixup,\"ax\"\n"
31738 "9: lea 0(%%eax,%0,4),%0\n"
31739 @@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31741 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31742 unsigned long size);
31743 -unsigned long __copy_user_intel(void __user *to, const void *from,
31744 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31745 + unsigned long size);
31746 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31747 unsigned long size);
31748 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31749 const void __user *from, unsigned long size);
31750 #endif /* CONFIG_X86_INTEL_USERCOPY */
31752 /* Generic arbitrary sized copy. */
31753 -#define __copy_user(to, from, size) \
31754 +#define __copy_user(to, from, size, prefix, set, restore) \
31756 int __d0, __d1, __d2; \
31757 __asm__ __volatile__( \
31765 - "4: rep; movsb\n" \
31766 + "4: rep; "prefix"movsb\n" \
31770 " .align 2,0x90\n" \
31771 - "0: rep; movsl\n" \
31772 + "0: rep; "prefix"movsl\n" \
31774 - "1: rep; movsb\n" \
31775 + "1: rep; "prefix"movsb\n" \
31778 ".section .fixup,\"ax\"\n" \
31779 "5: addl %3,%0\n" \
31781 @@ -538,14 +650,14 @@ do { \
31785 - "4: rep; movsb\n" \
31786 + "4: rep; "__copyuser_seg"movsb\n" \
31790 " .align 2,0x90\n" \
31791 - "0: rep; movsl\n" \
31792 + "0: rep; "__copyuser_seg"movsl\n" \
31794 - "1: rep; movsb\n" \
31795 + "1: rep; "__copyuser_seg"movsb\n" \
31797 ".section .fixup,\"ax\"\n" \
31798 "5: addl %3,%0\n" \
31799 @@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31802 if (movsl_is_ok(to, from, n))
31803 - __copy_user(to, from, n);
31804 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31806 - n = __copy_user_intel(to, from, n);
31807 + n = __generic_copy_to_user_intel(to, from, n);
31811 @@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31814 if (movsl_is_ok(to, from, n))
31815 - __copy_user(to, from, n);
31816 + __copy_user(to, from, n, __copyuser_seg, "", "");
31818 - n = __copy_user_intel((void __user *)to,
31819 - (const void *)from, n);
31820 + n = __generic_copy_from_user_intel(to, from, n);
31824 @@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31825 if (n > 64 && cpu_has_xmm2)
31826 n = __copy_user_intel_nocache(to, from, n);
31828 - __copy_user(to, from, n);
31829 + __copy_user(to, from, n, __copyuser_seg, "", "");
31831 - __copy_user(to, from, n);
31832 + __copy_user(to, from, n, __copyuser_seg, "", "");
31837 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31840 - * copy_to_user: - Copy a block of data into user space.
31841 - * @to: Destination address, in user space.
31842 - * @from: Source address, in kernel space.
31843 - * @n: Number of bytes to copy.
31845 - * Context: User context only. This function may sleep.
31847 - * Copy data from kernel space to user space.
31849 - * Returns number of bytes that could not be copied.
31850 - * On success, this will be zero.
31852 -unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31853 +#ifdef CONFIG_PAX_MEMORY_UDEREF
31854 +void __set_fs(mm_segment_t x)
31856 - if (access_ok(VERIFY_WRITE, to, n))
31857 - n = __copy_to_user(to, from, n);
31861 + loadsegment(gs, 0);
31863 + case TASK_SIZE_MAX:
31864 + loadsegment(gs, __USER_DS);
31867 + loadsegment(gs, __KERNEL_DS);
31873 -EXPORT_SYMBOL(_copy_to_user);
31874 +EXPORT_SYMBOL(__set_fs);
31877 - * copy_from_user: - Copy a block of data from user space.
31878 - * @to: Destination address, in kernel space.
31879 - * @from: Source address, in user space.
31880 - * @n: Number of bytes to copy.
31882 - * Context: User context only. This function may sleep.
31884 - * Copy data from user space to kernel space.
31886 - * Returns number of bytes that could not be copied.
31887 - * On success, this will be zero.
31889 - * If some data could not be copied, this function will pad the copied
31890 - * data to the requested size using zero bytes.
31892 -unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31893 +void set_fs(mm_segment_t x)
31895 - if (access_ok(VERIFY_READ, from, n))
31896 - n = __copy_from_user(to, from, n);
31898 - memset(to, 0, n);
31900 + current_thread_info()->addr_limit = x;
31903 -EXPORT_SYMBOL(_copy_from_user);
31904 +EXPORT_SYMBOL(set_fs);
31906 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31907 index 0a42327..7a82465 100644
31908 --- a/arch/x86/lib/usercopy_64.c
31909 +++ b/arch/x86/lib/usercopy_64.c
31910 @@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31912 /* no memory constraint because it doesn't change any memory gcc knows
31914 + pax_open_userland();
31917 " testq %[size8],%[size8]\n"
31918 @@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31919 _ASM_EXTABLE(0b,3b)
31920 _ASM_EXTABLE(1b,2b)
31921 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31922 - : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31923 + : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31924 [zero] "r" (0UL), [eight] "r" (8UL));
31926 + pax_close_userland();
31929 EXPORT_SYMBOL(__clear_user);
31930 @@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31932 EXPORT_SYMBOL(clear_user);
31934 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31935 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31937 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31938 - return copy_user_generic((__force void *)to, (__force void *)from, len);
31941 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31942 + return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31945 EXPORT_SYMBOL(copy_in_user);
31947 @@ -69,8 +70,10 @@ EXPORT_SYMBOL(copy_in_user);
31948 * it is not necessary to optimize tail handling.
31950 __visible unsigned long
31951 -copy_user_handle_tail(char *to, char *from, unsigned len)
31952 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len)
31955 + pax_close_userland();
31956 for (; len; --len, to++) {
31959 @@ -79,10 +82,9 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
31960 if (__put_user_nocheck(c, to, sizeof(char)))
31965 /* If the destination is a kernel buffer, we always clear the end */
31966 - if (!__addr_ok(to))
31967 + if (!__addr_ok(to) && (unsigned long)to >= TASK_SIZE_MAX + pax_user_shadow_base)
31968 memset(to, 0, len);
31971 diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31972 index a482d10..1a6edb5 100644
31973 --- a/arch/x86/mm/Makefile
31974 +++ b/arch/x86/mm/Makefile
31975 @@ -33,3 +33,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
31976 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31978 obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
31981 +obj-$(CONFIG_X86_64) += uderef_64.o
31982 +CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) -fcall-saved-rax
31983 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31984 index 903ec1e..c4166b2 100644
31985 --- a/arch/x86/mm/extable.c
31986 +++ b/arch/x86/mm/extable.c
31988 static inline unsigned long
31989 ex_insn_addr(const struct exception_table_entry *x)
31991 - return (unsigned long)&x->insn + x->insn;
31992 + unsigned long reloc = 0;
31994 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31995 + reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31998 + return (unsigned long)&x->insn + x->insn + reloc;
32000 static inline unsigned long
32001 ex_fixup_addr(const struct exception_table_entry *x)
32003 - return (unsigned long)&x->fixup + x->fixup;
32004 + unsigned long reloc = 0;
32006 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32007 + reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
32010 + return (unsigned long)&x->fixup + x->fixup + reloc;
32013 int fixup_exception(struct pt_regs *regs)
32014 @@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
32015 unsigned long new_ip;
32017 #ifdef CONFIG_PNPBIOS
32018 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
32019 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
32020 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
32021 extern u32 pnp_bios_is_utter_crap;
32022 pnp_bios_is_utter_crap = 1;
32023 @@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
32028 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32029 + BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
32030 + p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
32031 + p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
32037 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
32038 index 181c53b..d336596 100644
32039 --- a/arch/x86/mm/fault.c
32040 +++ b/arch/x86/mm/fault.c
32041 @@ -13,12 +13,19 @@
32042 #include <linux/hugetlb.h> /* hstate_index_to_shift */
32043 #include <linux/prefetch.h> /* prefetchw */
32044 #include <linux/context_tracking.h> /* exception_enter(), ... */
32045 +#include <linux/unistd.h>
32046 +#include <linux/compiler.h>
32048 #include <asm/traps.h> /* dotraplinkage, ... */
32049 #include <asm/pgalloc.h> /* pgd_*(), ... */
32050 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
32051 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
32052 #include <asm/vsyscall.h> /* emulate_vsyscall */
32053 +#include <asm/tlbflush.h>
32055 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32056 +#include <asm/stacktrace.h>
32059 #define CREATE_TRACE_POINTS
32060 #include <asm/trace/exceptions.h>
32061 @@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
32062 return !instr_lo || (instr_lo>>1) == 1;
32064 /* Prefetch instruction is 0x0F0D or 0x0F18 */
32065 - if (probe_kernel_address(instr, opcode))
32066 + if (user_mode(regs)) {
32067 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
32069 + } else if (probe_kernel_address(instr, opcode))
32072 *prefetch = (instr_lo == 0xF) &&
32073 @@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
32074 while (instr < max_instr) {
32075 unsigned char opcode;
32077 - if (probe_kernel_address(instr, opcode))
32078 + if (user_mode(regs)) {
32079 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
32081 + } else if (probe_kernel_address(instr, opcode))
32085 @@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
32086 force_sig_info(si_signo, &info, tsk);
32089 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32090 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
32093 +#ifdef CONFIG_PAX_EMUTRAMP
32094 +static int pax_handle_fetch_fault(struct pt_regs *regs);
32097 +#ifdef CONFIG_PAX_PAGEEXEC
32098 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
32104 + pgd = pgd_offset(mm, address);
32105 + if (!pgd_present(*pgd))
32107 + pud = pud_offset(pgd, address);
32108 + if (!pud_present(*pud))
32110 + pmd = pmd_offset(pud, address);
32111 + if (!pmd_present(*pmd))
32117 DEFINE_SPINLOCK(pgd_lock);
32118 LIST_HEAD(pgd_list);
32120 @@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
32121 for (address = VMALLOC_START & PMD_MASK;
32122 address >= TASK_SIZE && address < FIXADDR_TOP;
32123 address += PMD_SIZE) {
32125 +#ifdef CONFIG_PAX_PER_CPU_PGD
32126 + unsigned long cpu;
32131 spin_lock(&pgd_lock);
32133 +#ifdef CONFIG_PAX_PER_CPU_PGD
32134 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
32135 + pgd_t *pgd = get_cpu_pgd(cpu, user);
32138 + ret = vmalloc_sync_one(pgd, address);
32141 + pgd = get_cpu_pgd(cpu, kernel);
32143 list_for_each_entry(page, &pgd_list, lru) {
32145 spinlock_t *pgt_lock;
32148 @@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
32149 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
32151 spin_lock(pgt_lock);
32152 - ret = vmalloc_sync_one(page_address(page), address);
32153 + pgd = page_address(page);
32156 + ret = vmalloc_sync_one(pgd, address);
32158 +#ifndef CONFIG_PAX_PER_CPU_PGD
32159 spin_unlock(pgt_lock);
32164 @@ -281,6 +345,12 @@ static noinline int vmalloc_fault(unsigned long address)
32165 * an interrupt in the middle of a task switch..
32167 pgd_paddr = read_cr3();
32169 +#ifdef CONFIG_PAX_PER_CPU_PGD
32170 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
32171 + vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
32174 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
32177 @@ -377,11 +447,25 @@ static noinline int vmalloc_fault(unsigned long address)
32178 * happen within a race in page table update. In the later
32181 - pgd = pgd_offset(current->active_mm, address);
32183 pgd_ref = pgd_offset_k(address);
32184 if (pgd_none(*pgd_ref))
32187 +#ifdef CONFIG_PAX_PER_CPU_PGD
32188 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
32189 + pgd = pgd_offset_cpu(smp_processor_id(), user, address);
32190 + if (pgd_none(*pgd)) {
32191 + set_pgd(pgd, *pgd_ref);
32192 + arch_flush_lazy_mmu_mode();
32194 + BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
32196 + pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
32198 + pgd = pgd_offset(current->active_mm, address);
32201 if (pgd_none(*pgd)) {
32202 set_pgd(pgd, *pgd_ref);
32203 arch_flush_lazy_mmu_mode();
32204 @@ -548,7 +632,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
32205 static int is_errata100(struct pt_regs *regs, unsigned long address)
32207 #ifdef CONFIG_X86_64
32208 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
32209 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
32213 @@ -575,9 +659,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
32216 static const char nx_warning[] = KERN_CRIT
32217 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
32218 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
32219 static const char smep_warning[] = KERN_CRIT
32220 -"unable to execute userspace code (SMEP?) (uid: %d)\n";
32221 +"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
32224 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32225 @@ -586,7 +670,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32226 if (!oops_may_print())
32229 - if (error_code & PF_INSTR) {
32230 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
32231 unsigned int level;
32234 @@ -597,13 +681,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32235 pte = lookup_address_in_pgd(pgd, address, &level);
32237 if (pte && pte_present(*pte) && !pte_exec(*pte))
32238 - printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
32239 + printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
32240 if (pte && pte_present(*pte) && pte_exec(*pte) &&
32241 (pgd_flags(*pgd) & _PAGE_USER) &&
32242 (__read_cr4() & X86_CR4_SMEP))
32243 - printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
32244 + printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
32247 +#ifdef CONFIG_PAX_KERNEXEC
32248 + if (init_mm.start_code <= address && address < init_mm.end_code) {
32249 + if (current->signal->curr_ip)
32250 + printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
32251 + &current->signal->curr_ip, current->comm, task_pid_nr(current),
32252 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32254 + printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
32255 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32259 printk(KERN_ALERT "BUG: unable to handle kernel ");
32260 if (address < PAGE_SIZE)
32261 printk(KERN_CONT "NULL pointer dereference");
32262 @@ -782,6 +878,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
32267 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32268 + if (pax_is_fetch_fault(regs, error_code, address)) {
32270 +#ifdef CONFIG_PAX_EMUTRAMP
32271 + switch (pax_handle_fetch_fault(regs)) {
32277 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32278 + do_group_exit(SIGKILL);
32282 /* Kernel addresses are always protection faults: */
32283 if (address >= TASK_SIZE)
32284 error_code |= PF_PROT;
32285 @@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
32286 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
32288 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
32289 - tsk->comm, tsk->pid, address);
32290 + tsk->comm, task_pid_nr(tsk), address);
32291 code = BUS_MCEERR_AR;
32294 @@ -916,6 +1028,107 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32298 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32299 +static inline unsigned long get_limit(unsigned long segment)
32301 + unsigned long __limit;
32303 + asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
32304 + return __limit + 1;
32307 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32312 + unsigned char pte_mask;
32314 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32315 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
32318 + /* PaX: it's our fault, let's handle it if we can */
32320 + /* PaX: take a look at read faults before acquiring any locks */
32321 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32322 + /* instruction fetch attempt from a protected page in user mode */
32323 + up_read(&mm->mmap_sem);
32325 +#ifdef CONFIG_PAX_EMUTRAMP
32326 + switch (pax_handle_fetch_fault(regs)) {
32332 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32333 + do_group_exit(SIGKILL);
32336 + pmd = pax_get_pmd(mm, address);
32337 + if (unlikely(!pmd))
32340 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32341 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32342 + pte_unmap_unlock(pte, ptl);
32346 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32347 + /* write attempt to a protected page in user mode */
32348 + pte_unmap_unlock(pte, ptl);
32353 + if (likely(address > get_limit(regs->cs) && cpumask_test_cpu(smp_processor_id(), &mm->context.cpu_user_cs_mask)))
32355 + if (likely(address > get_limit(regs->cs)))
32358 + set_pte(pte, pte_mkread(*pte));
32359 + __flush_tlb_one(address);
32360 + pte_unmap_unlock(pte, ptl);
32361 + up_read(&mm->mmap_sem);
32365 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32368 + * PaX: fill DTLB with user rights and retry
32370 + __asm__ __volatile__ (
32372 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32374 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
32375 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
32376 + * page fault when examined during a TLB load attempt. this is true not only
32377 + * for PTEs holding a non-present entry but also present entries that will
32378 + * raise a page fault (such as those set up by PaX, or the copy-on-write
32379 + * mechanism). in effect it means that we do *not* need to flush the TLBs
32380 + * for our target pages since their PTEs are simply not in the TLBs at all.
32382 + * the best thing in omitting it is that we gain around 15-20% speed in the
32383 + * fast path of the page fault handler and can get rid of tracing since we
32384 + * can no longer flush unintended entries.
32388 + __copyuser_seg"testb $0,(%0)\n"
32391 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32392 + : "memory", "cc");
32393 + pte_unmap_unlock(pte, ptl);
32394 + up_read(&mm->mmap_sem);
32400 * Handle a spurious fault caused by a stale TLB entry.
32402 @@ -1001,6 +1214,9 @@ int show_unhandled_signals = 1;
32404 access_error(unsigned long error_code, struct vm_area_struct *vma)
32406 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32409 if (error_code & PF_WRITE) {
32410 /* write, present and write, not present: */
32411 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32412 @@ -1063,6 +1279,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32416 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32417 + if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32418 + if (!search_exception_tables(regs->ip)) {
32419 + printk(KERN_EMERG "PAX: please report this to pageexec@freemail.hu\n");
32420 + bad_area_nosemaphore(regs, error_code, address);
32423 + if (address < pax_user_shadow_base) {
32424 + printk(KERN_EMERG "PAX: please report this to pageexec@freemail.hu\n");
32425 + printk(KERN_EMERG "PAX: faulting IP: %pS\n", (void *)regs->ip);
32426 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_EMERG);
32428 + address -= pax_user_shadow_base;
32433 * Detect and handle instructions that would cause a page fault for
32434 * both a tracked kernel page and a userspace page.
32435 @@ -1187,6 +1419,11 @@ retry:
32439 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32440 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32444 vma = find_vma(mm, address);
32445 if (unlikely(!vma)) {
32446 bad_area(regs, error_code, address);
32447 @@ -1198,18 +1435,24 @@ retry:
32448 bad_area(regs, error_code, address);
32451 - if (error_code & PF_USER) {
32453 - * Accessing the stack below %sp is always a bug.
32454 - * The large cushion allows instructions like enter
32455 - * and pusha to work. ("enter $65535, $31" pushes
32456 - * 32 pointers and then decrements %sp by 65535.)
32458 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32459 - bad_area(regs, error_code, address);
32463 + * Accessing the stack below %sp is always a bug.
32464 + * The large cushion allows instructions like enter
32465 + * and pusha to work. ("enter $65535, $31" pushes
32466 + * 32 pointers and then decrements %sp by 65535.)
32468 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32469 + bad_area(regs, error_code, address);
32473 +#ifdef CONFIG_PAX_SEGMEXEC
32474 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32475 + bad_area(regs, error_code, address);
32480 if (unlikely(expand_stack(vma, address))) {
32481 bad_area(regs, error_code, address);
32483 @@ -1329,3 +1572,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32485 NOKPROBE_SYMBOL(trace_do_page_fault);
32486 #endif /* CONFIG_TRACING */
32488 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32489 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32491 + struct mm_struct *mm = current->mm;
32492 + unsigned long ip = regs->ip;
32494 + if (v8086_mode(regs))
32495 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32497 +#ifdef CONFIG_PAX_PAGEEXEC
32498 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32499 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32501 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32507 +#ifdef CONFIG_PAX_SEGMEXEC
32508 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32509 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32519 +#ifdef CONFIG_PAX_EMUTRAMP
32520 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32524 + do { /* PaX: libffi trampoline emulation */
32525 + unsigned char mov, jmp;
32526 + unsigned int addr1, addr2;
32528 +#ifdef CONFIG_X86_64
32529 + if ((regs->ip + 9) >> 32)
32533 + err = get_user(mov, (unsigned char __user *)regs->ip);
32534 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32535 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32536 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32541 + if (mov == 0xB8 && jmp == 0xE9) {
32542 + regs->ax = addr1;
32543 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32548 + do { /* PaX: gcc trampoline emulation #1 */
32549 + unsigned char mov1, mov2;
32550 + unsigned short jmp;
32551 + unsigned int addr1, addr2;
32553 +#ifdef CONFIG_X86_64
32554 + if ((regs->ip + 11) >> 32)
32558 + err = get_user(mov1, (unsigned char __user *)regs->ip);
32559 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32560 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32561 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32562 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32567 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32568 + regs->cx = addr1;
32569 + regs->ax = addr2;
32570 + regs->ip = addr2;
32575 + do { /* PaX: gcc trampoline emulation #2 */
32576 + unsigned char mov, jmp;
32577 + unsigned int addr1, addr2;
32579 +#ifdef CONFIG_X86_64
32580 + if ((regs->ip + 9) >> 32)
32584 + err = get_user(mov, (unsigned char __user *)regs->ip);
32585 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32586 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32587 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32592 + if (mov == 0xB9 && jmp == 0xE9) {
32593 + regs->cx = addr1;
32594 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32599 + return 1; /* PaX in action */
32602 +#ifdef CONFIG_X86_64
32603 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32607 + do { /* PaX: libffi trampoline emulation */
32608 + unsigned short mov1, mov2, jmp1;
32609 + unsigned char stcclc, jmp2;
32610 + unsigned long addr1, addr2;
32612 + err = get_user(mov1, (unsigned short __user *)regs->ip);
32613 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32614 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32615 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32616 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32617 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32618 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32623 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32624 + regs->r11 = addr1;
32625 + regs->r10 = addr2;
32626 + if (stcclc == 0xF8)
32627 + regs->flags &= ~X86_EFLAGS_CF;
32629 + regs->flags |= X86_EFLAGS_CF;
32630 + regs->ip = addr1;
32635 + do { /* PaX: gcc trampoline emulation #1 */
32636 + unsigned short mov1, mov2, jmp1;
32637 + unsigned char jmp2;
32638 + unsigned int addr1;
32639 + unsigned long addr2;
32641 + err = get_user(mov1, (unsigned short __user *)regs->ip);
32642 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32643 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32644 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32645 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32646 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32651 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32652 + regs->r11 = addr1;
32653 + regs->r10 = addr2;
32654 + regs->ip = addr1;
32659 + do { /* PaX: gcc trampoline emulation #2 */
32660 + unsigned short mov1, mov2, jmp1;
32661 + unsigned char jmp2;
32662 + unsigned long addr1, addr2;
32664 + err = get_user(mov1, (unsigned short __user *)regs->ip);
32665 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32666 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32667 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32668 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32669 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32674 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32675 + regs->r11 = addr1;
32676 + regs->r10 = addr2;
32677 + regs->ip = addr1;
32682 + return 1; /* PaX in action */
32687 + * PaX: decide what to do with offenders (regs->ip = fault address)
32689 + * returns 1 when task should be killed
32690 + * 2 when gcc trampoline was detected
32692 +static int pax_handle_fetch_fault(struct pt_regs *regs)
32694 + if (v8086_mode(regs))
32697 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32700 +#ifdef CONFIG_X86_32
32701 + return pax_handle_fetch_fault_32(regs);
32703 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32704 + return pax_handle_fetch_fault_32(regs);
32706 + return pax_handle_fetch_fault_64(regs);
32711 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32712 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32716 + printk(KERN_ERR "PAX: bytes at PC: ");
32717 + for (i = 0; i < 20; i++) {
32719 + if (get_user(c, (unsigned char __force_user *)pc+i))
32720 + printk(KERN_CONT "?? ");
32722 + printk(KERN_CONT "%02x ", c);
32726 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32727 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
32729 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
32730 +#ifdef CONFIG_X86_32
32731 + printk(KERN_CONT "???????? ");
32733 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32734 + printk(KERN_CONT "???????? ???????? ");
32736 + printk(KERN_CONT "???????????????? ");
32739 +#ifdef CONFIG_X86_64
32740 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32741 + printk(KERN_CONT "%08x ", (unsigned int)c);
32742 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32745 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32753 + * probe_kernel_write(): safely attempt to write to a location
32754 + * @dst: address to write to
32755 + * @src: pointer to the data that shall be written
32756 + * @size: size of the data chunk
32758 + * Safely write to address @dst from the buffer at @src. If a kernel fault
32759 + * happens, handle that and return -EFAULT.
32761 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32764 + mm_segment_t old_fs = get_fs();
32766 + set_fs(KERNEL_DS);
32767 + pagefault_disable();
32768 + pax_open_kernel();
32769 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32770 + pax_close_kernel();
32771 + pagefault_enable();
32774 + return ret ? -EFAULT : 0;
32776 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32777 index 81bf3d2..7ef25c2 100644
32778 --- a/arch/x86/mm/gup.c
32779 +++ b/arch/x86/mm/gup.c
32780 @@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32782 len = (unsigned long) nr_pages << PAGE_SHIFT;
32784 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32785 + if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32786 (void __user *)start, len)))
32789 @@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32793 + if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32794 + (void __user *)start, len)))
32798 * XXX: batch / limit 'nr', to avoid large irq off latency
32799 * needs some instrumenting to determine the common sizes used by
32800 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32801 index 4500142..53a363c 100644
32802 --- a/arch/x86/mm/highmem_32.c
32803 +++ b/arch/x86/mm/highmem_32.c
32804 @@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32805 idx = type + KM_TYPE_NR*smp_processor_id();
32806 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32807 BUG_ON(!pte_none(*(kmap_pte-idx)));
32809 + pax_open_kernel();
32810 set_pte(kmap_pte-idx, mk_pte(page, prot));
32811 + pax_close_kernel();
32813 arch_flush_lazy_mmu_mode();
32815 return (void *)vaddr;
32816 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32817 index 42982b2..7168fc3 100644
32818 --- a/arch/x86/mm/hugetlbpage.c
32819 +++ b/arch/x86/mm/hugetlbpage.c
32820 @@ -74,23 +74,24 @@ int pud_huge(pud_t pud)
32821 #ifdef CONFIG_HUGETLB_PAGE
32822 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32823 unsigned long addr, unsigned long len,
32824 - unsigned long pgoff, unsigned long flags)
32825 + unsigned long pgoff, unsigned long flags, unsigned long offset)
32827 struct hstate *h = hstate_file(file);
32828 struct vm_unmapped_area_info info;
32833 info.low_limit = current->mm->mmap_legacy_base;
32834 info.high_limit = TASK_SIZE;
32835 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32836 info.align_offset = 0;
32837 + info.threadstack_offset = offset;
32838 return vm_unmapped_area(&info);
32841 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32842 unsigned long addr0, unsigned long len,
32843 - unsigned long pgoff, unsigned long flags)
32844 + unsigned long pgoff, unsigned long flags, unsigned long offset)
32846 struct hstate *h = hstate_file(file);
32847 struct vm_unmapped_area_info info;
32848 @@ -102,6 +103,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32849 info.high_limit = current->mm->mmap_base;
32850 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32851 info.align_offset = 0;
32852 + info.threadstack_offset = offset;
32853 addr = vm_unmapped_area(&info);
32856 @@ -114,6 +116,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32857 VM_BUG_ON(addr != -ENOMEM);
32859 info.low_limit = TASK_UNMAPPED_BASE;
32861 +#ifdef CONFIG_PAX_RANDMMAP
32862 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32863 + info.low_limit += current->mm->delta_mmap;
32866 info.high_limit = TASK_SIZE;
32867 addr = vm_unmapped_area(&info);
32869 @@ -128,10 +136,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32870 struct hstate *h = hstate_file(file);
32871 struct mm_struct *mm = current->mm;
32872 struct vm_area_struct *vma;
32873 + unsigned long pax_task_size = TASK_SIZE;
32874 + unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32876 if (len & ~huge_page_mask(h))
32878 - if (len > TASK_SIZE)
32880 +#ifdef CONFIG_PAX_SEGMEXEC
32881 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
32882 + pax_task_size = SEGMEXEC_TASK_SIZE;
32885 + pax_task_size -= PAGE_SIZE;
32887 + if (len > pax_task_size)
32890 if (flags & MAP_FIXED) {
32891 @@ -140,19 +158,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32895 +#ifdef CONFIG_PAX_RANDMMAP
32896 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32900 addr = ALIGN(addr, huge_page_size(h));
32901 vma = find_vma(mm, addr);
32902 - if (TASK_SIZE - len >= addr &&
32903 - (!vma || addr + len <= vma->vm_start))
32904 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32907 if (mm->get_unmapped_area == arch_get_unmapped_area)
32908 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32910 + pgoff, flags, offset);
32912 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32914 + pgoff, flags, offset);
32916 #endif /* CONFIG_HUGETLB_PAGE */
32918 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32919 index 1d55318..d58fd6a 100644
32920 --- a/arch/x86/mm/init.c
32921 +++ b/arch/x86/mm/init.c
32923 #include <linux/swap.h>
32924 #include <linux/memblock.h>
32925 #include <linux/bootmem.h> /* for max_low_pfn */
32926 +#include <linux/tboot.h>
32928 #include <asm/cacheflush.h>
32929 #include <asm/e820.h>
32931 #include <asm/proto.h>
32932 #include <asm/dma.h> /* for MAX_DMA_PFN */
32933 #include <asm/microcode.h>
32934 +#include <asm/desc.h>
32935 +#include <asm/bios_ebda.h>
32938 * We need to define the tracepoints somewhere, and tlb.c
32939 @@ -615,7 +618,18 @@ void __init init_mem_mapping(void)
32940 early_ioremap_page_table_range_init();
32943 +#ifdef CONFIG_PAX_PER_CPU_PGD
32944 + clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32945 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32946 + KERNEL_PGD_PTRS);
32947 + clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32948 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32949 + KERNEL_PGD_PTRS);
32950 + load_cr3(get_cpu_pgd(0, kernel));
32952 load_cr3(swapper_pg_dir);
32957 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32958 @@ -631,10 +645,40 @@ void __init init_mem_mapping(void)
32959 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32960 * mmio resources as well as potential bios/acpi data regions.
32963 +#ifdef CONFIG_GRKERNSEC_KMEM
32964 +static unsigned int ebda_start __read_only;
32965 +static unsigned int ebda_end __read_only;
32968 int devmem_is_allowed(unsigned long pagenr)
32970 - if (pagenr < 256)
32971 +#ifdef CONFIG_GRKERNSEC_KMEM
32976 + if (pagenr >= ebda_start && pagenr < ebda_end)
32978 + /* if tboot is in use, allow access to its hardcoded serial log range */
32979 + if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32984 +#ifdef CONFIG_VM86
32985 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32990 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32992 +#ifdef CONFIG_GRKERNSEC_KMEM
32993 + /* throw out everything else below 1MB */
32994 + if (pagenr <= 256)
32997 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32999 if (!page_is_ram(pagenr))
33000 @@ -680,8 +724,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
33004 +#ifdef CONFIG_GRKERNSEC_KMEM
33005 +static inline void gr_init_ebda(void)
33007 + unsigned int ebda_addr;
33008 + unsigned int ebda_size = 0;
33010 + ebda_addr = get_bios_ebda();
33012 + ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
33013 + ebda_size <<= 10;
33015 + if (ebda_addr && ebda_size) {
33016 + ebda_start = ebda_addr >> PAGE_SHIFT;
33017 + ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
33019 + ebda_start = 0x9f000 >> PAGE_SHIFT;
33020 + ebda_end = 0xa0000 >> PAGE_SHIFT;
33024 +static inline void gr_init_ebda(void) { }
33027 void free_initmem(void)
33029 +#ifdef CONFIG_PAX_KERNEXEC
33030 +#ifdef CONFIG_X86_32
33031 + /* PaX: limit KERNEL_CS to actual size */
33032 + unsigned long addr, limit;
33033 + struct desc_struct d;
33039 + unsigned long addr, end;
33045 +#ifdef CONFIG_PAX_KERNEXEC
33046 +#ifdef CONFIG_X86_32
33047 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
33048 + limit = (limit - 1UL) >> PAGE_SHIFT;
33050 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
33051 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
33052 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
33053 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
33054 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
33057 + /* PaX: make KERNEL_CS read-only */
33058 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
33059 + if (!paravirt_enabled())
33060 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
33062 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
33063 + pgd = pgd_offset_k(addr);
33064 + pud = pud_offset(pgd, addr);
33065 + pmd = pmd_offset(pud, addr);
33066 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
33069 +#ifdef CONFIG_X86_PAE
33070 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
33072 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
33073 + pgd = pgd_offset_k(addr);
33074 + pud = pud_offset(pgd, addr);
33075 + pmd = pmd_offset(pud, addr);
33076 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
33081 +#ifdef CONFIG_MODULES
33082 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
33086 + /* PaX: make kernel code/rodata read-only, rest non-executable */
33087 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
33088 + pgd = pgd_offset_k(addr);
33089 + pud = pud_offset(pgd, addr);
33090 + pmd = pmd_offset(pud, addr);
33091 + if (!pmd_present(*pmd))
33093 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
33094 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
33096 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
33099 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
33100 + end = addr + KERNEL_IMAGE_SIZE;
33101 + for (; addr < end; addr += PMD_SIZE) {
33102 + pgd = pgd_offset_k(addr);
33103 + pud = pud_offset(pgd, addr);
33104 + pmd = pmd_offset(pud, addr);
33105 + if (!pmd_present(*pmd))
33107 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
33108 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
33115 free_init_pages("unused kernel",
33116 (unsigned long)(&__init_begin),
33117 (unsigned long)(&__init_end));
33118 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
33119 index c8140e1..59257fc 100644
33120 --- a/arch/x86/mm/init_32.c
33121 +++ b/arch/x86/mm/init_32.c
33122 @@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
33123 bool __read_mostly __vmalloc_start_set = false;
33126 - * Creates a middle page table and puts a pointer to it in the
33127 - * given global directory entry. This only returns the gd entry
33128 - * in non-PAE compilation mode, since the middle layer is folded.
33130 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
33133 - pmd_t *pmd_table;
33135 -#ifdef CONFIG_X86_PAE
33136 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
33137 - pmd_table = (pmd_t *)alloc_low_page();
33138 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
33139 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
33140 - pud = pud_offset(pgd, 0);
33141 - BUG_ON(pmd_table != pmd_offset(pud, 0));
33143 - return pmd_table;
33146 - pud = pud_offset(pgd, 0);
33147 - pmd_table = pmd_offset(pud, 0);
33149 - return pmd_table;
33153 * Create a page table and place a pointer to it in a middle page
33156 @@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
33157 pte_t *page_table = (pte_t *)alloc_low_page();
33159 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
33160 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
33161 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
33163 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
33165 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
33168 return pte_offset_kernel(pmd, 0);
33171 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
33174 + pmd_t *pmd_table;
33176 + pud = pud_offset(pgd, 0);
33177 + pmd_table = pmd_offset(pud, 0);
33179 + return pmd_table;
33182 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
33184 int pgd_idx = pgd_index(vaddr);
33185 @@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33186 int pgd_idx, pmd_idx;
33187 unsigned long vaddr;
33192 unsigned long count = page_table_range_init_count(start, end);
33193 @@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33194 pgd = pgd_base + pgd_idx;
33196 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
33197 - pmd = one_md_table_init(pgd);
33198 - pmd = pmd + pmd_index(vaddr);
33199 + pud = pud_offset(pgd, vaddr);
33200 + pmd = pmd_offset(pud, vaddr);
33202 +#ifdef CONFIG_X86_PAE
33203 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33206 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
33207 pmd++, pmd_idx++) {
33208 pte = page_table_kmap_check(one_page_table_init(pmd),
33209 @@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33213 -static inline int is_kernel_text(unsigned long addr)
33214 +static inline int is_kernel_text(unsigned long start, unsigned long end)
33216 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
33219 + if ((start >= ktla_ktva((unsigned long)_etext) ||
33220 + end <= ktla_ktva((unsigned long)_stext)) &&
33221 + (start >= ktla_ktva((unsigned long)_einittext) ||
33222 + end <= ktla_ktva((unsigned long)_sinittext)) &&
33224 +#ifdef CONFIG_ACPI_SLEEP
33225 + (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
33228 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
33234 @@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
33235 unsigned long last_map_addr = end;
33236 unsigned long start_pfn, end_pfn;
33237 pgd_t *pgd_base = swapper_pg_dir;
33238 - int pgd_idx, pmd_idx, pte_ofs;
33239 + unsigned int pgd_idx, pmd_idx, pte_ofs;
33245 unsigned pages_2m, pages_4k;
33246 @@ -291,8 +295,13 @@ repeat:
33248 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33249 pgd = pgd_base + pgd_idx;
33250 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
33251 - pmd = one_md_table_init(pgd);
33252 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
33253 + pud = pud_offset(pgd, 0);
33254 + pmd = pmd_offset(pud, 0);
33256 +#ifdef CONFIG_X86_PAE
33257 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33260 if (pfn >= end_pfn)
33262 @@ -304,14 +313,13 @@ repeat:
33264 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
33265 pmd++, pmd_idx++) {
33266 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
33267 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
33270 * Map with big pages if possible, otherwise
33271 * create normal page tables:
33274 - unsigned int addr2;
33275 pgprot_t prot = PAGE_KERNEL_LARGE;
33277 * first pass will use the same initial
33278 @@ -322,11 +330,7 @@ repeat:
33281 pfn &= PMD_MASK >> PAGE_SHIFT;
33282 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33283 - PAGE_OFFSET + PAGE_SIZE-1;
33285 - if (is_kernel_text(addr) ||
33286 - is_kernel_text(addr2))
33287 + if (is_kernel_text(address, address + PMD_SIZE))
33288 prot = PAGE_KERNEL_LARGE_EXEC;
33291 @@ -343,7 +347,7 @@ repeat:
33292 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33294 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33295 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33296 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33297 pgprot_t prot = PAGE_KERNEL;
33299 * first pass will use the same initial
33300 @@ -351,7 +355,7 @@ repeat:
33302 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33304 - if (is_kernel_text(addr))
33305 + if (is_kernel_text(address, address + PAGE_SIZE))
33306 prot = PAGE_KERNEL_EXEC;
33309 @@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33311 pud = pud_offset(pgd, va);
33312 pmd = pmd_offset(pud, va);
33313 - if (!pmd_present(*pmd))
33314 + if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
33317 /* should not be large page here */
33318 @@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33320 static void __init pagetable_init(void)
33322 - pgd_t *pgd_base = swapper_pg_dir;
33324 - permanent_kmaps_init(pgd_base);
33325 + permanent_kmaps_init(swapper_pg_dir);
33328 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
33329 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL);
33330 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33332 /* user-defined highmem size */
33333 @@ -787,10 +789,10 @@ void __init mem_init(void)
33334 ((unsigned long)&__init_end -
33335 (unsigned long)&__init_begin) >> 10,
33337 - (unsigned long)&_etext, (unsigned long)&_edata,
33338 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33339 + (unsigned long)&_sdata, (unsigned long)&_edata,
33340 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33342 - (unsigned long)&_text, (unsigned long)&_etext,
33343 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33344 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33347 @@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
33348 if (!kernel_set_to_readonly)
33351 + start = ktla_ktva(start);
33352 pr_debug("Set kernel text: %lx - %lx for read write\n",
33353 start, start+size);
33355 @@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
33356 if (!kernel_set_to_readonly)
33359 + start = ktla_ktva(start);
33360 pr_debug("Set kernel text: %lx - %lx for read only\n",
33361 start, start+size);
33363 @@ -926,6 +930,7 @@ void mark_rodata_ro(void)
33364 unsigned long start = PFN_ALIGN(_text);
33365 unsigned long size = PFN_ALIGN(_etext) - start;
33367 + start = ktla_ktva(start);
33368 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33369 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33371 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33372 index 3fba623..5ee9802 100644
33373 --- a/arch/x86/mm/init_64.c
33374 +++ b/arch/x86/mm/init_64.c
33375 @@ -136,7 +136,7 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
33376 * around without checking the pgd every time.
33379 -pteval_t __supported_pte_mask __read_mostly = ~0;
33380 +pteval_t __supported_pte_mask __read_only = ~_PAGE_NX;
33381 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33383 int force_personality32;
33384 @@ -169,7 +169,12 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33386 for (address = start; address <= end; address += PGDIR_SIZE) {
33387 const pgd_t *pgd_ref = pgd_offset_k(address);
33389 +#ifdef CONFIG_PAX_PER_CPU_PGD
33390 + unsigned long cpu;
33396 * When it is called after memory hot remove, pgd_none()
33397 @@ -180,6 +185,25 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33400 spin_lock(&pgd_lock);
33402 +#ifdef CONFIG_PAX_PER_CPU_PGD
33403 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33404 + pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33406 + if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33407 + BUG_ON(pgd_page_vaddr(*pgd)
33408 + != pgd_page_vaddr(*pgd_ref));
33411 + if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
33414 + if (pgd_none(*pgd))
33415 + set_pgd(pgd, *pgd_ref);
33418 + pgd = pgd_offset_cpu(cpu, kernel, address);
33420 list_for_each_entry(page, &pgd_list, lru) {
33422 spinlock_t *pgt_lock;
33423 @@ -188,6 +212,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33424 /* the pgt_lock only for Xen */
33425 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33426 spin_lock(pgt_lock);
33429 if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33430 BUG_ON(pgd_page_vaddr(*pgd)
33431 @@ -201,7 +226,10 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33432 set_pgd(pgd, *pgd_ref);
33435 +#ifndef CONFIG_PAX_PER_CPU_PGD
33436 spin_unlock(pgt_lock);
33440 spin_unlock(&pgd_lock);
33442 @@ -234,7 +262,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33444 if (pgd_none(*pgd)) {
33445 pud_t *pud = (pud_t *)spp_getpage();
33446 - pgd_populate(&init_mm, pgd, pud);
33447 + pgd_populate_kernel(&init_mm, pgd, pud);
33448 if (pud != pud_offset(pgd, 0))
33449 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33450 pud, pud_offset(pgd, 0));
33451 @@ -246,7 +274,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33453 if (pud_none(*pud)) {
33454 pmd_t *pmd = (pmd_t *) spp_getpage();
33455 - pud_populate(&init_mm, pud, pmd);
33456 + pud_populate_kernel(&init_mm, pud, pmd);
33457 if (pmd != pmd_offset(pud, 0))
33458 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33459 pmd, pmd_offset(pud, 0));
33460 @@ -275,7 +303,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33461 pmd = fill_pmd(pud, vaddr);
33462 pte = fill_pte(pmd, vaddr);
33464 + pax_open_kernel();
33465 set_pte(pte, new_pte);
33466 + pax_close_kernel();
33469 * It's enough to flush this one mapping.
33470 @@ -337,14 +367,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33471 pgd = pgd_offset_k((unsigned long)__va(phys));
33472 if (pgd_none(*pgd)) {
33473 pud = (pud_t *) spp_getpage();
33474 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33476 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33478 pud = pud_offset(pgd, (unsigned long)__va(phys));
33479 if (pud_none(*pud)) {
33480 pmd = (pmd_t *) spp_getpage();
33481 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33483 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33485 pmd = pmd_offset(pud, phys);
33486 BUG_ON(!pmd_none(*pmd));
33487 @@ -585,7 +613,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33490 spin_lock(&init_mm.page_table_lock);
33491 - pud_populate(&init_mm, pud, pmd);
33492 + pud_populate_kernel(&init_mm, pud, pmd);
33493 spin_unlock(&init_mm.page_table_lock);
33496 @@ -626,7 +654,7 @@ kernel_physical_mapping_init(unsigned long start,
33499 spin_lock(&init_mm.page_table_lock);
33500 - pgd_populate(&init_mm, pgd, pud);
33501 + pgd_populate_kernel(&init_mm, pgd, pud);
33502 spin_unlock(&init_mm.page_table_lock);
33503 pgd_changed = true;
33505 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33506 index 9ca35fc..4b2b7b7 100644
33507 --- a/arch/x86/mm/iomap_32.c
33508 +++ b/arch/x86/mm/iomap_32.c
33509 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33510 type = kmap_atomic_idx_push();
33511 idx = type + KM_TYPE_NR * smp_processor_id();
33512 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33514 + pax_open_kernel();
33515 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33516 + pax_close_kernel();
33518 arch_flush_lazy_mmu_mode();
33520 return (void *)vaddr;
33521 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33522 index 70e7444..75b9a13 100644
33523 --- a/arch/x86/mm/ioremap.c
33524 +++ b/arch/x86/mm/ioremap.c
33525 @@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33528 for (i = 0; i < nr_pages; ++i)
33529 - if (pfn_valid(start_pfn + i) &&
33530 - !PageReserved(pfn_to_page(start_pfn + i)))
33531 + if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33532 + !PageReserved(pfn_to_page(start_pfn + i))))
33535 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33536 @@ -288,7 +288,7 @@ EXPORT_SYMBOL(ioremap_prot);
33538 * Caller must ensure there is only one unmapping for the same pointer.
33540 -void iounmap(volatile void __iomem *addr)
33541 +void iounmap(const volatile void __iomem *addr)
33543 struct vm_struct *p, *o;
33545 @@ -351,32 +351,36 @@ int arch_ioremap_pmd_supported(void)
33547 void *xlate_dev_mem_ptr(phys_addr_t phys)
33549 - unsigned long start = phys & PAGE_MASK;
33550 - unsigned long offset = phys & ~PAGE_MASK;
33551 - unsigned long vaddr;
33552 + phys_addr_t pfn = phys >> PAGE_SHIFT;
33554 - /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33555 - if (page_is_ram(start >> PAGE_SHIFT))
33556 - return __va(phys);
33557 + if (page_is_ram(pfn)) {
33558 +#ifdef CONFIG_HIGHMEM
33559 + if (pfn >= max_low_pfn)
33560 + return kmap_high(pfn_to_page(pfn));
33563 + return __va(phys);
33566 - vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE);
33567 - /* Only add the offset on success and return NULL if the ioremap() failed: */
33571 - return (void *)vaddr;
33572 + return (void __force *)ioremap_cache(phys, 1);
33575 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
33577 - if (page_is_ram(phys >> PAGE_SHIFT))
33578 + phys_addr_t pfn = phys >> PAGE_SHIFT;
33580 + if (page_is_ram(pfn)) {
33581 +#ifdef CONFIG_HIGHMEM
33582 + if (pfn >= max_low_pfn)
33583 + kunmap_high(pfn_to_page(pfn));
33588 - iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33590 + iounmap((void __iomem __force *)addr);
33593 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33594 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33596 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33598 @@ -412,8 +416,7 @@ void __init early_ioremap_init(void)
33599 early_ioremap_setup();
33601 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33602 - memset(bm_pte, 0, sizeof(bm_pte));
33603 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
33604 + pmd_populate_user(&init_mm, pmd, bm_pte);
33607 * The boot-ioremap range spans multiple pmds, for which
33608 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33609 index b4f2e7e..96c9c3e 100644
33610 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
33611 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33612 @@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33613 * memory (e.g. tracked pages)? For now, we need this to avoid
33614 * invoking kmemcheck for PnP BIOS calls.
33616 - if (regs->flags & X86_VM_MASK)
33617 + if (v8086_mode(regs))
33619 - if (regs->cs != __KERNEL_CS)
33620 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33623 pte = kmemcheck_pte_lookup(address);
33624 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33625 index 9d518d6..8a091f5 100644
33626 --- a/arch/x86/mm/mmap.c
33627 +++ b/arch/x86/mm/mmap.c
33628 @@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
33629 * Leave an at least ~128 MB hole with possible stack randomization.
33631 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33632 -#define MAX_GAP (TASK_SIZE/6*5)
33633 +#define MAX_GAP (pax_task_size/6*5)
33635 static int mmap_is_legacy(void)
33637 @@ -81,27 +81,40 @@ unsigned long arch_mmap_rnd(void)
33638 return rnd << PAGE_SHIFT;
33641 -static unsigned long mmap_base(unsigned long rnd)
33642 +static unsigned long mmap_base(struct mm_struct *mm, unsigned long rnd)
33644 unsigned long gap = rlimit(RLIMIT_STACK);
33645 + unsigned long pax_task_size = TASK_SIZE;
33647 +#ifdef CONFIG_PAX_SEGMEXEC
33648 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
33649 + pax_task_size = SEGMEXEC_TASK_SIZE;
33654 else if (gap > MAX_GAP)
33657 - return PAGE_ALIGN(TASK_SIZE - gap - rnd);
33658 + return PAGE_ALIGN(pax_task_size - gap - rnd);
33662 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
33663 * does, but not when emulating X86_32
33665 -static unsigned long mmap_legacy_base(unsigned long rnd)
33666 +static unsigned long mmap_legacy_base(struct mm_struct *mm, unsigned long rnd)
33668 - if (mmap_is_ia32())
33669 + if (mmap_is_ia32()) {
33671 +#ifdef CONFIG_PAX_SEGMEXEC
33672 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
33673 + return SEGMEXEC_TASK_UNMAPPED_BASE;
33677 return TASK_UNMAPPED_BASE;
33680 return TASK_UNMAPPED_BASE + rnd;
33683 @@ -113,16 +126,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
33685 unsigned long random_factor = 0UL;
33687 +#ifdef CONFIG_PAX_RANDMMAP
33688 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
33690 if (current->flags & PF_RANDOMIZE)
33691 random_factor = arch_mmap_rnd();
33693 - mm->mmap_legacy_base = mmap_legacy_base(random_factor);
33694 + mm->mmap_legacy_base = mmap_legacy_base(mm, random_factor);
33696 if (mmap_is_legacy()) {
33697 mm->mmap_base = mm->mmap_legacy_base;
33698 mm->get_unmapped_area = arch_get_unmapped_area;
33700 - mm->mmap_base = mmap_base(random_factor);
33701 + mm->mmap_base = mmap_base(mm, random_factor);
33702 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
33705 +#ifdef CONFIG_PAX_RANDMMAP
33706 + if (mm->pax_flags & MF_PAX_RANDMMAP) {
33707 + mm->mmap_legacy_base += mm->delta_mmap;
33708 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33713 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33714 index 0057a7a..95c7edd 100644
33715 --- a/arch/x86/mm/mmio-mod.c
33716 +++ b/arch/x86/mm/mmio-mod.c
33717 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33721 - unsigned char *ip = (unsigned char *)instptr;
33722 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33723 my_trace->opcode = MMIO_UNKNOWN_OP;
33724 my_trace->width = 0;
33725 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33726 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33727 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33728 void __iomem *addr)
33730 - static atomic_t next_id;
33731 + static atomic_unchecked_t next_id;
33732 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33733 /* These are page-unaligned. */
33734 struct mmiotrace_map map = {
33735 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33739 - .id = atomic_inc_return(&next_id)
33740 + .id = atomic_inc_return_unchecked(&next_id)
33742 map.map_id = trace->id;
33744 @@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33745 ioremap_trace_core(offset, size, addr);
33748 -static void iounmap_trace_core(volatile void __iomem *addr)
33749 +static void iounmap_trace_core(const volatile void __iomem *addr)
33751 struct mmiotrace_map map = {
33753 @@ -328,7 +328,7 @@ not_enabled:
33757 -void mmiotrace_iounmap(volatile void __iomem *addr)
33758 +void mmiotrace_iounmap(const volatile void __iomem *addr)
33761 if (is_enabled()) /* recheck and proper locking in *_core() */
33762 diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33763 index 4053bb5..b1ad3dc 100644
33764 --- a/arch/x86/mm/numa.c
33765 +++ b/arch/x86/mm/numa.c
33766 @@ -506,7 +506,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
33770 -static int __init numa_register_memblks(struct numa_meminfo *mi)
33771 +static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33773 unsigned long uninitialized_var(pfn_align);
33775 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33776 index 89af288..05381957 100644
33777 --- a/arch/x86/mm/pageattr.c
33778 +++ b/arch/x86/mm/pageattr.c
33779 @@ -260,7 +260,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33781 #ifdef CONFIG_PCI_BIOS
33782 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33783 - pgprot_val(forbidden) |= _PAGE_NX;
33784 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33788 @@ -268,9 +268,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33789 * Does not cover __inittext since that is gone later on. On
33790 * 64bit we do not enforce !NX on the low mapping
33792 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
33793 - pgprot_val(forbidden) |= _PAGE_NX;
33794 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33795 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33797 +#ifdef CONFIG_DEBUG_RODATA
33799 * The .rodata section needs to be read-only. Using the pfn
33800 * catches all aliases.
33801 @@ -278,6 +279,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33802 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33803 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33804 pgprot_val(forbidden) |= _PAGE_RW;
33807 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33809 @@ -316,6 +318,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33813 +#ifdef CONFIG_PAX_KERNEXEC
33814 + if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33815 + pgprot_val(forbidden) |= _PAGE_RW;
33816 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33820 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33823 @@ -438,23 +447,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33824 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33826 /* change init_mm */
33827 + pax_open_kernel();
33828 set_pte_atomic(kpte, pte);
33830 #ifdef CONFIG_X86_32
33831 if (!SHARED_KERNEL_PMD) {
33833 +#ifdef CONFIG_PAX_PER_CPU_PGD
33834 + unsigned long cpu;
33839 +#ifdef CONFIG_PAX_PER_CPU_PGD
33840 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33841 + pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33843 list_for_each_entry(page, &pgd_list, lru) {
33845 + pgd_t *pgd = (pgd_t *)page_address(page);
33851 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
33852 + pgd += pgd_index(address);
33853 pud = pud_offset(pgd, address);
33854 pmd = pmd_offset(pud, address);
33855 set_pte_atomic((pte_t *)pmd, pte);
33859 + pax_close_kernel();
33863 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33864 index 35af677..e7bf11f 100644
33865 --- a/arch/x86/mm/pat.c
33866 +++ b/arch/x86/mm/pat.c
33867 @@ -89,7 +89,7 @@ static inline enum page_cache_mode get_page_memtype(struct page *pg)
33868 unsigned long pg_flags = pg->flags & _PGMT_MASK;
33870 if (pg_flags == _PGMT_DEFAULT)
33872 + return _PAGE_CACHE_MODE_NUM;
33873 else if (pg_flags == _PGMT_WC)
33874 return _PAGE_CACHE_MODE_WC;
33875 else if (pg_flags == _PGMT_UC_MINUS)
33876 @@ -346,7 +346,7 @@ static int reserve_ram_pages_type(u64 start, u64 end,
33878 page = pfn_to_page(pfn);
33879 type = get_page_memtype(page);
33880 - if (type != -1) {
33881 + if (type != _PAGE_CACHE_MODE_NUM) {
33882 pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
33883 start, end - 1, type, req_type);
33885 @@ -498,7 +498,7 @@ int free_memtype(u64 start, u64 end)
33888 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33889 - current->comm, current->pid, start, end - 1);
33890 + current->comm, task_pid_nr(current), start, end - 1);
33894 @@ -532,10 +532,10 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
33895 page = pfn_to_page(paddr >> PAGE_SHIFT);
33896 rettype = get_page_memtype(page);
33898 - * -1 from get_page_memtype() implies RAM page is in its
33899 + * _PAGE_CACHE_MODE_NUM from get_page_memtype() implies RAM page is in its
33900 * default state and not reserved, and hence of type WB
33902 - if (rettype == -1)
33903 + if (rettype == _PAGE_CACHE_MODE_NUM)
33904 rettype = _PAGE_CACHE_MODE_WB;
33907 @@ -628,8 +628,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33909 while (cursor < to) {
33910 if (!devmem_is_allowed(pfn)) {
33911 - printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n",
33912 - current->comm, from, to - 1);
33913 + printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx), PAT prevents it\n",
33914 + current->comm, from, to - 1, cursor);
33917 cursor += PAGE_SIZE;
33918 @@ -700,7 +700,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
33919 if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
33920 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33921 "for [mem %#010Lx-%#010Lx]\n",
33922 - current->comm, current->pid,
33923 + current->comm, task_pid_nr(current),
33925 base, (unsigned long long)(base + size-1));
33927 @@ -735,7 +735,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33928 pcm = lookup_memtype(paddr);
33929 if (want_pcm != pcm) {
33930 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33931 - current->comm, current->pid,
33932 + current->comm, task_pid_nr(current),
33933 cattr_name(want_pcm),
33934 (unsigned long long)paddr,
33935 (unsigned long long)(paddr + size - 1),
33936 @@ -757,7 +757,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33937 free_memtype(paddr, paddr + size);
33938 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33939 " for [mem %#010Lx-%#010Lx], got %s\n",
33940 - current->comm, current->pid,
33941 + current->comm, task_pid_nr(current),
33942 cattr_name(want_pcm),
33943 (unsigned long long)paddr,
33944 (unsigned long long)(paddr + size - 1),
33945 diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33946 index 6582adc..fcc5d0b 100644
33947 --- a/arch/x86/mm/pat_rbtree.c
33948 +++ b/arch/x86/mm/pat_rbtree.c
33949 @@ -161,7 +161,7 @@ success:
33952 printk(KERN_INFO "%s:%d conflicting memory types "
33953 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33954 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33955 end, cattr_name(found_type), cattr_name(match->type));
33958 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33959 index 9f0614d..92ae64a 100644
33960 --- a/arch/x86/mm/pf_in.c
33961 +++ b/arch/x86/mm/pf_in.c
33962 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33964 enum reason_type rv = OTHERS;
33966 - p = (unsigned char *)ins_addr;
33967 + p = (unsigned char *)ktla_ktva(ins_addr);
33968 p += skip_prefix(p, &prf);
33969 p += get_opcode(p, &opcode);
33971 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33972 struct prefix_bits prf;
33975 - p = (unsigned char *)ins_addr;
33976 + p = (unsigned char *)ktla_ktva(ins_addr);
33977 p += skip_prefix(p, &prf);
33978 p += get_opcode(p, &opcode);
33980 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33981 struct prefix_bits prf;
33984 - p = (unsigned char *)ins_addr;
33985 + p = (unsigned char *)ktla_ktva(ins_addr);
33986 p += skip_prefix(p, &prf);
33987 p += get_opcode(p, &opcode);
33989 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33990 struct prefix_bits prf;
33993 - p = (unsigned char *)ins_addr;
33994 + p = (unsigned char *)ktla_ktva(ins_addr);
33995 p += skip_prefix(p, &prf);
33996 p += get_opcode(p, &opcode);
33997 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33998 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33999 struct prefix_bits prf;
34002 - p = (unsigned char *)ins_addr;
34003 + p = (unsigned char *)ktla_ktva(ins_addr);
34004 p += skip_prefix(p, &prf);
34005 p += get_opcode(p, &opcode);
34006 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
34007 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
34008 index 0b97d2c..597bb38 100644
34009 --- a/arch/x86/mm/pgtable.c
34010 +++ b/arch/x86/mm/pgtable.c
34011 @@ -98,10 +98,75 @@ static inline void pgd_list_del(pgd_t *pgd)
34012 list_del(&page->lru);
34015 -#define UNSHARED_PTRS_PER_PGD \
34016 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
34017 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
34018 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
34020 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
34022 + unsigned int count = USER_PGD_PTRS;
34024 + if (!pax_user_shadow_base)
34028 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
34032 +#ifdef CONFIG_PAX_PER_CPU_PGD
34033 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
34035 + unsigned int count = USER_PGD_PTRS;
34037 + while (count--) {
34040 +#ifdef CONFIG_X86_64
34041 + pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
34046 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
34047 + pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
34056 +#ifdef CONFIG_X86_64
34057 +#define pxd_t pud_t
34058 +#define pyd_t pgd_t
34059 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
34060 +#define pgtable_pxd_page_ctor(page) true
34061 +#define pgtable_pxd_page_dtor(page) do {} while (0)
34062 +#define pxd_free(mm, pud) pud_free((mm), (pud))
34063 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
34064 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
34065 +#define PYD_SIZE PGDIR_SIZE
34066 +#define mm_inc_nr_pxds(mm) do {} while (0)
34067 +#define mm_dec_nr_pxds(mm) do {} while (0)
34069 +#define pxd_t pmd_t
34070 +#define pyd_t pud_t
34071 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
34072 +#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
34073 +#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
34074 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
34075 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
34076 +#define pyd_offset(mm, address) pud_offset((mm), (address))
34077 +#define PYD_SIZE PUD_SIZE
34078 +#define mm_inc_nr_pxds(mm) mm_inc_nr_pmds(mm)
34079 +#define mm_dec_nr_pxds(mm) mm_dec_nr_pmds(mm)
34082 +#ifdef CONFIG_PAX_PER_CPU_PGD
34083 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
34084 +static inline void pgd_dtor(pgd_t *pgd) {}
34086 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
34088 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
34089 @@ -142,6 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
34091 spin_unlock(&pgd_lock);
34096 * List of all pgd's needed for non-PAE so it can invalidate entries
34097 @@ -154,7 +220,7 @@ static void pgd_dtor(pgd_t *pgd)
34101 -#ifdef CONFIG_X86_PAE
34102 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
34104 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
34105 * updating the top-level pagetable entries to guarantee the
34106 @@ -166,7 +232,7 @@ static void pgd_dtor(pgd_t *pgd)
34107 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
34108 * and initialize the kernel pmds here.
34110 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
34111 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
34113 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
34115 @@ -184,46 +250,48 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
34119 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
34120 +#define PREALLOCATED_PXDS USER_PGD_PTRS
34121 #else /* !CONFIG_X86_PAE */
34123 /* No need to prepopulate any pagetable entries in non-PAE modes. */
34124 -#define PREALLOCATED_PMDS 0
34125 +#define PREALLOCATED_PXDS 0
34127 #endif /* CONFIG_X86_PAE */
34129 -static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
34130 +static void free_pxds(struct mm_struct *mm, pxd_t *pxds[])
34134 - for(i = 0; i < PREALLOCATED_PMDS; i++)
34136 - pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
34137 - free_page((unsigned long)pmds[i]);
34138 - mm_dec_nr_pmds(mm);
34139 + for(i = 0; i < PREALLOCATED_PXDS; i++)
34141 + pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
34142 + free_page((unsigned long)pxds[i]);
34143 + mm_dec_nr_pxds(mm);
34147 -static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
34148 +static int preallocate_pxds(struct mm_struct *mm, pxd_t *pxds[])
34151 bool failed = false;
34153 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
34154 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
34156 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
34157 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
34160 - if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
34161 - free_page((unsigned long)pmd);
34163 + if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
34164 + free_page((unsigned long)pxd);
34169 - mm_inc_nr_pmds(mm);
34172 + mm_inc_nr_pxds(mm);
34177 - free_pmds(mm, pmds);
34178 + free_pxds(mm, pxds);
34182 @@ -236,43 +304,47 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
34183 * preallocate which never got a corresponding vma will need to be
34186 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
34187 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
34191 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
34192 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
34193 pgd_t pgd = pgdp[i];
34195 if (pgd_val(pgd) != 0) {
34196 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
34197 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
34199 - pgdp[i] = native_make_pgd(0);
34200 + set_pgd(pgdp + i, native_make_pgd(0));
34202 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
34203 - pmd_free(mm, pmd);
34204 - mm_dec_nr_pmds(mm);
34205 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
34206 + pxd_free(mm, pxd);
34207 + mm_dec_nr_pxds(mm);
34212 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
34213 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
34219 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
34220 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
34223 - pud = pud_offset(pgd, 0);
34224 +#ifdef CONFIG_X86_64
34225 + pyd = pyd_offset(mm, 0L);
34227 + pyd = pyd_offset(pgd, 0L);
34230 - for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
34231 - pmd_t *pmd = pmds[i];
34232 + for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
34233 + pxd_t *pxd = pxds[i];
34235 if (i >= KERNEL_PGD_BOUNDARY)
34236 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
34237 - sizeof(pmd_t) * PTRS_PER_PMD);
34238 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
34239 + sizeof(pxd_t) * PTRS_PER_PMD);
34241 - pud_populate(mm, pud, pmd);
34242 + pyd_populate(mm, pyd, pxd);
34246 @@ -354,7 +426,7 @@ static inline void _pgd_free(pgd_t *pgd)
34247 pgd_t *pgd_alloc(struct mm_struct *mm)
34250 - pmd_t *pmds[PREALLOCATED_PMDS];
34251 + pxd_t *pxds[PREALLOCATED_PXDS];
34253 pgd = _pgd_alloc();
34255 @@ -363,11 +435,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34259 - if (preallocate_pmds(mm, pmds) != 0)
34260 + if (preallocate_pxds(mm, pxds) != 0)
34263 if (paravirt_pgd_alloc(mm) != 0)
34264 - goto out_free_pmds;
34265 + goto out_free_pxds;
34268 * Make sure that pre-populating the pmds is atomic with
34269 @@ -377,14 +449,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34270 spin_lock(&pgd_lock);
34273 - pgd_prepopulate_pmd(mm, pgd, pmds);
34274 + pgd_prepopulate_pxd(mm, pgd, pxds);
34276 spin_unlock(&pgd_lock);
34281 - free_pmds(mm, pmds);
34283 + free_pxds(mm, pxds);
34287 @@ -393,7 +465,7 @@ out:
34289 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
34291 - pgd_mop_up_pmds(mm, pgd);
34292 + pgd_mop_up_pxds(mm, pgd);
34294 paravirt_pgd_free(mm, pgd);
34296 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
34297 index 75cc097..79a097f 100644
34298 --- a/arch/x86/mm/pgtable_32.c
34299 +++ b/arch/x86/mm/pgtable_32.c
34300 @@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
34303 pte = pte_offset_kernel(pmd, vaddr);
34305 + pax_open_kernel();
34306 if (pte_val(pteval))
34307 set_pte_at(&init_mm, vaddr, pte, pteval);
34309 pte_clear(&init_mm, vaddr, pte);
34310 + pax_close_kernel();
34313 * It's enough to flush this one mapping.
34314 diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
34315 index e666cbb..61788c45 100644
34316 --- a/arch/x86/mm/physaddr.c
34317 +++ b/arch/x86/mm/physaddr.c
34319 #ifdef CONFIG_X86_64
34321 #ifdef CONFIG_DEBUG_VIRTUAL
34322 -unsigned long __phys_addr(unsigned long x)
34323 +unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34325 unsigned long y = x - __START_KERNEL_map;
34327 @@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34330 #ifdef CONFIG_DEBUG_VIRTUAL
34331 -unsigned long __phys_addr(unsigned long x)
34332 +unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34334 unsigned long phys_addr = x - PAGE_OFFSET;
34335 /* VMALLOC_* aren't constants */
34336 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34337 index 90555bf..f5f1828 100644
34338 --- a/arch/x86/mm/setup_nx.c
34339 +++ b/arch/x86/mm/setup_nx.c
34341 #include <asm/pgtable.h>
34342 #include <asm/proto.h>
34344 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34345 static int disable_nx;
34347 +#ifndef CONFIG_PAX_PAGEEXEC
34351 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34354 early_param("noexec", noexec_setup);
34359 void x86_configure_nx(void)
34361 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34362 if (cpu_has_nx && !disable_nx)
34363 __supported_pte_mask |= _PAGE_NX;
34366 __supported_pte_mask &= ~_PAGE_NX;
34369 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34370 index 3250f23..7a97ba2 100644
34371 --- a/arch/x86/mm/tlb.c
34372 +++ b/arch/x86/mm/tlb.c
34373 @@ -45,7 +45,11 @@ void leave_mm(int cpu)
34375 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34376 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34378 +#ifndef CONFIG_PAX_PER_CPU_PGD
34379 load_cr3(swapper_pg_dir);
34383 * This gets called in the idle path where RCU
34384 * functions differently. Tracing normally
34385 diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34386 new file mode 100644
34387 index 0000000..3fda3f3
34389 +++ b/arch/x86/mm/uderef_64.c
34391 +#include <linux/mm.h>
34392 +#include <asm/pgtable.h>
34393 +#include <asm/uaccess.h>
34395 +#ifdef CONFIG_PAX_MEMORY_UDEREF
34396 +/* PaX: due to the special call convention these functions must
34397 + * - remain leaf functions under all configurations,
34398 + * - never be called directly, only dereferenced from the wrappers.
34400 +void __used __pax_open_userland(void)
34402 + unsigned int cpu;
34404 + if (unlikely(!segment_eq(get_fs(), USER_DS)))
34407 + cpu = raw_get_cpu();
34408 + BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34409 + write_cr3(__pa_nodebug(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34410 + raw_put_cpu_no_resched();
34412 +EXPORT_SYMBOL(__pax_open_userland);
34414 +void __used __pax_close_userland(void)
34416 + unsigned int cpu;
34418 + if (unlikely(!segment_eq(get_fs(), USER_DS)))
34421 + cpu = raw_get_cpu();
34422 + BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34423 + write_cr3(__pa_nodebug(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34424 + raw_put_cpu_no_resched();
34426 +EXPORT_SYMBOL(__pax_close_userland);
34428 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34429 index 6440221..f84b5c7 100644
34430 --- a/arch/x86/net/bpf_jit.S
34431 +++ b/arch/x86/net/bpf_jit.S
34434 #include <linux/linkage.h>
34435 #include <asm/dwarf2.h>
34436 +#include <asm/alternative-asm.h>
34439 * Calling convention :
34440 @@ -38,6 +39,7 @@ sk_load_word_positive_offset:
34441 jle bpf_slow_path_word
34442 mov (SKBDATA,%rsi),%eax
34443 bswap %eax /* ntohl() */
34444 + pax_force_retaddr
34448 @@ -55,6 +57,7 @@ sk_load_half_positive_offset:
34449 jle bpf_slow_path_half
34450 movzwl (SKBDATA,%rsi),%eax
34451 rol $8,%ax # ntohs()
34452 + pax_force_retaddr
34456 @@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
34457 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34458 jle bpf_slow_path_byte
34459 movzbl (SKBDATA,%rsi),%eax
34460 + pax_force_retaddr
34463 /* rsi contains offset and can be scratched */
34464 @@ -90,6 +94,7 @@ bpf_slow_path_word:
34466 mov - MAX_BPF_STACK + 32(%rbp),%eax
34468 + pax_force_retaddr
34471 bpf_slow_path_half:
34472 @@ -98,12 +103,14 @@ bpf_slow_path_half:
34473 mov - MAX_BPF_STACK + 32(%rbp),%ax
34476 + pax_force_retaddr
34479 bpf_slow_path_byte:
34480 bpf_slow_path_common(1)
34482 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34483 + pax_force_retaddr
34486 #define sk_negative_common(SIZE) \
34487 @@ -126,6 +133,7 @@ sk_load_word_negative_offset:
34488 sk_negative_common(4)
34491 + pax_force_retaddr
34494 bpf_slow_path_half_neg:
34495 @@ -137,6 +145,7 @@ sk_load_half_negative_offset:
34499 + pax_force_retaddr
34502 bpf_slow_path_byte_neg:
34503 @@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
34504 .globl sk_load_byte_negative_offset
34505 sk_negative_common(1)
34506 movzbl (%rax), %eax
34507 + pax_force_retaddr
34511 @@ -156,4 +166,5 @@ bpf_error:
34512 mov - MAX_BPF_STACK + 16(%rbp),%r14
34513 mov - MAX_BPF_STACK + 24(%rbp),%r15
34515 + pax_force_retaddr
34517 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34518 index ddeff48..877ead6 100644
34519 --- a/arch/x86/net/bpf_jit_comp.c
34520 +++ b/arch/x86/net/bpf_jit_comp.c
34522 #include <linux/if_vlan.h>
34523 #include <asm/cacheflush.h>
34525 +#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
34526 +int bpf_jit_enable __read_only;
34528 int bpf_jit_enable __read_mostly;
34532 * assembly code in arch/x86/net/bpf_jit.S
34533 @@ -174,7 +178,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
34534 static void jit_fill_hole(void *area, unsigned int size)
34536 /* fill whole space with int3 instructions */
34537 + pax_open_kernel();
34538 memset(area, 0xcc, size);
34539 + pax_close_kernel();
34542 struct jit_context {
34543 @@ -924,7 +930,9 @@ common_load:
34544 pr_err("bpf_jit_compile fatal error\n");
34547 + pax_open_kernel();
34548 memcpy(image + proglen, temp, ilen);
34549 + pax_close_kernel();
34552 addrs[i] = proglen;
34553 @@ -1001,7 +1009,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
34556 bpf_flush_icache(header, image + proglen);
34557 - set_memory_ro((unsigned long)header, header->pages);
34558 prog->bpf_func = (void *)image;
34559 prog->jited = true;
34561 @@ -1014,12 +1021,8 @@ void bpf_jit_free(struct bpf_prog *fp)
34562 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34563 struct bpf_binary_header *header = (void *)addr;
34566 - goto free_filter;
34568 + bpf_jit_binary_free(header);
34570 - set_memory_rw(addr, header->pages);
34571 - bpf_jit_binary_free(header);
34574 bpf_prog_unlock_free(fp);
34576 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
34577 index 4e664bd..2beeaa2 100644
34578 --- a/arch/x86/oprofile/backtrace.c
34579 +++ b/arch/x86/oprofile/backtrace.c
34580 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
34581 struct stack_frame_ia32 *fp;
34582 unsigned long bytes;
34584 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34585 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34589 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
34590 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
34592 oprofile_add_trace(bufhead[0].return_address);
34594 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
34595 struct stack_frame bufhead[2];
34596 unsigned long bytes;
34598 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34599 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34603 diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
34604 index 1d2e639..f6ef82a 100644
34605 --- a/arch/x86/oprofile/nmi_int.c
34606 +++ b/arch/x86/oprofile/nmi_int.c
34608 #include <asm/nmi.h>
34609 #include <asm/msr.h>
34610 #include <asm/apic.h>
34611 +#include <asm/pgtable.h>
34613 #include "op_counter.h"
34614 #include "op_x86_model.h"
34615 @@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
34619 - if (!model->num_virt_counters)
34620 - model->num_virt_counters = model->num_counters;
34621 + if (!model->num_virt_counters) {
34622 + pax_open_kernel();
34623 + *(unsigned int *)&model->num_virt_counters = model->num_counters;
34624 + pax_close_kernel();
34629 diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
34630 index 50d86c0..7985318 100644
34631 --- a/arch/x86/oprofile/op_model_amd.c
34632 +++ b/arch/x86/oprofile/op_model_amd.c
34633 @@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
34634 num_counters = AMD64_NUM_COUNTERS;
34637 - op_amd_spec.num_counters = num_counters;
34638 - op_amd_spec.num_controls = num_counters;
34639 - op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34640 + pax_open_kernel();
34641 + *(unsigned int *)&op_amd_spec.num_counters = num_counters;
34642 + *(unsigned int *)&op_amd_spec.num_controls = num_counters;
34643 + *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34644 + pax_close_kernel();
34648 diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
34649 index d90528e..0127e2b 100644
34650 --- a/arch/x86/oprofile/op_model_ppro.c
34651 +++ b/arch/x86/oprofile/op_model_ppro.c
34653 #include <asm/msr.h>
34654 #include <asm/apic.h>
34655 #include <asm/nmi.h>
34656 +#include <asm/pgtable.h>
34658 #include "op_x86_model.h"
34659 #include "op_counter.h"
34660 @@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
34662 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
34664 - op_arch_perfmon_spec.num_counters = num_counters;
34665 - op_arch_perfmon_spec.num_controls = num_counters;
34666 + pax_open_kernel();
34667 + *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
34668 + *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
34669 + pax_close_kernel();
34672 static int arch_perfmon_init(struct oprofile_operations *ignore)
34673 diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
34674 index 71e8a67..6a313bb 100644
34675 --- a/arch/x86/oprofile/op_x86_model.h
34676 +++ b/arch/x86/oprofile/op_x86_model.h
34677 @@ -52,7 +52,7 @@ struct op_x86_model_spec {
34678 void (*switch_ctrl)(struct op_x86_model_spec const *model,
34679 struct op_msrs const * const msrs);
34684 struct op_counter_config;
34686 diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
34687 index 852aa4c..71613f2 100644
34688 --- a/arch/x86/pci/intel_mid_pci.c
34689 +++ b/arch/x86/pci/intel_mid_pci.c
34690 @@ -258,7 +258,7 @@ int __init intel_mid_pci_init(void)
34691 pci_mmcfg_late_init();
34692 pcibios_enable_irq = intel_mid_pci_irq_enable;
34693 pcibios_disable_irq = intel_mid_pci_irq_disable;
34694 - pci_root_ops = intel_mid_pci_ops;
34695 + memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
34697 /* Continue with standard init */
34699 diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
34700 index 5dc6ca5..25c03f5 100644
34701 --- a/arch/x86/pci/irq.c
34702 +++ b/arch/x86/pci/irq.c
34703 @@ -51,7 +51,7 @@ struct irq_router {
34704 struct irq_router_handler {
34706 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
34710 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
34711 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
34712 @@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
34716 -static __initdata struct irq_router_handler pirq_routers[] = {
34717 +static __initconst const struct irq_router_handler pirq_routers[] = {
34718 { PCI_VENDOR_ID_INTEL, intel_router_probe },
34719 { PCI_VENDOR_ID_AL, ali_router_probe },
34720 { PCI_VENDOR_ID_ITE, ite_router_probe },
34721 @@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
34722 static void __init pirq_find_router(struct irq_router *r)
34724 struct irq_routing_table *rt = pirq_table;
34725 - struct irq_router_handler *h;
34726 + const struct irq_router_handler *h;
34728 #ifdef CONFIG_PCI_BIOS
34729 if (!rt->signature) {
34730 @@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
34734 -static struct dmi_system_id __initdata pciirq_dmi_table[] = {
34735 +static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
34737 .callback = fix_broken_hp_bios_irq9,
34738 .ident = "HP Pavilion N5400 Series Laptop",
34739 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
34740 index 9b83b90..4112152 100644
34741 --- a/arch/x86/pci/pcbios.c
34742 +++ b/arch/x86/pci/pcbios.c
34743 @@ -79,7 +79,7 @@ union bios32 {
34745 unsigned long address;
34746 unsigned short segment;
34747 -} bios32_indirect __initdata = { 0, __KERNEL_CS };
34748 +} bios32_indirect __initconst = { 0, __PCIBIOS_CS };
34751 * Returns the entry point for the given service, NULL on error
34752 @@ -92,37 +92,80 @@ static unsigned long __init bios32_service(unsigned long service)
34753 unsigned long length; /* %ecx */
34754 unsigned long entry; /* %edx */
34755 unsigned long flags;
34756 + struct desc_struct d, *gdt;
34758 local_irq_save(flags);
34759 - __asm__("lcall *(%%edi); cld"
34761 + gdt = get_cpu_gdt_table(smp_processor_id());
34763 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
34764 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34765 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
34766 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34768 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
34769 : "=a" (return_code),
34775 - "D" (&bios32_indirect));
34776 + "D" (&bios32_indirect),
34777 + "r"(__PCIBIOS_DS)
34780 + pax_open_kernel();
34781 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
34782 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
34783 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
34784 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
34785 + pax_close_kernel();
34787 local_irq_restore(flags);
34789 switch (return_code) {
34791 - return address + entry;
34792 - case 0x80: /* Not present */
34793 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34795 - default: /* Shouldn't happen */
34796 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34797 - service, return_code);
34800 + unsigned char flags;
34802 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
34803 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
34804 + printk(KERN_WARNING "bios32_service: not valid\n");
34807 + address = address + PAGE_OFFSET;
34808 + length += 16UL; /* some BIOSs underreport this... */
34810 + if (length >= 64*1024*1024) {
34811 + length >>= PAGE_SHIFT;
34815 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34816 + gdt = get_cpu_gdt_table(cpu);
34817 + pack_descriptor(&d, address, length, 0x9b, flags);
34818 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34819 + pack_descriptor(&d, address, length, 0x93, flags);
34820 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34824 + case 0x80: /* Not present */
34825 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34827 + default: /* Shouldn't happen */
34828 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34829 + service, return_code);
34835 unsigned long address;
34836 unsigned short segment;
34837 -} pci_indirect = { 0, __KERNEL_CS };
34838 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
34840 -static int pci_bios_present;
34841 +static int pci_bios_present __read_only;
34843 static int __init check_pcibios(void)
34845 @@ -131,11 +174,13 @@ static int __init check_pcibios(void)
34846 unsigned long flags, pcibios_entry;
34848 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
34849 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
34850 + pci_indirect.address = pcibios_entry;
34852 local_irq_save(flags);
34854 - "lcall *(%%edi); cld\n\t"
34855 + __asm__("movw %w6, %%ds\n\t"
34856 + "lcall *%%ss:(%%edi); cld\n\t"
34862 @@ -144,7 +189,8 @@ static int __init check_pcibios(void)
34865 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
34866 - "D" (&pci_indirect)
34867 + "D" (&pci_indirect),
34868 + "r" (__PCIBIOS_DS)
34870 local_irq_restore(flags);
34872 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34876 - __asm__("lcall *(%%esi); cld\n\t"
34877 + __asm__("movw %w6, %%ds\n\t"
34878 + "lcall *%%ss:(%%esi); cld\n\t"
34884 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34885 : "1" (PCIBIOS_READ_CONFIG_BYTE),
34888 - "S" (&pci_indirect));
34889 + "S" (&pci_indirect),
34890 + "r" (__PCIBIOS_DS));
34892 * Zero-extend the result beyond 8 bits, do not trust the
34893 * BIOS having done it:
34894 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34898 - __asm__("lcall *(%%esi); cld\n\t"
34899 + __asm__("movw %w6, %%ds\n\t"
34900 + "lcall *%%ss:(%%esi); cld\n\t"
34906 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34907 : "1" (PCIBIOS_READ_CONFIG_WORD),
34910 - "S" (&pci_indirect));
34911 + "S" (&pci_indirect),
34912 + "r" (__PCIBIOS_DS));
34914 * Zero-extend the result beyond 16 bits, do not trust the
34915 * BIOS having done it:
34916 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34920 - __asm__("lcall *(%%esi); cld\n\t"
34921 + __asm__("movw %w6, %%ds\n\t"
34922 + "lcall *%%ss:(%%esi); cld\n\t"
34928 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34929 : "1" (PCIBIOS_READ_CONFIG_DWORD),
34932 - "S" (&pci_indirect));
34933 + "S" (&pci_indirect),
34934 + "r" (__PCIBIOS_DS));
34938 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34942 - __asm__("lcall *(%%esi); cld\n\t"
34943 + __asm__("movw %w6, %%ds\n\t"
34944 + "lcall *%%ss:(%%esi); cld\n\t"
34950 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34954 - "S" (&pci_indirect));
34955 + "S" (&pci_indirect),
34956 + "r" (__PCIBIOS_DS));
34959 - __asm__("lcall *(%%esi); cld\n\t"
34960 + __asm__("movw %w6, %%ds\n\t"
34961 + "lcall *%%ss:(%%esi); cld\n\t"
34967 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34971 - "S" (&pci_indirect));
34972 + "S" (&pci_indirect),
34973 + "r" (__PCIBIOS_DS));
34976 - __asm__("lcall *(%%esi); cld\n\t"
34977 + __asm__("movw %w6, %%ds\n\t"
34978 + "lcall *%%ss:(%%esi); cld\n\t"
34984 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34988 - "S" (&pci_indirect));
34989 + "S" (&pci_indirect),
34990 + "r" (__PCIBIOS_DS));
34994 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34996 DBG("PCI: Fetching IRQ routing table... ");
34997 __asm__("push %%es\n\t"
34998 + "movw %w8, %%ds\n\t"
35001 - "lcall *(%%esi); cld\n\t"
35002 + "lcall *%%ss:(%%esi); cld\n\t"
35009 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
35012 "S" (&pci_indirect),
35015 + "r" (__PCIBIOS_DS)
35017 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
35019 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
35023 - __asm__("lcall *(%%esi); cld\n\t"
35024 + __asm__("movw %w5, %%ds\n\t"
35025 + "lcall *%%ss:(%%esi); cld\n\t"
35031 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
35032 : "0" (PCIBIOS_SET_PCI_HW_INT),
35033 "b" ((dev->bus->number << 8) | dev->devfn),
35034 "c" ((irq << 8) | (pin + 10)),
35035 - "S" (&pci_indirect));
35036 + "S" (&pci_indirect),
35037 + "r" (__PCIBIOS_DS));
35038 return !(ret & 0xff00);
35040 EXPORT_SYMBOL(pcibios_set_irq_routing);
35041 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
35042 index ed5b673..24d2d53 100644
35043 --- a/arch/x86/platform/efi/efi_32.c
35044 +++ b/arch/x86/platform/efi/efi_32.c
35045 @@ -61,11 +61,27 @@ pgd_t * __init efi_call_phys_prolog(void)
35046 struct desc_ptr gdt_descr;
35049 +#ifdef CONFIG_PAX_KERNEXEC
35050 + struct desc_struct d;
35053 /* Current pgd is swapper_pg_dir, we'll restore it later: */
35054 +#ifdef CONFIG_PAX_PER_CPU_PGD
35055 + save_pgd = get_cpu_pgd(smp_processor_id(), kernel);
35057 save_pgd = swapper_pg_dir;
35060 load_cr3(initial_page_table);
35063 +#ifdef CONFIG_PAX_KERNEXEC
35064 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
35065 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
35066 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
35067 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
35070 gdt_descr.address = __pa(get_cpu_gdt_table(0));
35071 gdt_descr.size = GDT_SIZE - 1;
35072 load_gdt(&gdt_descr);
35073 @@ -77,6 +93,14 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
35075 struct desc_ptr gdt_descr;
35077 +#ifdef CONFIG_PAX_KERNEXEC
35078 + struct desc_struct d;
35080 + memset(&d, 0, sizeof d);
35081 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
35082 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
35085 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
35086 gdt_descr.size = GDT_SIZE - 1;
35087 load_gdt(&gdt_descr);
35088 diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
35089 index a0ac0f9..f41d324 100644
35090 --- a/arch/x86/platform/efi/efi_64.c
35091 +++ b/arch/x86/platform/efi/efi_64.c
35092 @@ -96,6 +96,11 @@ pgd_t * __init efi_call_phys_prolog(void)
35093 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
35094 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
35097 +#ifdef CONFIG_PAX_PER_CPU_PGD
35098 + load_cr3(swapper_pg_dir);
35104 @@ -119,6 +124,10 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
35108 +#ifdef CONFIG_PAX_PER_CPU_PGD
35109 + load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
35113 early_code_mapping_set_exec(0);
35115 @@ -148,8 +157,23 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
35119 - if (efi_enabled(EFI_OLD_MEMMAP))
35120 + if (efi_enabled(EFI_OLD_MEMMAP)) {
35121 + /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
35122 + * able to execute the EFI services.
35124 + if (__supported_pte_mask & _PAGE_NX) {
35125 + unsigned long addr = (unsigned long) __va(0);
35126 + pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
35128 + pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
35129 +#ifdef CONFIG_PAX_PER_CPU_PGD
35130 + set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
35132 + set_pgd(pgd_offset_k(addr), pe);
35138 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
35139 pgd = __va(efi_scratch.efi_pgt);
35140 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
35141 index 040192b..7d3300f 100644
35142 --- a/arch/x86/platform/efi/efi_stub_32.S
35143 +++ b/arch/x86/platform/efi/efi_stub_32.S
35147 #include <linux/linkage.h>
35148 +#include <linux/init.h>
35149 #include <asm/page_types.h>
35150 +#include <asm/segment.h>
35153 * efi_call_phys(void *, ...) is a function with variable parameters.
35155 * service functions will comply with gcc calling convention, too.
35160 ENTRY(efi_call_phys)
35162 * 0. The function can only be called in Linux kernel. So CS has been
35163 @@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
35164 * The mapping of lower virtual memory has been created in prolog and
35168 - subl $__PAGE_OFFSET, %edx
35170 +#ifdef CONFIG_PAX_KERNEXEC
35171 + movl $(__KERNEXEC_EFI_DS), %edx
35179 +1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
35185 + jmp 1f-__PAGE_OFFSET
35190 * 2. Now on the top of stack is the return
35191 @@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
35192 * parameter 2, ..., param n. To make things easy, we save the return
35193 * address of efi_call_phys in a global variable.
35196 - movl %edx, saved_return_addr
35197 - /* get the function pointer into ECX*/
35199 - movl %ecx, efi_rt_function_ptr
35201 - subl $__PAGE_OFFSET, %edx
35203 + popl (saved_return_addr)
35204 + popl (efi_rt_function_ptr)
35207 * 3. Clear PG bit in %CR0.
35208 @@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
35210 * 5. Call the physical function.
35213 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
35217 * 6. After EFI runtime service returns, control will return to
35218 * following instruction. We'd better readjust stack pointer first.
35219 @@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
35221 orl $0x80000000, %edx
35227 * 8. Now restore the virtual mode from flat mode by
35228 * adding EIP with PAGE_OFFSET.
35232 +#ifdef CONFIG_PAX_KERNEXEC
35233 + movl $(__KERNEL_DS), %edx
35237 + ljmp $(__KERNEL_CS),$1f
35239 + jmp 1f+__PAGE_OFFSET
35244 * 9. Balance the stack. And because EAX contain the return value,
35245 * we'd better not clobber it.
35247 - leal efi_rt_function_ptr, %edx
35248 - movl (%edx), %ecx
35250 + pushl (efi_rt_function_ptr)
35253 - * 10. Push the saved return address onto the stack and return.
35254 + * 10. Return to the saved return address.
35256 - leal saved_return_addr, %edx
35257 - movl (%edx), %ecx
35260 + jmpl *(saved_return_addr)
35261 ENDPROC(efi_call_phys)
35268 efi_rt_function_ptr:
35269 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
35270 index 86d0f9e..6d499f4 100644
35271 --- a/arch/x86/platform/efi/efi_stub_64.S
35272 +++ b/arch/x86/platform/efi/efi_stub_64.S
35274 #include <asm/msr.h>
35275 #include <asm/processor-flags.h>
35276 #include <asm/page_types.h>
35277 +#include <asm/alternative-asm.h>
35281 @@ -88,6 +89,7 @@ ENTRY(efi_call)
35285 + pax_force_retaddr 0, 1
35289 diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
35290 index 3005f0c..d06aeb0 100644
35291 --- a/arch/x86/platform/intel-mid/intel-mid.c
35292 +++ b/arch/x86/platform/intel-mid/intel-mid.c
35293 @@ -63,7 +63,7 @@ enum intel_mid_timer_options intel_mid_timer_options;
35294 /* intel_mid_ops to store sub arch ops */
35295 struct intel_mid_ops *intel_mid_ops;
35296 /* getter function for sub arch ops*/
35297 -static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
35298 +static const void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
35299 enum intel_mid_cpu_type __intel_mid_cpu_chip;
35300 EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
35302 @@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
35306 -static void intel_mid_reboot(void)
35307 +static void __noreturn intel_mid_reboot(void)
35309 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
35313 static unsigned long __init intel_mid_calibrate_tsc(void)
35314 diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35315 index 3c1c386..59a68ed 100644
35316 --- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35317 +++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35319 /* For every CPU addition a new get_<cpuname>_ops interface needs
35322 -extern void *get_penwell_ops(void);
35323 -extern void *get_cloverview_ops(void);
35324 -extern void *get_tangier_ops(void);
35325 +extern const void *get_penwell_ops(void);
35326 +extern const void *get_cloverview_ops(void);
35327 +extern const void *get_tangier_ops(void);
35328 diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
35329 index 23381d2..8ddc10e 100644
35330 --- a/arch/x86/platform/intel-mid/mfld.c
35331 +++ b/arch/x86/platform/intel-mid/mfld.c
35332 @@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
35333 pm_power_off = mfld_power_off;
35336 -void *get_penwell_ops(void)
35337 +const void *get_penwell_ops(void)
35339 return &penwell_ops;
35342 -void *get_cloverview_ops(void)
35343 +const void *get_cloverview_ops(void)
35345 return &penwell_ops;
35347 diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
35348 index aaca917..66eadbc 100644
35349 --- a/arch/x86/platform/intel-mid/mrfl.c
35350 +++ b/arch/x86/platform/intel-mid/mrfl.c
35351 @@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
35352 .arch_setup = tangier_arch_setup,
35355 -void *get_tangier_ops(void)
35356 +const void *get_tangier_ops(void)
35358 return &tangier_ops;
35360 diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
35361 index 278e4da..55e8d8a 100644
35362 --- a/arch/x86/platform/intel-quark/imr_selftest.c
35363 +++ b/arch/x86/platform/intel-quark/imr_selftest.c
35364 @@ -55,7 +55,7 @@ static void __init imr_self_test_result(int res, const char *fmt, ...)
35366 static void __init imr_self_test(void)
35368 - phys_addr_t base = virt_to_phys(&_text);
35369 + phys_addr_t base = virt_to_phys(ktla_ktva(_text));
35370 size_t size = virt_to_phys(&__end_rodata) - base;
35371 const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n";
35373 diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
35374 index d6ee929..3637cb5 100644
35375 --- a/arch/x86/platform/olpc/olpc_dt.c
35376 +++ b/arch/x86/platform/olpc/olpc_dt.c
35377 @@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
35381 -static struct of_pdt_ops prom_olpc_ops __initdata = {
35382 +static struct of_pdt_ops prom_olpc_ops __initconst = {
35383 .nextprop = olpc_dt_nextprop,
35384 .getproplen = olpc_dt_getproplen,
35385 .getproperty = olpc_dt_getproperty,
35386 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
35387 index 757678f..9895d9b 100644
35388 --- a/arch/x86/power/cpu.c
35389 +++ b/arch/x86/power/cpu.c
35390 @@ -134,11 +134,8 @@ static void do_fpu_end(void)
35391 static void fix_processor_context(void)
35393 int cpu = smp_processor_id();
35394 - struct tss_struct *t = &per_cpu(cpu_tss, cpu);
35395 -#ifdef CONFIG_X86_64
35396 - struct desc_struct *desc = get_cpu_gdt_table(cpu);
35399 + struct tss_struct *t = cpu_tss + cpu;
35401 set_tss_desc(cpu, t); /*
35402 * This just modifies memory; should not be
35403 * necessary. But... This is necessary, because
35404 @@ -147,10 +144,6 @@ static void fix_processor_context(void)
35407 #ifdef CONFIG_X86_64
35408 - memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
35409 - tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
35410 - write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
35412 syscall_init(); /* This sets MSR_*STAR and related */
35414 load_TR_desc(); /* This does ltr */
35415 diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
35416 index 0b7a63d..0d0f2c2 100644
35417 --- a/arch/x86/realmode/init.c
35418 +++ b/arch/x86/realmode/init.c
35419 @@ -68,7 +68,13 @@ void __init setup_real_mode(void)
35420 __va(real_mode_header->trampoline_header);
35422 #ifdef CONFIG_X86_32
35423 - trampoline_header->start = __pa_symbol(startup_32_smp);
35424 + trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
35426 +#ifdef CONFIG_PAX_KERNEXEC
35427 + trampoline_header->start -= LOAD_PHYSICAL_ADDR;
35430 + trampoline_header->boot_cs = __BOOT_CS;
35431 trampoline_header->gdt_limit = __BOOT_DS + 7;
35432 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
35434 @@ -84,7 +90,7 @@ void __init setup_real_mode(void)
35435 *trampoline_cr4_features = __read_cr4();
35437 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
35438 - trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
35439 + trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
35440 trampoline_pgd[511] = init_level4_pgt[511].pgd;
35443 diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
35444 index 2730d77..2e4cd19 100644
35445 --- a/arch/x86/realmode/rm/Makefile
35446 +++ b/arch/x86/realmode/rm/Makefile
35447 @@ -68,5 +68,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
35449 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
35450 -I$(srctree)/arch/x86/boot
35451 +ifdef CONSTIFY_PLUGIN
35452 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
35454 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
35456 diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
35457 index a28221d..93c40f1 100644
35458 --- a/arch/x86/realmode/rm/header.S
35459 +++ b/arch/x86/realmode/rm/header.S
35460 @@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
35462 /* APM/BIOS reboot */
35463 .long pa_machine_real_restart_asm
35464 -#ifdef CONFIG_X86_64
35465 +#ifdef CONFIG_X86_32
35466 + .long __KERNEL_CS
35468 .long __KERNEL32_CS
35470 END(real_mode_header)
35471 diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
35472 index d66c607..3def845 100644
35473 --- a/arch/x86/realmode/rm/reboot.S
35474 +++ b/arch/x86/realmode/rm/reboot.S
35475 @@ -27,6 +27,10 @@ ENTRY(machine_real_restart_asm)
35478 /* Disable paging to drop us out of long mode */
35480 + andl $~X86_CR4_PCIDE, %eax
35484 andl $~X86_CR0_PG, %eax
35486 diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
35487 index 48ddd76..c26749f 100644
35488 --- a/arch/x86/realmode/rm/trampoline_32.S
35489 +++ b/arch/x86/realmode/rm/trampoline_32.S
35491 #include <asm/page_types.h>
35492 #include "realmode.h"
35494 +#ifdef CONFIG_PAX_KERNEXEC
35497 +#define ta(X) (pa_ ## X)
35503 @@ -38,8 +44,6 @@ ENTRY(trampoline_start)
35505 cli # We should be safe anyway
35507 - movl tr_start, %eax # where we need to go
35509 movl $0xA5A5A5A5, trampoline_status
35510 # write marker for master knows we're running
35512 @@ -55,7 +59,7 @@ ENTRY(trampoline_start)
35513 movw $1, %dx # protected mode (PE) bit
35514 lmsw %dx # into protected mode
35516 - ljmpl $__BOOT_CS, $pa_startup_32
35517 + ljmpl *(trampoline_header)
35519 .section ".text32","ax"
35521 @@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
35523 GLOBAL(trampoline_header)
35525 - tr_gdt_pad: .space 2
35526 + tr_boot_cs: .space 2
35528 END(trampoline_header)
35530 diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
35531 index dac7b20..72dbaca 100644
35532 --- a/arch/x86/realmode/rm/trampoline_64.S
35533 +++ b/arch/x86/realmode/rm/trampoline_64.S
35534 @@ -93,6 +93,7 @@ ENTRY(startup_32)
35537 movl pa_tr_cr4, %eax
35538 + andl $~X86_CR4_PCIDE, %eax
35539 movl %eax, %cr4 # Enable PAE mode
35541 # Setup trampoline 4 level pagetables
35542 @@ -106,7 +107,7 @@ ENTRY(startup_32)
35545 # Enable paging and in turn activate Long Mode
35546 - movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
35547 + movl $(X86_CR0_PG | X86_CR0_PE), %eax
35551 diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
35552 index 9e7e147..25a4158 100644
35553 --- a/arch/x86/realmode/rm/wakeup_asm.S
35554 +++ b/arch/x86/realmode/rm/wakeup_asm.S
35555 @@ -126,11 +126,10 @@ ENTRY(wakeup_start)
35558 /* This really couldn't... */
35559 - movl pmode_entry, %eax
35560 movl pmode_cr0, %ecx
35562 - ljmpl $__KERNEL_CS, $pa_startup_32
35563 - /* -> jmp *%eax in trampoline_32.S */
35565 + ljmpl *pmode_entry
35567 jmp trampoline_start
35569 diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
35570 index 604a37e..e49702a 100644
35571 --- a/arch/x86/tools/Makefile
35572 +++ b/arch/x86/tools/Makefile
35573 @@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
35575 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
35577 -HOST_EXTRACFLAGS += -I$(srctree)/tools/include
35578 +HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
35579 hostprogs-y += relocs
35580 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
35582 diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
35583 index 0c2fae8..88036b7 100644
35584 --- a/arch/x86/tools/relocs.c
35585 +++ b/arch/x86/tools/relocs.c
35587 /* This is included from relocs_32/64.c */
35589 +#include "../../../include/generated/autoconf.h"
35591 #define ElfW(type) _ElfW(ELF_BITS, type)
35592 #define _ElfW(bits, type) __ElfW(bits, type)
35593 #define __ElfW(bits, type) Elf##bits##_##type
35595 #define Elf_Sym ElfW(Sym)
35597 static Elf_Ehdr ehdr;
35598 +static Elf_Phdr *phdr;
35602 @@ -386,9 +389,39 @@ static void read_ehdr(FILE *fp)
35606 +static void read_phdrs(FILE *fp)
35610 + phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
35612 + die("Unable to allocate %d program headers\n",
35615 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
35616 + die("Seek to %d failed: %s\n",
35617 + ehdr.e_phoff, strerror(errno));
35619 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
35620 + die("Cannot read ELF program headers: %s\n",
35621 + strerror(errno));
35623 + for(i = 0; i < ehdr.e_phnum; i++) {
35624 + phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
35625 + phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
35626 + phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
35627 + phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
35628 + phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
35629 + phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
35630 + phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
35631 + phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
35636 static void read_shdrs(FILE *fp)
35642 secs = calloc(ehdr.e_shnum, sizeof(struct section));
35643 @@ -423,7 +456,7 @@ static void read_shdrs(FILE *fp)
35645 static void read_strtabs(FILE *fp)
35649 for (i = 0; i < ehdr.e_shnum; i++) {
35650 struct section *sec = &secs[i];
35651 if (sec->shdr.sh_type != SHT_STRTAB) {
35652 @@ -448,7 +481,7 @@ static void read_strtabs(FILE *fp)
35654 static void read_symtabs(FILE *fp)
35657 + unsigned int i,j;
35658 for (i = 0; i < ehdr.e_shnum; i++) {
35659 struct section *sec = &secs[i];
35660 if (sec->shdr.sh_type != SHT_SYMTAB) {
35661 @@ -479,9 +512,11 @@ static void read_symtabs(FILE *fp)
35665 -static void read_relocs(FILE *fp)
35666 +static void read_relocs(FILE *fp, int use_real_mode)
35669 + unsigned int i,j;
35672 for (i = 0; i < ehdr.e_shnum; i++) {
35673 struct section *sec = &secs[i];
35674 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35675 @@ -501,9 +536,22 @@ static void read_relocs(FILE *fp)
35676 die("Cannot read symbol table: %s\n",
35681 +#ifdef CONFIG_X86_32
35682 + for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
35683 + if (phdr[j].p_type != PT_LOAD )
35685 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
35687 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
35692 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
35693 Elf_Rel *rel = &sec->reltab[j];
35694 - rel->r_offset = elf_addr_to_cpu(rel->r_offset);
35695 + rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
35696 rel->r_info = elf_xword_to_cpu(rel->r_info);
35697 #if (SHT_REL_TYPE == SHT_RELA)
35698 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
35699 @@ -515,7 +563,7 @@ static void read_relocs(FILE *fp)
35701 static void print_absolute_symbols(void)
35705 const char *format;
35707 if (ELF_BITS == 64)
35708 @@ -528,7 +576,7 @@ static void print_absolute_symbols(void)
35709 for (i = 0; i < ehdr.e_shnum; i++) {
35710 struct section *sec = &secs[i];
35715 if (sec->shdr.sh_type != SHT_SYMTAB) {
35717 @@ -555,7 +603,7 @@ static void print_absolute_symbols(void)
35719 static void print_absolute_relocs(void)
35721 - int i, printed = 0;
35722 + unsigned int i, printed = 0;
35723 const char *format;
35725 if (ELF_BITS == 64)
35726 @@ -568,7 +616,7 @@ static void print_absolute_relocs(void)
35727 struct section *sec_applies, *sec_symtab;
35729 Elf_Sym *sh_symtab;
35732 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35735 @@ -645,13 +693,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
35736 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
35737 Elf_Sym *sym, const char *symname))
35741 /* Walk through the relocations */
35742 for (i = 0; i < ehdr.e_shnum; i++) {
35744 Elf_Sym *sh_symtab;
35745 struct section *sec_applies, *sec_symtab;
35748 struct section *sec = &secs[i];
35750 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35751 @@ -830,6 +878,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35753 unsigned r_type = ELF32_R_TYPE(rel->r_info);
35754 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
35755 + char *sym_strtab = sec->link->link->strtab;
35757 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
35758 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
35761 +#ifdef CONFIG_PAX_KERNEXEC
35762 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
35763 + if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
35765 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
35767 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
35769 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
35775 @@ -968,7 +1033,7 @@ static int write32_as_text(uint32_t v, FILE *f)
35777 static void emit_relocs(int as_text, int use_real_mode)
35781 int (*write_reloc)(uint32_t, FILE *) = write32;
35782 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35783 const char *symname);
35784 @@ -1078,10 +1143,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
35786 regex_init(use_real_mode);
35793 + read_relocs(fp, use_real_mode);
35794 if (ELF_BITS == 64)
35796 if (show_absolute_syms) {
35797 diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
35798 index f40281e..92728c9 100644
35799 --- a/arch/x86/um/mem_32.c
35800 +++ b/arch/x86/um/mem_32.c
35801 @@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
35802 gate_vma.vm_start = FIXADDR_USER_START;
35803 gate_vma.vm_end = FIXADDR_USER_END;
35804 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
35805 - gate_vma.vm_page_prot = __P101;
35806 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
35810 diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
35811 index 80ffa5b..a33bd15 100644
35812 --- a/arch/x86/um/tls_32.c
35813 +++ b/arch/x86/um/tls_32.c
35814 @@ -260,7 +260,7 @@ out:
35815 if (unlikely(task == current &&
35816 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
35817 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
35818 - "without flushed TLS.", current->pid);
35819 + "without flushed TLS.", task_pid_nr(current));
35823 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
35824 index e970320..c006fea 100644
35825 --- a/arch/x86/vdso/Makefile
35826 +++ b/arch/x86/vdso/Makefile
35827 @@ -175,7 +175,7 @@ quiet_cmd_vdso = VDSO $@
35828 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
35829 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
35831 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35832 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35833 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
35836 diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
35837 index 0224987..c7d65a5 100644
35838 --- a/arch/x86/vdso/vdso2c.h
35839 +++ b/arch/x86/vdso/vdso2c.h
35840 @@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
35841 unsigned long load_size = -1; /* Work around bogus warning */
35842 unsigned long mapping_size;
35843 ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
35847 ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
35849 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
35850 index e904c27..b9eaa03 100644
35851 --- a/arch/x86/vdso/vdso32-setup.c
35852 +++ b/arch/x86/vdso/vdso32-setup.c
35854 #include <asm/cpufeature.h>
35855 #include <asm/processor.h>
35856 #include <asm/vdso.h>
35857 +#include <asm/mman.h>
35859 #ifdef CONFIG_COMPAT_VDSO
35860 #define VDSO_DEFAULT 0
35861 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
35862 index 1c9f750..cfddb1a 100644
35863 --- a/arch/x86/vdso/vma.c
35864 +++ b/arch/x86/vdso/vma.c
35866 #include <asm/page.h>
35867 #include <asm/hpet.h>
35868 #include <asm/desc.h>
35870 -#if defined(CONFIG_X86_64)
35871 -unsigned int __read_mostly vdso64_enabled = 1;
35873 +#include <asm/mman.h>
35875 void __init init_vdso_image(const struct vdso_image *image)
35877 @@ -101,6 +98,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35881 +#ifdef CONFIG_PAX_RANDMMAP
35882 + if (mm->pax_flags & MF_PAX_RANDMMAP)
35883 + calculate_addr = false;
35886 if (calculate_addr) {
35887 addr = vdso_addr(current->mm->start_stack,
35888 image->size - image->sym_vvar_start);
35889 @@ -111,14 +113,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35890 down_write(&mm->mmap_sem);
35892 addr = get_unmapped_area(NULL, addr,
35893 - image->size - image->sym_vvar_start, 0, 0);
35894 + image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
35895 if (IS_ERR_VALUE(addr)) {
35900 text_start = addr - image->sym_vvar_start;
35901 - current->mm->context.vdso = (void __user *)text_start;
35902 + mm->context.vdso = text_start;
35905 * MAYWRITE to allow gdb to COW and set breakpoints
35906 @@ -163,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35907 hpet_address >> PAGE_SHIFT,
35909 pgprot_noncached(PAGE_READONLY));
35918 - current->mm->context.vdso = NULL;
35919 + current->mm->context.vdso = 0;
35921 up_write(&mm->mmap_sem);
35923 @@ -191,8 +190,8 @@ static int load_vdso32(void)
35925 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
35926 current_thread_info()->sysenter_return =
35927 - current->mm->context.vdso +
35928 - selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
35929 + (void __force_user *)(current->mm->context.vdso +
35930 + selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
35934 @@ -201,9 +200,6 @@ static int load_vdso32(void)
35935 #ifdef CONFIG_X86_64
35936 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35938 - if (!vdso64_enabled)
35941 return map_vdso(&vdso_image_64, true);
35944 @@ -212,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
35947 #ifdef CONFIG_X86_X32_ABI
35948 - if (test_thread_flag(TIF_X32)) {
35949 - if (!vdso64_enabled)
35952 + if (test_thread_flag(TIF_X32))
35953 return map_vdso(&vdso_image_x32, true);
35957 return load_vdso32();
35958 @@ -231,15 +223,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35961 #ifdef CONFIG_X86_64
35962 -static __init int vdso_setup(char *s)
35964 - vdso64_enabled = simple_strtoul(s, NULL, 0);
35967 -__setup("vdso=", vdso_setup);
35970 -#ifdef CONFIG_X86_64
35971 static void vgetcpu_cpu_init(void *arg)
35973 int cpu = smp_processor_id();
35974 diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
35975 index e88fda8..76ce7ce 100644
35976 --- a/arch/x86/xen/Kconfig
35977 +++ b/arch/x86/xen/Kconfig
35978 @@ -9,6 +9,7 @@ config XEN
35979 select XEN_HAVE_PVMMU
35980 depends on X86_64 || (X86_32 && X86_PAE)
35982 + depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
35984 This is the Linux Xen port. Enabling this will allow the
35985 kernel to boot in a paravirtualized environment under the
35986 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
35987 index 46957ea..ef7b714 100644
35988 --- a/arch/x86/xen/enlighten.c
35989 +++ b/arch/x86/xen/enlighten.c
35990 @@ -125,8 +125,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
35992 struct shared_info xen_dummy_shared_info;
35994 -void *xen_initial_gdt;
35996 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
35997 __read_mostly int xen_have_vector_callback;
35998 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
35999 @@ -544,8 +542,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
36001 unsigned long va = dtr->address;
36002 unsigned int size = dtr->size + 1;
36003 - unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
36004 - unsigned long frames[pages];
36005 + unsigned long frames[65536 / PAGE_SIZE];
36009 @@ -593,8 +590,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
36011 unsigned long va = dtr->address;
36012 unsigned int size = dtr->size + 1;
36013 - unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
36014 - unsigned long frames[pages];
36015 + unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
36019 @@ -602,7 +598,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
36020 * 8-byte entries, or 16 4k pages..
36023 - BUG_ON(size > 65536);
36024 + BUG_ON(size > GDT_SIZE);
36025 BUG_ON(va & ~PAGE_MASK);
36027 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
36028 @@ -1223,30 +1219,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
36032 -static void xen_reboot(int reason)
36033 +static __noreturn void xen_reboot(int reason)
36035 struct sched_shutdown r = { .reason = reason };
36037 - if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
36039 + HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
36043 -static void xen_restart(char *msg)
36044 +static __noreturn void xen_restart(char *msg)
36046 xen_reboot(SHUTDOWN_reboot);
36049 -static void xen_emergency_restart(void)
36050 +static __noreturn void xen_emergency_restart(void)
36052 xen_reboot(SHUTDOWN_reboot);
36055 -static void xen_machine_halt(void)
36056 +static __noreturn void xen_machine_halt(void)
36058 xen_reboot(SHUTDOWN_poweroff);
36061 -static void xen_machine_power_off(void)
36062 +static __noreturn void xen_machine_power_off(void)
36066 @@ -1399,8 +1395,11 @@ static void __ref xen_setup_gdt(int cpu)
36067 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
36068 pv_cpu_ops.load_gdt = xen_load_gdt_boot;
36070 - setup_stack_canary_segment(0);
36071 - switch_to_new_gdt(0);
36072 + setup_stack_canary_segment(cpu);
36073 +#ifdef CONFIG_X86_64
36074 + load_percpu_segment(cpu);
36076 + switch_to_new_gdt(cpu);
36078 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
36079 pv_cpu_ops.load_gdt = xen_load_gdt;
36080 @@ -1515,7 +1514,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
36081 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
36083 /* Work out if we support NX */
36084 - x86_configure_nx();
36085 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
36086 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
36087 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
36090 + __supported_pte_mask |= _PAGE_NX;
36091 + rdmsr(MSR_EFER, l, h);
36093 + wrmsr(MSR_EFER, l, h);
36098 xen_build_dynamic_phys_to_machine();
36099 @@ -1543,13 +1552,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
36101 machine_ops = xen_machine_ops;
36104 - * The only reliable way to retain the initial address of the
36105 - * percpu gdt_page is to remember it here, so we can go and
36106 - * mark it RW later, when the initial percpu area is freed.
36108 - xen_initial_gdt = &per_cpu(gdt_page, 0);
36112 #ifdef CONFIG_ACPI_NUMA
36113 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
36114 index dd151b2..d5ab952 100644
36115 --- a/arch/x86/xen/mmu.c
36116 +++ b/arch/x86/xen/mmu.c
36117 @@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
36121 -static pteval_t pte_pfn_to_mfn(pteval_t val)
36122 +static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
36124 if (val & _PAGE_PRESENT) {
36125 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
36126 @@ -1835,7 +1835,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
36127 * L3_k[511] -> level2_fixmap_pgt */
36128 convert_pfn_mfn(level3_kernel_pgt);
36130 + convert_pfn_mfn(level3_vmalloc_start_pgt);
36131 + convert_pfn_mfn(level3_vmalloc_end_pgt);
36132 + convert_pfn_mfn(level3_vmemmap_pgt);
36133 /* L3_k[511][506] -> level1_fixmap_pgt */
36134 + /* L3_k[511][507] -> level1_vsyscall_pgt */
36135 convert_pfn_mfn(level2_fixmap_pgt);
36137 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
36138 @@ -1860,11 +1864,18 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
36139 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
36140 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
36141 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
36142 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
36143 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
36144 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
36145 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
36146 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
36147 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
36148 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
36149 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
36150 - set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
36151 + set_page_prot(level1_fixmap_pgt[0], PAGE_KERNEL_RO);
36152 + set_page_prot(level1_fixmap_pgt[1], PAGE_KERNEL_RO);
36153 + set_page_prot(level1_fixmap_pgt[2], PAGE_KERNEL_RO);
36154 + set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
36156 /* Pin down new L4 */
36157 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
36158 @@ -2048,6 +2059,7 @@ static void __init xen_post_allocator_init(void)
36159 pv_mmu_ops.set_pud = xen_set_pud;
36160 #if CONFIG_PGTABLE_LEVELS == 4
36161 pv_mmu_ops.set_pgd = xen_set_pgd;
36162 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
36165 /* This will work as long as patching hasn't happened yet
36166 @@ -2126,6 +2138,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
36167 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
36168 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
36169 .set_pgd = xen_set_pgd_hyper,
36170 + .set_pgd_batched = xen_set_pgd_hyper,
36172 .alloc_pud = xen_alloc_pmd_init,
36173 .release_pud = xen_release_pmd_init,
36174 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
36175 index 8648438..18bac20 100644
36176 --- a/arch/x86/xen/smp.c
36177 +++ b/arch/x86/xen/smp.c
36178 @@ -284,17 +284,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
36180 if (xen_pv_domain()) {
36181 if (!xen_feature(XENFEAT_writable_page_tables))
36182 - /* We've switched to the "real" per-cpu gdt, so make
36183 - * sure the old memory can be recycled. */
36184 - make_lowmem_page_readwrite(xen_initial_gdt);
36186 #ifdef CONFIG_X86_32
36188 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
36189 * expects __USER_DS
36191 - loadsegment(ds, __USER_DS);
36192 - loadsegment(es, __USER_DS);
36193 + loadsegment(ds, __KERNEL_DS);
36194 + loadsegment(es, __KERNEL_DS);
36197 xen_filter_cpu_maps();
36198 @@ -375,7 +371,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
36199 #ifdef CONFIG_X86_32
36200 /* Note: PVH is not yet supported on x86_32. */
36201 ctxt->user_regs.fs = __KERNEL_PERCPU;
36202 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
36203 + savesegment(gs, ctxt->user_regs.gs);
36205 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
36207 @@ -383,8 +379,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
36208 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
36209 ctxt->flags = VGCF_IN_KERNEL;
36210 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
36211 - ctxt->user_regs.ds = __USER_DS;
36212 - ctxt->user_regs.es = __USER_DS;
36213 + ctxt->user_regs.ds = __KERNEL_DS;
36214 + ctxt->user_regs.es = __KERNEL_DS;
36215 ctxt->user_regs.ss = __KERNEL_DS;
36217 xen_copy_trap_info(ctxt->trap_ctxt);
36218 @@ -720,7 +716,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
36220 void __init xen_smp_init(void)
36222 - smp_ops = xen_smp_ops;
36223 + memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
36224 xen_fill_possible_map();
36227 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
36228 index fd92a64..1f72641 100644
36229 --- a/arch/x86/xen/xen-asm_32.S
36230 +++ b/arch/x86/xen/xen-asm_32.S
36231 @@ -99,7 +99,7 @@ ENTRY(xen_iret)
36233 movl $(__KERNEL_PERCPU), %eax
36235 - movl %fs:xen_vcpu, %eax
36236 + mov PER_CPU_VAR(xen_vcpu), %eax
36239 movl %ss:xen_vcpu, %eax
36240 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
36241 index 8afdfcc..79239db 100644
36242 --- a/arch/x86/xen/xen-head.S
36243 +++ b/arch/x86/xen/xen-head.S
36244 @@ -41,6 +41,17 @@ ENTRY(startup_xen)
36245 #ifdef CONFIG_X86_32
36246 mov %esi,xen_start_info
36247 mov $init_thread_union+THREAD_SIZE,%esp
36249 + movl $cpu_gdt_table,%edi
36250 + movl $__per_cpu_load,%eax
36251 + movw %ax,__KERNEL_PERCPU + 2(%edi)
36253 + movb %al,__KERNEL_PERCPU + 4(%edi)
36254 + movb %ah,__KERNEL_PERCPU + 7(%edi)
36255 + movl $__per_cpu_end - 1,%eax
36256 + subl $__per_cpu_start,%eax
36257 + movw %ax,__KERNEL_PERCPU + 0(%edi)
36260 mov %rsi,xen_start_info
36261 mov $init_thread_union+THREAD_SIZE,%rsp
36262 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
36263 index 9e195c6..523ed36 100644
36264 --- a/arch/x86/xen/xen-ops.h
36265 +++ b/arch/x86/xen/xen-ops.h
36266 @@ -16,8 +16,6 @@ void xen_syscall_target(void);
36267 void xen_syscall32_target(void);
36270 -extern void *xen_initial_gdt;
36273 void xen_copy_trap_info(struct trap_info *traps);
36275 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
36276 index 525bd3d..ef888b1 100644
36277 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
36278 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
36279 @@ -119,9 +119,9 @@
36280 ----------------------------------------------------------------------*/
36282 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
36283 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
36284 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
36285 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
36286 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36288 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
36289 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
36290 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
36291 index 2f33760..835e50a 100644
36292 --- a/arch/xtensa/variants/fsf/include/variant/core.h
36293 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
36295 #ifndef _XTENSA_CORE_H
36296 #define _XTENSA_CORE_H
36298 +#include <linux/const.h>
36300 /****************************************************************************
36301 Parameters Useful for Any Code, USER or PRIVILEGED
36302 @@ -112,9 +113,9 @@
36303 ----------------------------------------------------------------------*/
36305 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
36306 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
36307 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
36308 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
36309 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36311 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
36312 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
36313 diff --git a/block/bio.c b/block/bio.c
36314 index f66a4ea..73ddf55 100644
36317 @@ -1172,7 +1172,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
36322 + if (end < start || end - start > INT_MAX - nr_pages)
36323 return ERR_PTR(-EINVAL);
36325 nr_pages += end - start;
36326 @@ -1297,7 +1297,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
36331 + if (end < start || end - start > INT_MAX - nr_pages)
36332 return ERR_PTR(-EINVAL);
36334 nr_pages += end - start;
36335 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
36336 index 0736729..2ec3b48 100644
36337 --- a/block/blk-iopoll.c
36338 +++ b/block/blk-iopoll.c
36339 @@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
36341 EXPORT_SYMBOL(blk_iopoll_complete);
36343 -static void blk_iopoll_softirq(struct softirq_action *h)
36344 +static __latent_entropy void blk_iopoll_softirq(void)
36346 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
36347 int rearm = 0, budget = blk_iopoll_budget;
36348 diff --git a/block/blk-map.c b/block/blk-map.c
36349 index da310a1..213b5c9 100644
36350 --- a/block/blk-map.c
36351 +++ b/block/blk-map.c
36352 @@ -192,7 +192,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
36356 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
36357 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
36359 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
36361 diff --git a/block/blk-mq.c b/block/blk-mq.c
36362 index 594eea0..2dc1fd6 100644
36363 --- a/block/blk-mq.c
36364 +++ b/block/blk-mq.c
36365 @@ -1968,7 +1968,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
36368 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
36369 - blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30000);
36370 + blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
36372 q->nr_queues = nr_cpu_ids;
36373 q->nr_hw_queues = set->nr_hw_queues;
36374 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
36375 index 53b1737..08177d2e 100644
36376 --- a/block/blk-softirq.c
36377 +++ b/block/blk-softirq.c
36378 @@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
36379 * Softirq action handler - move entries to local list and loop over them
36380 * while passing them to the queue registered handler.
36382 -static void blk_done_softirq(struct softirq_action *h)
36383 +static __latent_entropy void blk_done_softirq(void)
36385 struct list_head *cpu_list, local_list;
36387 diff --git a/block/bsg.c b/block/bsg.c
36388 index d214e92..9649863 100644
36391 @@ -140,16 +140,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
36392 struct sg_io_v4 *hdr, struct bsg_device *bd,
36393 fmode_t has_write_perm)
36395 + unsigned char tmpcmd[sizeof(rq->__cmd)];
36396 + unsigned char *cmdptr;
36398 if (hdr->request_len > BLK_MAX_CDB) {
36399 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
36403 + cmdptr = rq->cmd;
36407 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
36408 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
36412 + if (cmdptr != rq->cmd)
36413 + memcpy(rq->cmd, cmdptr, hdr->request_len);
36415 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
36416 if (blk_verify_command(rq->cmd, has_write_perm))
36418 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
36419 index f678c73..f35aa18 100644
36420 --- a/block/compat_ioctl.c
36421 +++ b/block/compat_ioctl.c
36422 @@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
36423 cgc = compat_alloc_user_space(sizeof(*cgc));
36424 cgc32 = compat_ptr(arg);
36426 - if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
36427 + if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
36428 get_user(data, &cgc32->buffer) ||
36429 put_user(compat_ptr(data), &cgc->buffer) ||
36430 copy_in_user(&cgc->buflen, &cgc32->buflen,
36431 @@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
36432 err |= __get_user(f->spec1, &uf->spec1);
36433 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
36434 err |= __get_user(name, &uf->name);
36435 - f->name = compat_ptr(name);
36436 + f->name = (void __force_kernel *)compat_ptr(name);
36440 diff --git a/block/genhd.c b/block/genhd.c
36441 index ea982ea..86e0f9e 100644
36442 --- a/block/genhd.c
36443 +++ b/block/genhd.c
36444 @@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
36447 * Register device numbers dev..(dev+range-1)
36448 - * range must be nonzero
36449 + * Noop if @range is zero.
36450 * The hash chain is sorted on range, so that subranges can override.
36452 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
36453 struct kobject *(*probe)(dev_t, int *, void *),
36454 int (*lock)(dev_t, void *), void *data)
36456 - kobj_map(bdev_map, devt, range, module, probe, lock, data);
36458 + kobj_map(bdev_map, devt, range, module, probe, lock, data);
36461 EXPORT_SYMBOL(blk_register_region);
36463 +/* undo blk_register_region(), noop if @range is zero */
36464 void blk_unregister_region(dev_t devt, unsigned long range)
36466 - kobj_unmap(bdev_map, devt, range);
36468 + kobj_unmap(bdev_map, devt, range);
36471 EXPORT_SYMBOL(blk_unregister_region);
36472 diff --git a/block/partitions/efi.c b/block/partitions/efi.c
36473 index 26cb624..a49c3a5 100644
36474 --- a/block/partitions/efi.c
36475 +++ b/block/partitions/efi.c
36476 @@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
36480 + if (!le32_to_cpu(gpt->num_partition_entries))
36482 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
36486 count = le32_to_cpu(gpt->num_partition_entries) *
36487 le32_to_cpu(gpt->sizeof_partition_entry);
36490 - pte = kmalloc(count, GFP_KERNEL);
36494 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
36495 (u8 *) pte, count) < count) {
36497 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
36498 index 55b6f15..b602c9a 100644
36499 --- a/block/scsi_ioctl.c
36500 +++ b/block/scsi_ioctl.c
36501 @@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
36502 return put_user(0, p);
36505 -static int sg_get_timeout(struct request_queue *q)
36506 +static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
36508 return jiffies_to_clock_t(q->sg_timeout);
36510 @@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
36511 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
36512 struct sg_io_hdr *hdr, fmode_t mode)
36514 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
36515 + unsigned char tmpcmd[sizeof(rq->__cmd)];
36516 + unsigned char *cmdptr;
36518 + if (rq->cmd != rq->__cmd)
36519 + cmdptr = rq->cmd;
36523 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
36526 + if (cmdptr != rq->cmd)
36527 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
36529 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
36532 @@ -420,6 +432,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36534 unsigned int in_len, out_len, bytes, opcode, cmdlen;
36535 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
36536 + unsigned char tmpcmd[sizeof(rq->__cmd)];
36537 + unsigned char *cmdptr;
36541 @@ -458,9 +472,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36544 rq->cmd_len = cmdlen;
36545 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
36547 + if (rq->cmd != rq->__cmd)
36548 + cmdptr = rq->cmd;
36552 + if (copy_from_user(cmdptr, sic->data, cmdlen))
36555 + if (rq->cmd != cmdptr)
36556 + memcpy(rq->cmd, cmdptr, cmdlen);
36558 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
36561 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
36562 index b0602ba..fb71850 100644
36563 --- a/crypto/cryptd.c
36564 +++ b/crypto/cryptd.c
36565 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
36567 struct cryptd_blkcipher_request_ctx {
36568 crypto_completion_t complete;
36572 struct cryptd_hash_ctx {
36573 struct crypto_shash *child;
36574 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
36576 struct cryptd_aead_request_ctx {
36577 crypto_completion_t complete;
36581 static void cryptd_queue_worker(struct work_struct *work);
36583 diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
36584 index c305d41..a96de79 100644
36585 --- a/crypto/pcrypt.c
36586 +++ b/crypto/pcrypt.c
36587 @@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
36590 pinst->kobj.kset = pcrypt_kset;
36591 - ret = kobject_add(&pinst->kobj, NULL, name);
36592 + ret = kobject_add(&pinst->kobj, NULL, "%s", name);
36594 kobject_uevent(&pinst->kobj, KOBJ_ADD);
36596 diff --git a/crypto/zlib.c b/crypto/zlib.c
36597 index 0eefa9d..0fa3d29 100644
36598 --- a/crypto/zlib.c
36599 +++ b/crypto/zlib.c
36600 @@ -95,10 +95,10 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
36601 zlib_comp_exit(ctx);
36603 window_bits = tb[ZLIB_COMP_WINDOWBITS]
36604 - ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
36605 + ? nla_get_s32(tb[ZLIB_COMP_WINDOWBITS])
36607 mem_level = tb[ZLIB_COMP_MEMLEVEL]
36608 - ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
36609 + ? nla_get_s32(tb[ZLIB_COMP_MEMLEVEL])
36612 workspacesize = zlib_deflate_workspacesize(window_bits, mem_level);
36613 diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
36614 index 3b37676..898edfa 100644
36615 --- a/drivers/acpi/acpica/hwxfsleep.c
36616 +++ b/drivers/acpi/acpica/hwxfsleep.c
36617 @@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
36618 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
36620 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
36621 - {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36622 - acpi_hw_extended_sleep},
36623 - {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36624 - acpi_hw_extended_wake_prep},
36625 - {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
36626 + {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36627 + .extended_function = acpi_hw_extended_sleep},
36628 + {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36629 + .extended_function = acpi_hw_extended_wake_prep},
36630 + {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
36631 + .extended_function = acpi_hw_extended_wake}
36635 diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
36636 index 16129c7..8b675cd 100644
36637 --- a/drivers/acpi/apei/apei-internal.h
36638 +++ b/drivers/acpi/apei/apei-internal.h
36639 @@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
36640 struct apei_exec_ins_type {
36642 apei_exec_ins_func_t run;
36646 struct apei_exec_context {
36648 diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
36649 index e82d097..0c855c1 100644
36650 --- a/drivers/acpi/apei/ghes.c
36651 +++ b/drivers/acpi/apei/ghes.c
36652 @@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
36653 const struct acpi_hest_generic *generic,
36654 const struct acpi_hest_generic_status *estatus)
36656 - static atomic_t seqno;
36657 + static atomic_unchecked_t seqno;
36658 unsigned int curr_seqno;
36661 @@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
36665 - curr_seqno = atomic_inc_return(&seqno);
36666 + curr_seqno = atomic_inc_return_unchecked(&seqno);
36667 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
36668 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
36669 pfx_seq, generic->header.source_id);
36670 diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
36671 index a83e3c6..c3d617f 100644
36672 --- a/drivers/acpi/bgrt.c
36673 +++ b/drivers/acpi/bgrt.c
36674 @@ -86,8 +86,10 @@ static int __init bgrt_init(void)
36678 - bin_attr_image.private = bgrt_image;
36679 - bin_attr_image.size = bgrt_image_size;
36680 + pax_open_kernel();
36681 + *(void **)&bin_attr_image.private = bgrt_image;
36682 + *(size_t *)&bin_attr_image.size = bgrt_image_size;
36683 + pax_close_kernel();
36685 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
36687 diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
36688 index 1d17919..315e955 100644
36689 --- a/drivers/acpi/blacklist.c
36690 +++ b/drivers/acpi/blacklist.c
36691 @@ -51,7 +51,7 @@ struct acpi_blacklist_item {
36692 u32 is_critical_error;
36695 -static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
36696 +static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
36699 * POLICY: If *anything* doesn't work, put it on the blacklist.
36700 @@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
36704 -static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
36705 +static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
36707 .callback = dmi_disable_osi_vista,
36708 .ident = "Fujitsu Siemens",
36709 diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
36710 index 513e7230e..802015a 100644
36711 --- a/drivers/acpi/bus.c
36712 +++ b/drivers/acpi/bus.c
36713 @@ -67,7 +67,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
36717 -static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36718 +static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
36720 * Invoke DSDT corruption work-around on all Toshiba Satellite.
36721 * https://bugzilla.kernel.org/show_bug.cgi?id=14679
36722 @@ -83,7 +83,7 @@ static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36726 -static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36727 +static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
36731 diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
36732 index c68e724..e863008 100644
36733 --- a/drivers/acpi/custom_method.c
36734 +++ b/drivers/acpi/custom_method.c
36735 @@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
36736 struct acpi_table_header table;
36737 acpi_status status;
36739 +#ifdef CONFIG_GRKERNSEC_KMEM
36744 /* parse the table header to get the table length */
36745 if (count <= sizeof(struct acpi_table_header))
36746 diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
36747 index 8217e0b..3294cb6 100644
36748 --- a/drivers/acpi/device_pm.c
36749 +++ b/drivers/acpi/device_pm.c
36750 @@ -1026,6 +1026,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
36752 #endif /* CONFIG_PM_SLEEP */
36754 +static void acpi_dev_pm_detach(struct device *dev, bool power_off);
36756 static struct dev_pm_domain acpi_general_pm_domain = {
36758 .runtime_suspend = acpi_subsys_runtime_suspend,
36759 @@ -1042,6 +1044,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
36760 .restore_early = acpi_subsys_resume_early,
36763 + .detach = acpi_dev_pm_detach
36767 @@ -1111,7 +1114,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
36768 acpi_device_wakeup(adev, ACPI_STATE_S0, false);
36771 - dev->pm_domain->detach = acpi_dev_pm_detach;
36774 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
36775 diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
36776 index 5e8fed4..d9bb545 100644
36777 --- a/drivers/acpi/ec.c
36778 +++ b/drivers/acpi/ec.c
36779 @@ -1293,7 +1293,7 @@ static int ec_clear_on_resume(const struct dmi_system_id *id)
36783 -static struct dmi_system_id ec_dmi_table[] __initdata = {
36784 +static const struct dmi_system_id ec_dmi_table[] __initconst = {
36786 ec_skip_dsdt_scan, "Compal JFL92", {
36787 DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
36788 diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
36789 index 139d9e4..9a9d799 100644
36790 --- a/drivers/acpi/pci_slot.c
36791 +++ b/drivers/acpi/pci_slot.c
36792 @@ -195,7 +195,7 @@ static int do_sta_before_sun(const struct dmi_system_id *d)
36796 -static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = {
36797 +static const struct dmi_system_id acpi_pci_slot_dmi_table[] __initconst = {
36799 * Fujitsu Primequest machines will return 1023 to indicate an
36800 * error if the _SUN method is evaluated on SxFy objects that
36801 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
36802 index d9f7158..168e742 100644
36803 --- a/drivers/acpi/processor_driver.c
36804 +++ b/drivers/acpi/processor_driver.c
36805 @@ -159,7 +159,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
36809 -static struct notifier_block __refdata acpi_cpu_notifier = {
36810 +static struct notifier_block __refconst acpi_cpu_notifier = {
36811 .notifier_call = acpi_cpu_soft_notify,
36814 diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
36815 index 39e0c8e..b5ae20c 100644
36816 --- a/drivers/acpi/processor_idle.c
36817 +++ b/drivers/acpi/processor_idle.c
36818 @@ -910,7 +910,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
36820 int i, count = CPUIDLE_DRIVER_STATE_START;
36821 struct acpi_processor_cx *cx;
36822 - struct cpuidle_state *state;
36823 + cpuidle_state_no_const *state;
36824 struct cpuidle_driver *drv = &acpi_idle_driver;
36826 if (!pr->flags.power_setup_done)
36827 diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
36828 index e5dd808..1eceed1 100644
36829 --- a/drivers/acpi/processor_pdc.c
36830 +++ b/drivers/acpi/processor_pdc.c
36831 @@ -176,7 +176,7 @@ static int __init set_no_mwait(const struct dmi_system_id *id)
36835 -static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
36836 +static const struct dmi_system_id processor_idle_dmi_table[] __initconst = {
36838 set_no_mwait, "Extensa 5220", {
36839 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
36840 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
36841 index 2f0d4db..b9e9b15 100644
36842 --- a/drivers/acpi/sleep.c
36843 +++ b/drivers/acpi/sleep.c
36844 @@ -148,7 +148,7 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
36848 -static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
36849 +static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
36851 .callback = init_old_suspend_ordering,
36852 .ident = "Abit KN9 (nForce4 variant)",
36853 diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
36854 index 0876d77b..3ba0127 100644
36855 --- a/drivers/acpi/sysfs.c
36856 +++ b/drivers/acpi/sysfs.c
36857 @@ -423,11 +423,11 @@ static u32 num_counters;
36858 static struct attribute **all_attrs;
36859 static u32 acpi_gpe_count;
36861 -static struct attribute_group interrupt_stats_attr_group = {
36862 +static attribute_group_no_const interrupt_stats_attr_group = {
36863 .name = "interrupts",
36866 -static struct kobj_attribute *counter_attrs;
36867 +static kobj_attribute_no_const *counter_attrs;
36869 static void delete_gpe_attr_array(void)
36871 diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
36872 index d24fa19..782f1e6 100644
36873 --- a/drivers/acpi/thermal.c
36874 +++ b/drivers/acpi/thermal.c
36875 @@ -1209,7 +1209,7 @@ static int thermal_psv(const struct dmi_system_id *d) {
36879 -static struct dmi_system_id thermal_dmi_table[] __initdata = {
36880 +static const struct dmi_system_id thermal_dmi_table[] __initconst = {
36882 * Award BIOS on this AOpen makes thermal control almost worthless.
36883 * http://bugzilla.kernel.org/show_bug.cgi?id=8842
36884 diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
36885 index cc79d3f..28adb33 100644
36886 --- a/drivers/acpi/video.c
36887 +++ b/drivers/acpi/video.c
36888 @@ -431,7 +431,7 @@ static int __init video_enable_native_backlight(const struct dmi_system_id *d)
36892 -static struct dmi_system_id video_dmi_table[] __initdata = {
36893 +static const struct dmi_system_id video_dmi_table[] __initconst = {
36895 * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
36897 diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
36898 index 287c4ba..6a600bc 100644
36899 --- a/drivers/ata/libahci.c
36900 +++ b/drivers/ata/libahci.c
36901 @@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
36903 EXPORT_SYMBOL_GPL(ahci_kick_engine);
36905 -static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36906 +static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36907 struct ata_taskfile *tf, int is_cmd, u16 flags,
36908 unsigned long timeout_msec)
36910 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
36911 index 577849c..920847c 100644
36912 --- a/drivers/ata/libata-core.c
36913 +++ b/drivers/ata/libata-core.c
36914 @@ -102,7 +102,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
36915 static void ata_dev_xfermask(struct ata_device *dev);
36916 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
36918 -atomic_t ata_print_id = ATOMIC_INIT(0);
36919 +atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
36921 struct ata_force_param {
36923 @@ -4801,7 +4801,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
36924 struct ata_port *ap;
36927 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36928 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36932 @@ -4818,7 +4818,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
36933 struct ata_port *ap;
36934 struct ata_link *link;
36936 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36937 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36938 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
36940 link = qc->dev->link;
36941 @@ -5925,6 +5925,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36945 + pax_open_kernel();
36947 for (cur = ops->inherits; cur; cur = cur->inherits) {
36948 void **inherit = (void **)cur;
36949 @@ -5938,8 +5939,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36953 - ops->inherits = NULL;
36954 + *(struct ata_port_operations **)&ops->inherits = NULL;
36956 + pax_close_kernel();
36957 spin_unlock(&lock);
36960 @@ -6135,7 +6137,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
36962 /* give ports names and add SCSI hosts */
36963 for (i = 0; i < host->n_ports; i++) {
36964 - host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
36965 + host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
36966 host->ports[i]->local_port_no = i + 1;
36969 diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
36970 index 3131adc..93e7aa0 100644
36971 --- a/drivers/ata/libata-scsi.c
36972 +++ b/drivers/ata/libata-scsi.c
36973 @@ -4209,7 +4209,7 @@ int ata_sas_port_init(struct ata_port *ap)
36977 - ap->print_id = atomic_inc_return(&ata_print_id);
36978 + ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
36981 EXPORT_SYMBOL_GPL(ata_sas_port_init);
36982 diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
36983 index a998a17..8de4bf4 100644
36984 --- a/drivers/ata/libata.h
36985 +++ b/drivers/ata/libata.h
36986 @@ -53,7 +53,7 @@ enum {
36987 ATA_DNXFER_QUIET = (1 << 31),
36990 -extern atomic_t ata_print_id;
36991 +extern atomic_unchecked_t ata_print_id;
36992 extern int atapi_passthru16;
36993 extern int libata_fua;
36994 extern int libata_noacpi;
36995 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
36996 index a9b0c82..207d97d 100644
36997 --- a/drivers/ata/pata_arasan_cf.c
36998 +++ b/drivers/ata/pata_arasan_cf.c
36999 @@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
37000 /* Handle platform specific quirks */
37002 if (quirk & CF_BROKEN_PIO) {
37003 - ap->ops->set_piomode = NULL;
37004 + pax_open_kernel();
37005 + *(void **)&ap->ops->set_piomode = NULL;
37006 + pax_close_kernel();
37009 if (quirk & CF_BROKEN_MWDMA)
37010 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
37011 index f9b983a..887b9d8 100644
37012 --- a/drivers/atm/adummy.c
37013 +++ b/drivers/atm/adummy.c
37014 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
37015 vcc->pop(vcc, skb);
37017 dev_kfree_skb_any(skb);
37018 - atomic_inc(&vcc->stats->tx);
37019 + atomic_inc_unchecked(&vcc->stats->tx);
37023 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
37024 index f1a9198..f466a4a 100644
37025 --- a/drivers/atm/ambassador.c
37026 +++ b/drivers/atm/ambassador.c
37027 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
37028 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
37031 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
37032 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
37034 // free the descriptor
37036 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
37037 dump_skb ("<<<", vc, skb);
37040 - atomic_inc(&atm_vcc->stats->rx);
37041 + atomic_inc_unchecked(&atm_vcc->stats->rx);
37042 __net_timestamp(skb);
37043 // end of our responsibility
37044 atm_vcc->push (atm_vcc, skb);
37045 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
37047 PRINTK (KERN_INFO, "dropped over-size frame");
37048 // should we count this?
37049 - atomic_inc(&atm_vcc->stats->rx_drop);
37050 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
37054 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
37057 if (check_area (skb->data, skb->len)) {
37058 - atomic_inc(&atm_vcc->stats->tx_err);
37059 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
37060 return -ENOMEM; // ?
37063 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
37064 index 480fa6f..947067c 100644
37065 --- a/drivers/atm/atmtcp.c
37066 +++ b/drivers/atm/atmtcp.c
37067 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
37068 if (vcc->pop) vcc->pop(vcc,skb);
37069 else dev_kfree_skb(skb);
37070 if (dev_data) return 0;
37071 - atomic_inc(&vcc->stats->tx_err);
37072 + atomic_inc_unchecked(&vcc->stats->tx_err);
37075 size = skb->len+sizeof(struct atmtcp_hdr);
37076 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
37078 if (vcc->pop) vcc->pop(vcc,skb);
37079 else dev_kfree_skb(skb);
37080 - atomic_inc(&vcc->stats->tx_err);
37081 + atomic_inc_unchecked(&vcc->stats->tx_err);
37084 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
37085 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
37086 if (vcc->pop) vcc->pop(vcc,skb);
37087 else dev_kfree_skb(skb);
37088 out_vcc->push(out_vcc,new_skb);
37089 - atomic_inc(&vcc->stats->tx);
37090 - atomic_inc(&out_vcc->stats->rx);
37091 + atomic_inc_unchecked(&vcc->stats->tx);
37092 + atomic_inc_unchecked(&out_vcc->stats->rx);
37096 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
37097 read_unlock(&vcc_sklist_lock);
37100 - atomic_inc(&vcc->stats->tx_err);
37101 + atomic_inc_unchecked(&vcc->stats->tx_err);
37104 skb_pull(skb,sizeof(struct atmtcp_hdr));
37105 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
37106 __net_timestamp(new_skb);
37107 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
37108 out_vcc->push(out_vcc,new_skb);
37109 - atomic_inc(&vcc->stats->tx);
37110 - atomic_inc(&out_vcc->stats->rx);
37111 + atomic_inc_unchecked(&vcc->stats->tx);
37112 + atomic_inc_unchecked(&out_vcc->stats->rx);
37114 if (vcc->pop) vcc->pop(vcc,skb);
37115 else dev_kfree_skb(skb);
37116 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
37117 index 6339efd..2b441d5 100644
37118 --- a/drivers/atm/eni.c
37119 +++ b/drivers/atm/eni.c
37120 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
37121 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
37124 - atomic_inc(&vcc->stats->rx_err);
37125 + atomic_inc_unchecked(&vcc->stats->rx_err);
37128 length = ATM_CELL_SIZE-1; /* no HEC */
37129 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
37133 - atomic_inc(&vcc->stats->rx_err);
37134 + atomic_inc_unchecked(&vcc->stats->rx_err);
37137 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
37138 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
37139 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
37140 vcc->dev->number,vcc->vci,length,size << 2,descr);
37142 - atomic_inc(&vcc->stats->rx_err);
37143 + atomic_inc_unchecked(&vcc->stats->rx_err);
37146 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
37147 @@ -770,7 +770,7 @@ rx_dequeued++;
37148 vcc->push(vcc,skb);
37151 - atomic_inc(&vcc->stats->rx);
37152 + atomic_inc_unchecked(&vcc->stats->rx);
37154 wake_up(&eni_dev->rx_wait);
37156 @@ -1230,7 +1230,7 @@ static void dequeue_tx(struct atm_dev *dev)
37158 if (vcc->pop) vcc->pop(vcc,skb);
37159 else dev_kfree_skb_irq(skb);
37160 - atomic_inc(&vcc->stats->tx);
37161 + atomic_inc_unchecked(&vcc->stats->tx);
37162 wake_up(&eni_dev->tx_wait);
37165 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
37166 index 82f2ae0..f205c02 100644
37167 --- a/drivers/atm/firestream.c
37168 +++ b/drivers/atm/firestream.c
37169 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
37173 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
37174 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
37176 fs_dprintk (FS_DEBUG_TXMEM, "i");
37177 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
37178 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
37180 skb_put (skb, qe->p1 & 0xffff);
37181 ATM_SKB(skb)->vcc = atm_vcc;
37182 - atomic_inc(&atm_vcc->stats->rx);
37183 + atomic_inc_unchecked(&atm_vcc->stats->rx);
37184 __net_timestamp(skb);
37185 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
37186 atm_vcc->push (atm_vcc, skb);
37187 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
37191 - atomic_inc(&atm_vcc->stats->rx_drop);
37192 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
37194 case 0x1f: /* Reassembly abort: no buffers. */
37195 /* Silently increment error counter. */
37197 - atomic_inc(&atm_vcc->stats->rx_drop);
37198 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
37200 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
37201 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
37202 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
37203 index 75dde90..4309ead 100644
37204 --- a/drivers/atm/fore200e.c
37205 +++ b/drivers/atm/fore200e.c
37206 @@ -932,9 +932,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
37208 /* check error condition */
37209 if (*entry->status & STATUS_ERROR)
37210 - atomic_inc(&vcc->stats->tx_err);
37211 + atomic_inc_unchecked(&vcc->stats->tx_err);
37213 - atomic_inc(&vcc->stats->tx);
37214 + atomic_inc_unchecked(&vcc->stats->tx);
37218 @@ -1083,7 +1083,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
37220 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
37222 - atomic_inc(&vcc->stats->rx_drop);
37223 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37227 @@ -1126,14 +1126,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
37229 dev_kfree_skb_any(skb);
37231 - atomic_inc(&vcc->stats->rx_drop);
37232 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37236 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
37238 vcc->push(vcc, skb);
37239 - atomic_inc(&vcc->stats->rx);
37240 + atomic_inc_unchecked(&vcc->stats->rx);
37242 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
37244 @@ -1211,7 +1211,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
37245 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
37246 fore200e->atm_dev->number,
37247 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
37248 - atomic_inc(&vcc->stats->rx_err);
37249 + atomic_inc_unchecked(&vcc->stats->rx_err);
37253 @@ -1656,7 +1656,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
37257 - atomic_inc(&vcc->stats->tx_err);
37258 + atomic_inc_unchecked(&vcc->stats->tx_err);
37260 fore200e->tx_sat++;
37261 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
37262 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
37263 index 93dca2e..c5daa69 100644
37264 --- a/drivers/atm/he.c
37265 +++ b/drivers/atm/he.c
37266 @@ -1692,7 +1692,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37268 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
37269 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
37270 - atomic_inc(&vcc->stats->rx_drop);
37271 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37272 goto return_host_buffers;
37275 @@ -1719,7 +1719,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37276 RBRQ_LEN_ERR(he_dev->rbrq_head)
37278 vcc->vpi, vcc->vci);
37279 - atomic_inc(&vcc->stats->rx_err);
37280 + atomic_inc_unchecked(&vcc->stats->rx_err);
37281 goto return_host_buffers;
37284 @@ -1771,7 +1771,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37285 vcc->push(vcc, skb);
37286 spin_lock(&he_dev->global_lock);
37288 - atomic_inc(&vcc->stats->rx);
37289 + atomic_inc_unchecked(&vcc->stats->rx);
37291 return_host_buffers:
37293 @@ -2097,7 +2097,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
37294 tpd->vcc->pop(tpd->vcc, tpd->skb);
37296 dev_kfree_skb_any(tpd->skb);
37297 - atomic_inc(&tpd->vcc->stats->tx_err);
37298 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
37300 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
37302 @@ -2509,7 +2509,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37303 vcc->pop(vcc, skb);
37305 dev_kfree_skb_any(skb);
37306 - atomic_inc(&vcc->stats->tx_err);
37307 + atomic_inc_unchecked(&vcc->stats->tx_err);
37311 @@ -2520,7 +2520,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37312 vcc->pop(vcc, skb);
37314 dev_kfree_skb_any(skb);
37315 - atomic_inc(&vcc->stats->tx_err);
37316 + atomic_inc_unchecked(&vcc->stats->tx_err);
37320 @@ -2532,7 +2532,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37321 vcc->pop(vcc, skb);
37323 dev_kfree_skb_any(skb);
37324 - atomic_inc(&vcc->stats->tx_err);
37325 + atomic_inc_unchecked(&vcc->stats->tx_err);
37326 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37329 @@ -2574,7 +2574,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37330 vcc->pop(vcc, skb);
37332 dev_kfree_skb_any(skb);
37333 - atomic_inc(&vcc->stats->tx_err);
37334 + atomic_inc_unchecked(&vcc->stats->tx_err);
37335 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37338 @@ -2605,7 +2605,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37339 __enqueue_tpd(he_dev, tpd, cid);
37340 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37342 - atomic_inc(&vcc->stats->tx);
37343 + atomic_inc_unchecked(&vcc->stats->tx);
37347 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
37348 index 527bbd5..96570c8 100644
37349 --- a/drivers/atm/horizon.c
37350 +++ b/drivers/atm/horizon.c
37351 @@ -1018,7 +1018,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
37353 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
37355 - atomic_inc(&vcc->stats->rx);
37356 + atomic_inc_unchecked(&vcc->stats->rx);
37357 __net_timestamp(skb);
37358 // end of our responsibility
37359 vcc->push (vcc, skb);
37360 @@ -1170,7 +1170,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
37361 dev->tx_iovec = NULL;
37364 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
37365 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
37368 hrz_kfree_skb (skb);
37369 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
37370 index 074616b..d6b3d5f 100644
37371 --- a/drivers/atm/idt77252.c
37372 +++ b/drivers/atm/idt77252.c
37373 @@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
37375 dev_kfree_skb(skb);
37377 - atomic_inc(&vcc->stats->tx);
37378 + atomic_inc_unchecked(&vcc->stats->tx);
37381 atomic_dec(&scq->used);
37382 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37383 if ((sb = dev_alloc_skb(64)) == NULL) {
37384 printk("%s: Can't allocate buffers for aal0.\n",
37386 - atomic_add(i, &vcc->stats->rx_drop);
37387 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
37390 if (!atm_charge(vcc, sb->truesize)) {
37391 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
37393 - atomic_add(i - 1, &vcc->stats->rx_drop);
37394 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
37398 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37399 ATM_SKB(sb)->vcc = vcc;
37400 __net_timestamp(sb);
37401 vcc->push(vcc, sb);
37402 - atomic_inc(&vcc->stats->rx);
37403 + atomic_inc_unchecked(&vcc->stats->rx);
37405 cell += ATM_CELL_PAYLOAD;
37407 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37409 card->name, len, rpp->len, readl(SAR_REG_CDC));
37410 recycle_rx_pool_skb(card, rpp);
37411 - atomic_inc(&vcc->stats->rx_err);
37412 + atomic_inc_unchecked(&vcc->stats->rx_err);
37415 if (stat & SAR_RSQE_CRC) {
37416 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
37417 recycle_rx_pool_skb(card, rpp);
37418 - atomic_inc(&vcc->stats->rx_err);
37419 + atomic_inc_unchecked(&vcc->stats->rx_err);
37422 if (skb_queue_len(&rpp->queue) > 1) {
37423 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37424 RXPRINTK("%s: Can't alloc RX skb.\n",
37426 recycle_rx_pool_skb(card, rpp);
37427 - atomic_inc(&vcc->stats->rx_err);
37428 + atomic_inc_unchecked(&vcc->stats->rx_err);
37431 if (!atm_charge(vcc, skb->truesize)) {
37432 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37433 __net_timestamp(skb);
37435 vcc->push(vcc, skb);
37436 - atomic_inc(&vcc->stats->rx);
37437 + atomic_inc_unchecked(&vcc->stats->rx);
37441 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37442 __net_timestamp(skb);
37444 vcc->push(vcc, skb);
37445 - atomic_inc(&vcc->stats->rx);
37446 + atomic_inc_unchecked(&vcc->stats->rx);
37448 if (skb->truesize > SAR_FB_SIZE_3)
37449 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
37450 @@ -1302,14 +1302,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
37451 if (vcc->qos.aal != ATM_AAL0) {
37452 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
37453 card->name, vpi, vci);
37454 - atomic_inc(&vcc->stats->rx_drop);
37455 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37459 if ((sb = dev_alloc_skb(64)) == NULL) {
37460 printk("%s: Can't allocate buffers for AAL0.\n",
37462 - atomic_inc(&vcc->stats->rx_err);
37463 + atomic_inc_unchecked(&vcc->stats->rx_err);
37467 @@ -1328,7 +1328,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
37468 ATM_SKB(sb)->vcc = vcc;
37469 __net_timestamp(sb);
37470 vcc->push(vcc, sb);
37471 - atomic_inc(&vcc->stats->rx);
37472 + atomic_inc_unchecked(&vcc->stats->rx);
37475 skb_pull(queue, 64);
37476 @@ -1953,13 +1953,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37479 printk("%s: NULL connection in send().\n", card->name);
37480 - atomic_inc(&vcc->stats->tx_err);
37481 + atomic_inc_unchecked(&vcc->stats->tx_err);
37482 dev_kfree_skb(skb);
37485 if (!test_bit(VCF_TX, &vc->flags)) {
37486 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
37487 - atomic_inc(&vcc->stats->tx_err);
37488 + atomic_inc_unchecked(&vcc->stats->tx_err);
37489 dev_kfree_skb(skb);
37492 @@ -1971,14 +1971,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37495 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
37496 - atomic_inc(&vcc->stats->tx_err);
37497 + atomic_inc_unchecked(&vcc->stats->tx_err);
37498 dev_kfree_skb(skb);
37502 if (skb_shinfo(skb)->nr_frags != 0) {
37503 printk("%s: No scatter-gather yet.\n", card->name);
37504 - atomic_inc(&vcc->stats->tx_err);
37505 + atomic_inc_unchecked(&vcc->stats->tx_err);
37506 dev_kfree_skb(skb);
37509 @@ -1986,7 +1986,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37511 err = queue_skb(card, vc, skb, oam);
37513 - atomic_inc(&vcc->stats->tx_err);
37514 + atomic_inc_unchecked(&vcc->stats->tx_err);
37515 dev_kfree_skb(skb);
37518 @@ -2009,7 +2009,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
37519 skb = dev_alloc_skb(64);
37521 printk("%s: Out of memory in send_oam().\n", card->name);
37522 - atomic_inc(&vcc->stats->tx_err);
37523 + atomic_inc_unchecked(&vcc->stats->tx_err);
37526 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
37527 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
37528 index 924f8e2..3375a3e 100644
37529 --- a/drivers/atm/iphase.c
37530 +++ b/drivers/atm/iphase.c
37531 @@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
37532 status = (u_short) (buf_desc_ptr->desc_mode);
37533 if (status & (RX_CER | RX_PTE | RX_OFL))
37535 - atomic_inc(&vcc->stats->rx_err);
37536 + atomic_inc_unchecked(&vcc->stats->rx_err);
37537 IF_ERR(printk("IA: bad packet, dropping it");)
37538 if (status & RX_CER) {
37539 IF_ERR(printk(" cause: packet CRC error\n");)
37540 @@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
37541 len = dma_addr - buf_addr;
37542 if (len > iadev->rx_buf_sz) {
37543 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
37544 - atomic_inc(&vcc->stats->rx_err);
37545 + atomic_inc_unchecked(&vcc->stats->rx_err);
37546 goto out_free_desc;
37549 @@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37550 ia_vcc = INPH_IA_VCC(vcc);
37551 if (ia_vcc == NULL)
37553 - atomic_inc(&vcc->stats->rx_err);
37554 + atomic_inc_unchecked(&vcc->stats->rx_err);
37555 atm_return(vcc, skb->truesize);
37556 dev_kfree_skb_any(skb);
37558 @@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37559 if ((length > iadev->rx_buf_sz) || (length >
37560 (skb->len - sizeof(struct cpcs_trailer))))
37562 - atomic_inc(&vcc->stats->rx_err);
37563 + atomic_inc_unchecked(&vcc->stats->rx_err);
37564 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
37565 length, skb->len);)
37566 atm_return(vcc, skb->truesize);
37567 @@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37569 IF_RX(printk("rx_dle_intr: skb push");)
37570 vcc->push(vcc,skb);
37571 - atomic_inc(&vcc->stats->rx);
37572 + atomic_inc_unchecked(&vcc->stats->rx);
37573 iadev->rx_pkt_cnt++;
37576 @@ -2828,15 +2828,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
37578 struct k_sonet_stats *stats;
37579 stats = &PRIV(_ia_dev[board])->sonet_stats;
37580 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
37581 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
37582 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
37583 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
37584 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
37585 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
37586 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
37587 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
37588 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
37589 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
37590 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
37591 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
37592 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
37593 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
37594 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
37595 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
37596 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
37597 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
37599 ia_cmds.status = 0;
37601 @@ -2941,7 +2941,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37602 if ((desc == 0) || (desc > iadev->num_tx_desc))
37604 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
37605 - atomic_inc(&vcc->stats->tx);
37606 + atomic_inc_unchecked(&vcc->stats->tx);
37608 vcc->pop(vcc, skb);
37610 @@ -3046,14 +3046,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37611 ATM_DESC(skb) = vcc->vci;
37612 skb_queue_tail(&iadev->tx_dma_q, skb);
37614 - atomic_inc(&vcc->stats->tx);
37615 + atomic_inc_unchecked(&vcc->stats->tx);
37616 iadev->tx_pkt_cnt++;
37617 /* Increment transaction counter */
37618 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
37621 /* add flow control logic */
37622 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
37623 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
37624 if (iavcc->vc_desc_cnt > 10) {
37625 vcc->tx_quota = vcc->tx_quota * 3 / 4;
37626 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
37627 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
37628 index ce43ae3..969de38 100644
37629 --- a/drivers/atm/lanai.c
37630 +++ b/drivers/atm/lanai.c
37631 @@ -1295,7 +1295,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
37632 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
37633 lanai_endtx(lanai, lvcc);
37634 lanai_free_skb(lvcc->tx.atmvcc, skb);
37635 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
37636 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
37639 /* Try to fill the buffer - don't call unless there is backlog */
37640 @@ -1418,7 +1418,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
37641 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
37642 __net_timestamp(skb);
37643 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
37644 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
37645 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
37647 lvcc->rx.buf.ptr = end;
37648 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
37649 @@ -1659,7 +1659,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37650 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
37651 "vcc %d\n", lanai->number, (unsigned int) s, vci);
37652 lanai->stats.service_rxnotaal5++;
37653 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37654 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37657 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
37658 @@ -1671,7 +1671,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37660 read_unlock(&vcc_sklist_lock);
37661 DPRINTK("got trashed rx pdu on vci %d\n", vci);
37662 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37663 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37664 lvcc->stats.x.aal5.service_trash++;
37665 bytes = (SERVICE_GET_END(s) * 16) -
37666 (((unsigned long) lvcc->rx.buf.ptr) -
37667 @@ -1683,7 +1683,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37669 if (s & SERVICE_STREAM) {
37670 read_unlock(&vcc_sklist_lock);
37671 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37672 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37673 lvcc->stats.x.aal5.service_stream++;
37674 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
37675 "PDU on VCI %d!\n", lanai->number, vci);
37676 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37679 DPRINTK("got rx crc error on vci %d\n", vci);
37680 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37681 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37682 lvcc->stats.x.aal5.service_rxcrc++;
37683 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
37684 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
37685 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
37686 index ddc4ceb..36e29aa 100644
37687 --- a/drivers/atm/nicstar.c
37688 +++ b/drivers/atm/nicstar.c
37689 @@ -1632,7 +1632,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37690 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
37691 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
37693 - atomic_inc(&vcc->stats->tx_err);
37694 + atomic_inc_unchecked(&vcc->stats->tx_err);
37695 dev_kfree_skb_any(skb);
37698 @@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37700 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
37702 - atomic_inc(&vcc->stats->tx_err);
37703 + atomic_inc_unchecked(&vcc->stats->tx_err);
37704 dev_kfree_skb_any(skb);
37707 @@ -1648,14 +1648,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37708 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
37709 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
37711 - atomic_inc(&vcc->stats->tx_err);
37712 + atomic_inc_unchecked(&vcc->stats->tx_err);
37713 dev_kfree_skb_any(skb);
37717 if (skb_shinfo(skb)->nr_frags != 0) {
37718 printk("nicstar%d: No scatter-gather yet.\n", card->index);
37719 - atomic_inc(&vcc->stats->tx_err);
37720 + atomic_inc_unchecked(&vcc->stats->tx_err);
37721 dev_kfree_skb_any(skb);
37724 @@ -1703,11 +1703,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37727 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
37728 - atomic_inc(&vcc->stats->tx_err);
37729 + atomic_inc_unchecked(&vcc->stats->tx_err);
37730 dev_kfree_skb_any(skb);
37733 - atomic_inc(&vcc->stats->tx);
37734 + atomic_inc_unchecked(&vcc->stats->tx);
37738 @@ -2024,14 +2024,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37740 ("nicstar%d: Can't allocate buffers for aal0.\n",
37742 - atomic_add(i, &vcc->stats->rx_drop);
37743 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
37746 if (!atm_charge(vcc, sb->truesize)) {
37748 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
37750 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37751 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37752 dev_kfree_skb_any(sb);
37755 @@ -2046,7 +2046,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37756 ATM_SKB(sb)->vcc = vcc;
37757 __net_timestamp(sb);
37758 vcc->push(vcc, sb);
37759 - atomic_inc(&vcc->stats->rx);
37760 + atomic_inc_unchecked(&vcc->stats->rx);
37761 cell += ATM_CELL_PAYLOAD;
37764 @@ -2063,7 +2063,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37765 if (iovb == NULL) {
37766 printk("nicstar%d: Out of iovec buffers.\n",
37768 - atomic_inc(&vcc->stats->rx_drop);
37769 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37770 recycle_rx_buf(card, skb);
37773 @@ -2087,7 +2087,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37774 small or large buffer itself. */
37775 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
37776 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
37777 - atomic_inc(&vcc->stats->rx_err);
37778 + atomic_inc_unchecked(&vcc->stats->rx_err);
37779 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37781 NS_PRV_IOVCNT(iovb) = 0;
37782 @@ -2107,7 +2107,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37783 ("nicstar%d: Expected a small buffer, and this is not one.\n",
37785 which_list(card, skb);
37786 - atomic_inc(&vcc->stats->rx_err);
37787 + atomic_inc_unchecked(&vcc->stats->rx_err);
37788 recycle_rx_buf(card, skb);
37790 recycle_iov_buf(card, iovb);
37791 @@ -2120,7 +2120,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37792 ("nicstar%d: Expected a large buffer, and this is not one.\n",
37794 which_list(card, skb);
37795 - atomic_inc(&vcc->stats->rx_err);
37796 + atomic_inc_unchecked(&vcc->stats->rx_err);
37797 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37798 NS_PRV_IOVCNT(iovb));
37800 @@ -2143,7 +2143,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37801 printk(" - PDU size mismatch.\n");
37804 - atomic_inc(&vcc->stats->rx_err);
37805 + atomic_inc_unchecked(&vcc->stats->rx_err);
37806 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37807 NS_PRV_IOVCNT(iovb));
37809 @@ -2157,14 +2157,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37810 /* skb points to a small buffer */
37811 if (!atm_charge(vcc, skb->truesize)) {
37812 push_rxbufs(card, skb);
37813 - atomic_inc(&vcc->stats->rx_drop);
37814 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37817 dequeue_sm_buf(card, skb);
37818 ATM_SKB(skb)->vcc = vcc;
37819 __net_timestamp(skb);
37820 vcc->push(vcc, skb);
37821 - atomic_inc(&vcc->stats->rx);
37822 + atomic_inc_unchecked(&vcc->stats->rx);
37824 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
37825 struct sk_buff *sb;
37826 @@ -2175,14 +2175,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37827 if (len <= NS_SMBUFSIZE) {
37828 if (!atm_charge(vcc, sb->truesize)) {
37829 push_rxbufs(card, sb);
37830 - atomic_inc(&vcc->stats->rx_drop);
37831 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37834 dequeue_sm_buf(card, sb);
37835 ATM_SKB(sb)->vcc = vcc;
37836 __net_timestamp(sb);
37837 vcc->push(vcc, sb);
37838 - atomic_inc(&vcc->stats->rx);
37839 + atomic_inc_unchecked(&vcc->stats->rx);
37842 push_rxbufs(card, skb);
37843 @@ -2191,7 +2191,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37845 if (!atm_charge(vcc, skb->truesize)) {
37846 push_rxbufs(card, skb);
37847 - atomic_inc(&vcc->stats->rx_drop);
37848 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37850 dequeue_lg_buf(card, skb);
37851 skb_push(skb, NS_SMBUFSIZE);
37852 @@ -2201,7 +2201,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37853 ATM_SKB(skb)->vcc = vcc;
37854 __net_timestamp(skb);
37855 vcc->push(vcc, skb);
37856 - atomic_inc(&vcc->stats->rx);
37857 + atomic_inc_unchecked(&vcc->stats->rx);
37860 push_rxbufs(card, sb);
37861 @@ -2222,7 +2222,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37863 ("nicstar%d: Out of huge buffers.\n",
37865 - atomic_inc(&vcc->stats->rx_drop);
37866 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37867 recycle_iovec_rx_bufs(card,
37870 @@ -2273,7 +2273,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37871 card->hbpool.count++;
37873 dev_kfree_skb_any(hb);
37874 - atomic_inc(&vcc->stats->rx_drop);
37875 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37877 /* Copy the small buffer to the huge buffer */
37878 sb = (struct sk_buff *)iov->iov_base;
37879 @@ -2307,7 +2307,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37880 ATM_SKB(hb)->vcc = vcc;
37881 __net_timestamp(hb);
37882 vcc->push(vcc, hb);
37883 - atomic_inc(&vcc->stats->rx);
37884 + atomic_inc_unchecked(&vcc->stats->rx);
37888 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
37889 index 74e18b0..f16afa0 100644
37890 --- a/drivers/atm/solos-pci.c
37891 +++ b/drivers/atm/solos-pci.c
37892 @@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
37894 atm_charge(vcc, skb->truesize);
37895 vcc->push(vcc, skb);
37896 - atomic_inc(&vcc->stats->rx);
37897 + atomic_inc_unchecked(&vcc->stats->rx);
37901 @@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
37902 vcc = SKB_CB(oldskb)->vcc;
37905 - atomic_inc(&vcc->stats->tx);
37906 + atomic_inc_unchecked(&vcc->stats->tx);
37907 solos_pop(vcc, oldskb);
37909 dev_kfree_skb_irq(oldskb);
37910 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
37911 index 0215934..ce9f5b1 100644
37912 --- a/drivers/atm/suni.c
37913 +++ b/drivers/atm/suni.c
37914 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
37917 #define ADD_LIMITED(s,v) \
37918 - atomic_add((v),&stats->s); \
37919 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
37920 + atomic_add_unchecked((v),&stats->s); \
37921 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
37924 static void suni_hz(unsigned long from_timer)
37925 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
37926 index 5120a96..e2572bd 100644
37927 --- a/drivers/atm/uPD98402.c
37928 +++ b/drivers/atm/uPD98402.c
37929 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
37930 struct sonet_stats tmp;
37933 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37934 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37935 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
37936 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
37937 if (zero && !error) {
37938 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
37941 #define ADD_LIMITED(s,v) \
37942 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
37943 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
37944 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37945 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
37946 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
37947 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37950 static void stat_event(struct atm_dev *dev)
37951 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
37952 if (reason & uPD98402_INT_PFM) stat_event(dev);
37953 if (reason & uPD98402_INT_PCO) {
37954 (void) GET(PCOCR); /* clear interrupt cause */
37955 - atomic_add(GET(HECCT),
37956 + atomic_add_unchecked(GET(HECCT),
37957 &PRIV(dev)->sonet_stats.uncorr_hcs);
37959 if ((reason & uPD98402_INT_RFO) &&
37960 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
37961 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
37962 uPD98402_INT_LOS),PIMR); /* enable them */
37963 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
37964 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37965 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
37966 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
37967 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37968 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
37969 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
37973 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
37974 index cecfb94..87009ec 100644
37975 --- a/drivers/atm/zatm.c
37976 +++ b/drivers/atm/zatm.c
37977 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37980 dev_kfree_skb_irq(skb);
37981 - if (vcc) atomic_inc(&vcc->stats->rx_err);
37982 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
37985 if (!atm_charge(vcc,skb->truesize)) {
37986 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37988 ATM_SKB(skb)->vcc = vcc;
37989 vcc->push(vcc,skb);
37990 - atomic_inc(&vcc->stats->rx);
37991 + atomic_inc_unchecked(&vcc->stats->rx);
37993 zout(pos & 0xffff,MTA(mbx));
37994 #if 0 /* probably a stupid idea */
37995 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
37996 skb_queue_head(&zatm_vcc->backlog,skb);
37999 - atomic_inc(&vcc->stats->tx);
38000 + atomic_inc_unchecked(&vcc->stats->tx);
38001 wake_up(&zatm_vcc->tx_wait);
38004 diff --git a/drivers/base/bus.c b/drivers/base/bus.c
38005 index 79bc203..fa3945b 100644
38006 --- a/drivers/base/bus.c
38007 +++ b/drivers/base/bus.c
38008 @@ -1126,7 +1126,7 @@ int subsys_interface_register(struct subsys_interface *sif)
38011 mutex_lock(&subsys->p->mutex);
38012 - list_add_tail(&sif->node, &subsys->p->interfaces);
38013 + pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
38014 if (sif->add_dev) {
38015 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
38016 while ((dev = subsys_dev_iter_next(&iter)))
38017 @@ -1151,7 +1151,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
38018 subsys = sif->subsys;
38020 mutex_lock(&subsys->p->mutex);
38021 - list_del_init(&sif->node);
38022 + pax_list_del_init((struct list_head *)&sif->node);
38023 if (sif->remove_dev) {
38024 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
38025 while ((dev = subsys_dev_iter_next(&iter)))
38026 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
38027 index 68f0314..ca2a609 100644
38028 --- a/drivers/base/devtmpfs.c
38029 +++ b/drivers/base/devtmpfs.c
38030 @@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
38034 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
38035 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
38037 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
38039 @@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
38040 *err = sys_unshare(CLONE_NEWNS);
38043 - *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
38044 + *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
38047 - sys_chdir("/.."); /* will traverse into overmounted root */
38049 + sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
38050 + sys_chroot((char __force_user *)".");
38051 complete(&setup_done);
38053 spin_lock(&req_lock);
38054 diff --git a/drivers/base/node.c b/drivers/base/node.c
38055 index a2aa65b..8831326 100644
38056 --- a/drivers/base/node.c
38057 +++ b/drivers/base/node.c
38058 @@ -613,7 +613,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
38060 struct device_attribute attr;
38061 enum node_states state;
38065 static ssize_t show_node_state(struct device *dev,
38066 struct device_attribute *attr, char *buf)
38067 diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
38068 index 2327613..211d7f5 100644
38069 --- a/drivers/base/power/domain.c
38070 +++ b/drivers/base/power/domain.c
38071 @@ -1725,7 +1725,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
38073 struct cpuidle_driver *cpuidle_drv;
38074 struct gpd_cpuidle_data *cpuidle_data;
38075 - struct cpuidle_state *idle_state;
38076 + cpuidle_state_no_const *idle_state;
38079 if (IS_ERR_OR_NULL(genpd) || state < 0)
38080 @@ -1793,7 +1793,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
38081 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
38083 struct gpd_cpuidle_data *cpuidle_data;
38084 - struct cpuidle_state *idle_state;
38085 + cpuidle_state_no_const *idle_state;
38088 if (IS_ERR_OR_NULL(genpd))
38089 @@ -2222,8 +2222,11 @@ int genpd_dev_pm_attach(struct device *dev)
38093 - dev->pm_domain->detach = genpd_dev_pm_detach;
38094 - dev->pm_domain->sync = genpd_dev_pm_sync;
38095 + pax_open_kernel();
38096 + *(void **)&dev->pm_domain->detach = genpd_dev_pm_detach;
38097 + *(void **)&dev->pm_domain->sync = genpd_dev_pm_sync;
38098 + pax_close_kernel();
38100 pm_genpd_poweron(pd);
38103 diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
38104 index d2be3f9..0a3167a 100644
38105 --- a/drivers/base/power/sysfs.c
38106 +++ b/drivers/base/power/sysfs.c
38107 @@ -181,7 +181,7 @@ static ssize_t rtpm_status_show(struct device *dev,
38111 - return sprintf(buf, p);
38112 + return sprintf(buf, "%s", p);
38115 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
38116 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
38117 index 7726200..a417da7 100644
38118 --- a/drivers/base/power/wakeup.c
38119 +++ b/drivers/base/power/wakeup.c
38120 @@ -32,14 +32,14 @@ static bool pm_abort_suspend __read_mostly;
38121 * They need to be modified together atomically, so it's better to use one
38122 * atomic variable to hold them both.
38124 -static atomic_t combined_event_count = ATOMIC_INIT(0);
38125 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
38127 #define IN_PROGRESS_BITS (sizeof(int) * 4)
38128 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
38130 static void split_counters(unsigned int *cnt, unsigned int *inpr)
38132 - unsigned int comb = atomic_read(&combined_event_count);
38133 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
38135 *cnt = (comb >> IN_PROGRESS_BITS);
38136 *inpr = comb & MAX_IN_PROGRESS;
38137 @@ -404,7 +404,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
38138 ws->start_prevent_time = ws->last_time;
38140 /* Increment the counter of events in progress. */
38141 - cec = atomic_inc_return(&combined_event_count);
38142 + cec = atomic_inc_return_unchecked(&combined_event_count);
38144 trace_wakeup_source_activate(ws->name, cec);
38146 @@ -530,7 +530,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
38147 * Increment the counter of registered wakeup events and decrement the
38148 * couter of wakeup events in progress simultaneously.
38150 - cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
38151 + cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
38152 trace_wakeup_source_deactivate(ws->name, cec);
38154 split_counters(&cnt, &inpr);
38155 diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
38156 index 8d98a32..61d3165 100644
38157 --- a/drivers/base/syscore.c
38158 +++ b/drivers/base/syscore.c
38159 @@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
38160 void register_syscore_ops(struct syscore_ops *ops)
38162 mutex_lock(&syscore_ops_lock);
38163 - list_add_tail(&ops->node, &syscore_ops_list);
38164 + pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
38165 mutex_unlock(&syscore_ops_lock);
38167 EXPORT_SYMBOL_GPL(register_syscore_ops);
38168 @@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
38169 void unregister_syscore_ops(struct syscore_ops *ops)
38171 mutex_lock(&syscore_ops_lock);
38172 - list_del(&ops->node);
38173 + pax_list_del((struct list_head *)&ops->node);
38174 mutex_unlock(&syscore_ops_lock);
38176 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
38177 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
38178 index ff20f19..018f1da 100644
38179 --- a/drivers/block/cciss.c
38180 +++ b/drivers/block/cciss.c
38181 @@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
38182 while (!list_empty(&h->reqQ)) {
38183 c = list_entry(h->reqQ.next, CommandList_struct, list);
38184 /* can't do anything if fifo is full */
38185 - if ((h->access.fifo_full(h))) {
38186 + if ((h->access->fifo_full(h))) {
38187 dev_warn(&h->pdev->dev, "fifo full\n");
38190 @@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
38193 /* Tell the controller execute command */
38194 - h->access.submit_command(h, c);
38195 + h->access->submit_command(h, c);
38197 /* Put job onto the completed Q */
38199 @@ -3444,17 +3444,17 @@ startio:
38201 static inline unsigned long get_next_completion(ctlr_info_t *h)
38203 - return h->access.command_completed(h);
38204 + return h->access->command_completed(h);
38207 static inline int interrupt_pending(ctlr_info_t *h)
38209 - return h->access.intr_pending(h);
38210 + return h->access->intr_pending(h);
38213 static inline long interrupt_not_for_us(ctlr_info_t *h)
38215 - return ((h->access.intr_pending(h) == 0) ||
38216 + return ((h->access->intr_pending(h) == 0) ||
38217 (h->interrupts_enabled == 0));
38220 @@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
38223 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
38224 - return h->access.command_completed(h);
38225 + return h->access->command_completed(h);
38227 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
38228 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
38229 @@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
38230 trans_support & CFGTBL_Trans_use_short_tags);
38232 /* Change the access methods to the performant access methods */
38233 - h->access = SA5_performant_access;
38234 + h->access = &SA5_performant_access;
38235 h->transMethod = CFGTBL_Trans_Performant;
38238 @@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
38239 if (prod_index < 0)
38241 h->product_name = products[prod_index].product_name;
38242 - h->access = *(products[prod_index].access);
38243 + h->access = products[prod_index].access;
38245 if (cciss_board_disabled(h)) {
38246 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
38247 @@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
38250 /* make sure the board interrupts are off */
38251 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
38252 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
38253 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
38256 @@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
38257 * fake ones to scoop up any residual completions.
38259 spin_lock_irqsave(&h->lock, flags);
38260 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
38261 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
38262 spin_unlock_irqrestore(&h->lock, flags);
38263 free_irq(h->intr[h->intr_mode], h);
38264 rc = cciss_request_irq(h, cciss_msix_discard_completions,
38265 @@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
38266 dev_info(&h->pdev->dev, "Board READY.\n");
38267 dev_info(&h->pdev->dev,
38268 "Waiting for stale completions to drain.\n");
38269 - h->access.set_intr_mask(h, CCISS_INTR_ON);
38270 + h->access->set_intr_mask(h, CCISS_INTR_ON);
38272 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
38273 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
38275 rc = controller_reset_failed(h->cfgtable);
38277 @@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
38278 cciss_scsi_setup(h);
38280 /* Turn the interrupts on so we can service requests */
38281 - h->access.set_intr_mask(h, CCISS_INTR_ON);
38282 + h->access->set_intr_mask(h, CCISS_INTR_ON);
38284 /* Get the firmware version */
38285 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
38286 @@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
38288 if (return_code != IO_OK)
38289 dev_warn(&h->pdev->dev, "Error flushing cache\n");
38290 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
38291 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
38292 free_irq(h->intr[h->intr_mode], h);
38295 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
38296 index 7fda30e..2f27946 100644
38297 --- a/drivers/block/cciss.h
38298 +++ b/drivers/block/cciss.h
38299 @@ -101,7 +101,7 @@ struct ctlr_info
38300 /* information about each logical volume */
38301 drive_info_struct *drv[CISS_MAX_LUN];
38303 - struct access_method access;
38304 + struct access_method *access;
38306 /* queue and queue Info */
38307 struct list_head reqQ;
38308 @@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
38311 static struct access_method SA5_access = {
38312 - SA5_submit_command,
38315 - SA5_intr_pending,
38317 + .submit_command = SA5_submit_command,
38318 + .set_intr_mask = SA5_intr_mask,
38319 + .fifo_full = SA5_fifo_full,
38320 + .intr_pending = SA5_intr_pending,
38321 + .command_completed = SA5_completed,
38324 static struct access_method SA5B_access = {
38325 - SA5_submit_command,
38328 - SA5B_intr_pending,
38330 + .submit_command = SA5_submit_command,
38331 + .set_intr_mask = SA5B_intr_mask,
38332 + .fifo_full = SA5_fifo_full,
38333 + .intr_pending = SA5B_intr_pending,
38334 + .command_completed = SA5_completed,
38337 static struct access_method SA5_performant_access = {
38338 - SA5_submit_command,
38339 - SA5_performant_intr_mask,
38341 - SA5_performant_intr_pending,
38342 - SA5_performant_completed,
38343 + .submit_command = SA5_submit_command,
38344 + .set_intr_mask = SA5_performant_intr_mask,
38345 + .fifo_full = SA5_fifo_full,
38346 + .intr_pending = SA5_performant_intr_pending,
38347 + .command_completed = SA5_performant_completed,
38350 struct board_type {
38351 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
38352 index f749df9..5f8b9c4 100644
38353 --- a/drivers/block/cpqarray.c
38354 +++ b/drivers/block/cpqarray.c
38355 @@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38356 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
38359 - hba[i]->access.set_intr_mask(hba[i], 0);
38360 + hba[i]->access->set_intr_mask(hba[i], 0);
38361 if (request_irq(hba[i]->intr, do_ida_intr, IRQF_SHARED,
38362 hba[i]->devname, hba[i]))
38364 @@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38365 add_timer(&hba[i]->timer);
38367 /* Enable IRQ now that spinlock and rate limit timer are set up */
38368 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38369 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38371 for(j=0; j<NWD; j++) {
38372 struct gendisk *disk = ida_gendisk[i][j];
38373 @@ -694,7 +694,7 @@ DBGINFO(
38374 for(i=0; i<NR_PRODUCTS; i++) {
38375 if (board_id == products[i].board_id) {
38376 c->product_name = products[i].product_name;
38377 - c->access = *(products[i].access);
38378 + c->access = products[i].access;
38382 @@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
38383 hba[ctlr]->intr = intr;
38384 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
38385 hba[ctlr]->product_name = products[j].product_name;
38386 - hba[ctlr]->access = *(products[j].access);
38387 + hba[ctlr]->access = products[j].access;
38388 hba[ctlr]->ctlr = ctlr;
38389 hba[ctlr]->board_id = board_id;
38390 hba[ctlr]->pci_dev = NULL; /* not PCI */
38391 @@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
38393 while((c = h->reqQ) != NULL) {
38394 /* Can't do anything if we're busy */
38395 - if (h->access.fifo_full(h) == 0)
38396 + if (h->access->fifo_full(h) == 0)
38399 /* Get the first entry from the request Q */
38400 @@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
38403 /* Tell the controller to do our bidding */
38404 - h->access.submit_command(h, c);
38405 + h->access->submit_command(h, c);
38407 /* Get onto the completion Q */
38409 @@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38410 unsigned long flags;
38413 - istat = h->access.intr_pending(h);
38414 + istat = h->access->intr_pending(h);
38415 /* Is this interrupt for us? */
38418 @@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38420 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
38421 if (istat & FIFO_NOT_EMPTY) {
38422 - while((a = h->access.command_completed(h))) {
38423 + while((a = h->access->command_completed(h))) {
38425 if ((c = h->cmpQ) == NULL)
38427 @@ -1448,11 +1448,11 @@ static int sendcmd(
38429 * Disable interrupt
38431 - info_p->access.set_intr_mask(info_p, 0);
38432 + info_p->access->set_intr_mask(info_p, 0);
38433 /* Make sure there is room in the command FIFO */
38434 /* Actually it should be completely empty at this time. */
38435 for (i = 200000; i > 0; i--) {
38436 - temp = info_p->access.fifo_full(info_p);
38437 + temp = info_p->access->fifo_full(info_p);
38441 @@ -1465,7 +1465,7 @@ DBG(
38445 - info_p->access.submit_command(info_p, c);
38446 + info_p->access->submit_command(info_p, c);
38447 complete = pollcomplete(ctlr);
38449 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
38450 @@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
38451 * we check the new geometry. Then turn interrupts back on when
38454 - host->access.set_intr_mask(host, 0);
38455 + host->access->set_intr_mask(host, 0);
38457 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
38458 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
38460 for(i=0; i<NWD; i++) {
38461 struct gendisk *disk = ida_gendisk[ctlr][i];
38462 @@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
38463 /* Wait (up to 2 seconds) for a command to complete */
38465 for (i = 200000; i > 0; i--) {
38466 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
38467 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
38469 udelay(10); /* a short fixed delay */
38471 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
38472 index be73e9d..7fbf140 100644
38473 --- a/drivers/block/cpqarray.h
38474 +++ b/drivers/block/cpqarray.h
38475 @@ -99,7 +99,7 @@ struct ctlr_info {
38476 drv_info_t drv[NWD];
38477 struct proc_dir_entry *proc;
38479 - struct access_method access;
38480 + struct access_method *access;
38484 diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
38485 index 434c77d..6d3219a 100644
38486 --- a/drivers/block/drbd/drbd_bitmap.c
38487 +++ b/drivers/block/drbd/drbd_bitmap.c
38488 @@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
38489 submit_bio(rw, bio);
38490 /* this should not count as user activity and cause the
38491 * resync to throttle -- see drbd_rs_should_slow_down(). */
38492 - atomic_add(len >> 9, &device->rs_sect_ev);
38493 + atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
38497 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
38498 index b905e98..0812ed8 100644
38499 --- a/drivers/block/drbd/drbd_int.h
38500 +++ b/drivers/block/drbd/drbd_int.h
38501 @@ -385,7 +385,7 @@ struct drbd_epoch {
38502 struct drbd_connection *connection;
38503 struct list_head list;
38504 unsigned int barrier_nr;
38505 - atomic_t epoch_size; /* increased on every request added. */
38506 + atomic_unchecked_t epoch_size; /* increased on every request added. */
38507 atomic_t active; /* increased on every req. added, and dec on every finished. */
38508 unsigned long flags;
38510 @@ -946,7 +946,7 @@ struct drbd_device {
38511 unsigned int al_tr_number;
38513 wait_queue_head_t seq_wait;
38514 - atomic_t packet_seq;
38515 + atomic_unchecked_t packet_seq;
38516 unsigned int peer_seq;
38517 spinlock_t peer_seq_lock;
38518 unsigned long comm_bm_set; /* communicated number of set bits. */
38519 @@ -955,8 +955,8 @@ struct drbd_device {
38520 struct mutex own_state_mutex;
38521 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
38522 char congestion_reason; /* Why we where congested... */
38523 - atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38524 - atomic_t rs_sect_ev; /* for submitted resync data rate, both */
38525 + atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38526 + atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
38527 int rs_last_sect_ev; /* counter to compare with */
38528 int rs_last_events; /* counter of read or write "events" (unit sectors)
38529 * on the lower level device when we last looked. */
38530 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
38531 index 81fde9e..9948c05 100644
38532 --- a/drivers/block/drbd/drbd_main.c
38533 +++ b/drivers/block/drbd/drbd_main.c
38534 @@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
38535 p->sector = sector;
38536 p->block_id = block_id;
38537 p->blksize = blksize;
38538 - p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
38539 + p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
38540 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
38543 @@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
38545 p->sector = cpu_to_be64(req->i.sector);
38546 p->block_id = (unsigned long)req;
38547 - p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
38548 + p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
38549 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
38550 if (device->state.conn >= C_SYNC_SOURCE &&
38551 device->state.conn <= C_PAUSED_SYNC_T)
38552 @@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
38553 atomic_set(&device->unacked_cnt, 0);
38554 atomic_set(&device->local_cnt, 0);
38555 atomic_set(&device->pp_in_use_by_net, 0);
38556 - atomic_set(&device->rs_sect_in, 0);
38557 - atomic_set(&device->rs_sect_ev, 0);
38558 + atomic_set_unchecked(&device->rs_sect_in, 0);
38559 + atomic_set_unchecked(&device->rs_sect_ev, 0);
38560 atomic_set(&device->ap_in_flight, 0);
38561 atomic_set(&device->md_io.in_use, 0);
38563 @@ -2683,8 +2683,8 @@ void drbd_destroy_connection(struct kref *kref)
38564 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
38565 struct drbd_resource *resource = connection->resource;
38567 - if (atomic_read(&connection->current_epoch->epoch_size) != 0)
38568 - drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
38569 + if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
38570 + drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
38571 kfree(connection->current_epoch);
38573 idr_destroy(&connection->peer_devices);
38574 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
38575 index 74df8cf..e41fc24 100644
38576 --- a/drivers/block/drbd/drbd_nl.c
38577 +++ b/drivers/block/drbd/drbd_nl.c
38578 @@ -3637,13 +3637,13 @@ finish:
38580 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
38582 - static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38583 + static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38584 struct sk_buff *msg;
38585 struct drbd_genlmsghdr *d_out;
38589 - seq = atomic_inc_return(&drbd_genl_seq);
38590 + seq = atomic_inc_return_unchecked(&drbd_genl_seq);
38591 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
38594 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
38595 index cee2035..22f66bd 100644
38596 --- a/drivers/block/drbd/drbd_receiver.c
38597 +++ b/drivers/block/drbd/drbd_receiver.c
38598 @@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
38599 struct drbd_device *device = peer_device->device;
38602 - atomic_set(&device->packet_seq, 0);
38603 + atomic_set_unchecked(&device->packet_seq, 0);
38604 device->peer_seq = 0;
38606 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
38607 @@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38611 - epoch_size = atomic_read(&epoch->epoch_size);
38612 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
38614 switch (ev & ~EV_CLEANUP) {
38616 @@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38620 - atomic_set(&epoch->epoch_size, 0);
38621 + atomic_set_unchecked(&epoch->epoch_size, 0);
38622 /* atomic_set(&epoch->active, 0); is already zero */
38623 if (rv == FE_STILL_LIVE)
38625 @@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38626 conn_wait_active_ee_empty(connection);
38627 drbd_flush(connection);
38629 - if (atomic_read(&connection->current_epoch->epoch_size)) {
38630 + if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38631 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
38634 @@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38638 - atomic_set(&epoch->epoch_size, 0);
38639 + atomic_set_unchecked(&epoch->epoch_size, 0);
38640 atomic_set(&epoch->active, 0);
38642 spin_lock(&connection->epoch_lock);
38643 - if (atomic_read(&connection->current_epoch->epoch_size)) {
38644 + if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38645 list_add(&epoch->list, &connection->current_epoch->list);
38646 connection->current_epoch = epoch;
38647 connection->epochs++;
38648 @@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
38649 list_add_tail(&peer_req->w.list, &device->sync_ee);
38650 spin_unlock_irq(&device->resource->req_lock);
38652 - atomic_add(pi->size >> 9, &device->rs_sect_ev);
38653 + atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
38654 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
38657 @@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
38658 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38661 - atomic_add(pi->size >> 9, &device->rs_sect_in);
38662 + atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
38666 @@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38668 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
38669 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38670 - atomic_inc(&connection->current_epoch->epoch_size);
38671 + atomic_inc_unchecked(&connection->current_epoch->epoch_size);
38672 err2 = drbd_drain_block(peer_device, pi->size);
38675 @@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38677 spin_lock(&connection->epoch_lock);
38678 peer_req->epoch = connection->current_epoch;
38679 - atomic_inc(&peer_req->epoch->epoch_size);
38680 + atomic_inc_unchecked(&peer_req->epoch->epoch_size);
38681 atomic_inc(&peer_req->epoch->active);
38682 spin_unlock(&connection->epoch_lock);
38684 @@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
38686 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38687 (int)part_stat_read(&disk->part0, sectors[1]) -
38688 - atomic_read(&device->rs_sect_ev);
38689 + atomic_read_unchecked(&device->rs_sect_ev);
38691 if (atomic_read(&device->ap_actlog_cnt)
38692 || curr_events - device->rs_last_events > 64) {
38693 @@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38694 device->use_csums = true;
38695 } else if (pi->cmd == P_OV_REPLY) {
38696 /* track progress, we may need to throttle */
38697 - atomic_add(size >> 9, &device->rs_sect_in);
38698 + atomic_add_unchecked(size >> 9, &device->rs_sect_in);
38699 peer_req->w.cb = w_e_end_ov_reply;
38700 dec_rs_pending(device);
38701 /* drbd_rs_begin_io done when we sent this request,
38702 @@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38706 - atomic_add(size >> 9, &device->rs_sect_ev);
38707 + atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38710 update_receiver_timing_details(connection, drbd_submit_peer_request);
38711 @@ -4564,7 +4564,7 @@ struct data_cmd {
38712 int expect_payload;
38714 int (*fn)(struct drbd_connection *, struct packet_info *);
38718 static struct data_cmd drbd_cmd_handler[] = {
38719 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
38720 @@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
38721 if (!list_empty(&connection->current_epoch->list))
38722 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
38723 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
38724 - atomic_set(&connection->current_epoch->epoch_size, 0);
38725 + atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
38726 connection->send.seen_any_write_yet = false;
38728 drbd_info(connection, "Connection closed\n");
38729 @@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
38732 dec_rs_pending(device);
38733 - atomic_add(blksize >> 9, &device->rs_sect_in);
38734 + atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
38738 @@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
38739 struct asender_cmd {
38741 int (*fn)(struct drbd_connection *connection, struct packet_info *);
38745 static struct asender_cmd asender_tbl[] = {
38746 [P_PING] = { 0, got_Ping },
38747 diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
38748 index d0fae55..4469096 100644
38749 --- a/drivers/block/drbd/drbd_worker.c
38750 +++ b/drivers/block/drbd/drbd_worker.c
38751 @@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
38752 list_add_tail(&peer_req->w.list, &device->read_ee);
38753 spin_unlock_irq(&device->resource->req_lock);
38755 - atomic_add(size >> 9, &device->rs_sect_ev);
38756 + atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38757 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
38760 @@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
38761 unsigned int sect_in; /* Number of sectors that came in since the last turn */
38764 - sect_in = atomic_xchg(&device->rs_sect_in, 0);
38765 + sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
38766 device->rs_in_flight -= sect_in;
38769 @@ -1595,8 +1595,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
38770 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
38771 struct fifo_buffer *plan;
38773 - atomic_set(&device->rs_sect_in, 0);
38774 - atomic_set(&device->rs_sect_ev, 0);
38775 + atomic_set_unchecked(&device->rs_sect_in, 0);
38776 + atomic_set_unchecked(&device->rs_sect_ev, 0);
38777 device->rs_in_flight = 0;
38778 device->rs_last_events =
38779 (int)part_stat_read(&disk->part0, sectors[0]) +
38780 diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
38781 index 09e628da..7607aaa 100644
38782 --- a/drivers/block/pktcdvd.c
38783 +++ b/drivers/block/pktcdvd.c
38784 @@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
38786 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
38788 - return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
38789 + return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
38793 @@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
38796 pd->settings.fp = ti.fp;
38797 - pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
38798 + pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
38801 pd->nwa = be32_to_cpu(ti.next_writable);
38802 diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
38803 index ec6c5c6..820ee2abc 100644
38804 --- a/drivers/block/rbd.c
38805 +++ b/drivers/block/rbd.c
38807 * If the counter is already at its maximum value returns
38808 * -EINVAL without updating it.
38810 -static int atomic_inc_return_safe(atomic_t *v)
38811 +static int __intentional_overflow(-1) atomic_inc_return_safe(atomic_t *v)
38813 unsigned int counter;
38815 diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
38816 index e5565fb..71be10b4 100644
38817 --- a/drivers/block/smart1,2.h
38818 +++ b/drivers/block/smart1,2.h
38819 @@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
38822 static struct access_method smart4_access = {
38823 - smart4_submit_command,
38824 - smart4_intr_mask,
38825 - smart4_fifo_full,
38826 - smart4_intr_pending,
38827 - smart4_completed,
38828 + .submit_command = smart4_submit_command,
38829 + .set_intr_mask = smart4_intr_mask,
38830 + .fifo_full = smart4_fifo_full,
38831 + .intr_pending = smart4_intr_pending,
38832 + .command_completed = smart4_completed,
38836 @@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
38839 static struct access_method smart2_access = {
38840 - smart2_submit_command,
38841 - smart2_intr_mask,
38842 - smart2_fifo_full,
38843 - smart2_intr_pending,
38844 - smart2_completed,
38845 + .submit_command = smart2_submit_command,
38846 + .set_intr_mask = smart2_intr_mask,
38847 + .fifo_full = smart2_fifo_full,
38848 + .intr_pending = smart2_intr_pending,
38849 + .command_completed = smart2_completed,
38853 @@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
38856 static struct access_method smart2e_access = {
38857 - smart2e_submit_command,
38858 - smart2e_intr_mask,
38859 - smart2e_fifo_full,
38860 - smart2e_intr_pending,
38861 - smart2e_completed,
38862 + .submit_command = smart2e_submit_command,
38863 + .set_intr_mask = smart2e_intr_mask,
38864 + .fifo_full = smart2e_fifo_full,
38865 + .intr_pending = smart2e_intr_pending,
38866 + .command_completed = smart2e_completed,
38870 @@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
38873 static struct access_method smart1_access = {
38874 - smart1_submit_command,
38875 - smart1_intr_mask,
38876 - smart1_fifo_full,
38877 - smart1_intr_pending,
38878 - smart1_completed,
38879 + .submit_command = smart1_submit_command,
38880 + .set_intr_mask = smart1_intr_mask,
38881 + .fifo_full = smart1_fifo_full,
38882 + .intr_pending = smart1_intr_pending,
38883 + .command_completed = smart1_completed,
38885 diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
38886 index 55c135b..9f8d60c 100644
38887 --- a/drivers/bluetooth/btwilink.c
38888 +++ b/drivers/bluetooth/btwilink.c
38889 @@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
38891 static int bt_ti_probe(struct platform_device *pdev)
38893 - static struct ti_st *hst;
38894 + struct ti_st *hst;
38895 struct hci_dev *hdev;
38898 diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
38899 index 5d28a45..a538f90 100644
38900 --- a/drivers/cdrom/cdrom.c
38901 +++ b/drivers/cdrom/cdrom.c
38902 @@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
38903 ENSURE(reset, CDC_RESET);
38904 ENSURE(generic_packet, CDC_GENERIC_PACKET);
38906 - cdo->n_minors = 0;
38907 cdi->options = CDO_USE_FFLAGS;
38909 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
38910 @@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
38912 cdi->cdda_method = CDDA_OLD;
38914 - if (!cdo->generic_packet)
38915 - cdo->generic_packet = cdrom_dummy_generic_packet;
38916 + if (!cdo->generic_packet) {
38917 + pax_open_kernel();
38918 + *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
38919 + pax_close_kernel();
38922 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
38923 mutex_lock(&cdrom_mutex);
38924 @@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
38928 - cdi->ops->n_minors--;
38929 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
38932 @@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
38936 - cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38937 + cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38941 @@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
38942 struct cdrom_device_info *cdi;
38945 - ret = scnprintf(info + *pos, max_size - *pos, header);
38946 + ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
38950 diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
38951 index 584bc31..e64a12c 100644
38952 --- a/drivers/cdrom/gdrom.c
38953 +++ b/drivers/cdrom/gdrom.c
38954 @@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
38955 .audio_ioctl = gdrom_audio_ioctl,
38956 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
38957 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
38961 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
38962 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
38963 index a4af822..ed58cd1 100644
38964 --- a/drivers/char/Kconfig
38965 +++ b/drivers/char/Kconfig
38966 @@ -17,7 +17,8 @@ config DEVMEM
38969 bool "/dev/kmem virtual device support"
38972 + depends on !GRKERNSEC_KMEM
38974 Say Y here if you want to support the /dev/kmem device. The
38975 /dev/kmem device is rarely used, but can be used for certain
38976 @@ -586,6 +587,7 @@ config DEVPORT
38979 depends on ISA || PCI
38980 + depends on !GRKERNSEC_KMEM
38983 source "drivers/s390/char/Kconfig"
38984 diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
38985 index a48e05b..6bac831 100644
38986 --- a/drivers/char/agp/compat_ioctl.c
38987 +++ b/drivers/char/agp/compat_ioctl.c
38988 @@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
38992 - if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
38993 + if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
38994 sizeof(*usegment) * ureserve.seg_count)) {
38997 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
38998 index 09f17eb..8531d2f 100644
38999 --- a/drivers/char/agp/frontend.c
39000 +++ b/drivers/char/agp/frontend.c
39001 @@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
39002 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
39005 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
39006 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
39009 client = agp_find_client_by_pid(reserve.pid);
39010 @@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
39011 if (segment == NULL)
39014 - if (copy_from_user(segment, (void __user *) reserve.seg_list,
39015 + if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
39016 sizeof(struct agp_segment) * reserve.seg_count)) {
39019 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
39020 index 4f94375..413694e 100644
39021 --- a/drivers/char/genrtc.c
39022 +++ b/drivers/char/genrtc.c
39023 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
39027 + memset(&pll, 0, sizeof(pll));
39028 if (get_rtc_pll(&pll))
39031 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
39032 index 5c0baa9..44011b1 100644
39033 --- a/drivers/char/hpet.c
39034 +++ b/drivers/char/hpet.c
39035 @@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
39039 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
39040 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
39041 struct hpet_info *info)
39043 struct hpet_timer __iomem *timer;
39044 diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
39045 index a43048b..14724d5 100644
39046 --- a/drivers/char/i8k.c
39047 +++ b/drivers/char/i8k.c
39048 @@ -790,7 +790,7 @@ static const struct i8k_config_data i8k_config_data[] = {
39052 -static struct dmi_system_id i8k_dmi_table[] __initdata = {
39053 +static const struct dmi_system_id i8k_dmi_table[] __initconst = {
39055 .ident = "Dell Inspiron",
39057 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
39058 index bf75f63..359fa10 100644
39059 --- a/drivers/char/ipmi/ipmi_msghandler.c
39060 +++ b/drivers/char/ipmi/ipmi_msghandler.c
39061 @@ -436,7 +436,7 @@ struct ipmi_smi {
39062 struct proc_dir_entry *proc_dir;
39063 char proc_dir_name[10];
39065 - atomic_t stats[IPMI_NUM_STATS];
39066 + atomic_unchecked_t stats[IPMI_NUM_STATS];
39069 * run_to_completion duplicate of smb_info, smi_info
39070 @@ -468,9 +468,9 @@ static LIST_HEAD(smi_watchers);
39071 static DEFINE_MUTEX(smi_watchers_mutex);
39073 #define ipmi_inc_stat(intf, stat) \
39074 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
39075 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
39076 #define ipmi_get_stat(intf, stat) \
39077 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
39078 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
39080 static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI",
39081 "ACPI", "SMBIOS", "PCI",
39082 @@ -2828,7 +2828,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
39083 INIT_LIST_HEAD(&intf->cmd_rcvrs);
39084 init_waitqueue_head(&intf->waitq);
39085 for (i = 0; i < IPMI_NUM_STATS; i++)
39086 - atomic_set(&intf->stats[i], 0);
39087 + atomic_set_unchecked(&intf->stats[i], 0);
39089 intf->proc_dir = NULL;
39091 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
39092 index 8a45e92..e41b1c7 100644
39093 --- a/drivers/char/ipmi/ipmi_si_intf.c
39094 +++ b/drivers/char/ipmi/ipmi_si_intf.c
39095 @@ -289,7 +289,7 @@ struct smi_info {
39096 unsigned char slave_addr;
39098 /* Counters and things for the proc filesystem. */
39099 - atomic_t stats[SI_NUM_STATS];
39100 + atomic_unchecked_t stats[SI_NUM_STATS];
39102 struct task_struct *thread;
39104 @@ -298,9 +298,9 @@ struct smi_info {
39107 #define smi_inc_stat(smi, stat) \
39108 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
39109 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
39110 #define smi_get_stat(smi, stat) \
39111 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
39112 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
39114 #define SI_MAX_PARMS 4
39116 @@ -3500,7 +3500,7 @@ static int try_smi_init(struct smi_info *new_smi)
39117 atomic_set(&new_smi->req_events, 0);
39118 new_smi->run_to_completion = false;
39119 for (i = 0; i < SI_NUM_STATS; i++)
39120 - atomic_set(&new_smi->stats[i], 0);
39121 + atomic_set_unchecked(&new_smi->stats[i], 0);
39123 new_smi->interrupt_disabled = true;
39124 atomic_set(&new_smi->need_watch, 0);
39125 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
39126 index 6b1721f..fda9398 100644
39127 --- a/drivers/char/mem.c
39128 +++ b/drivers/char/mem.c
39130 #include <linux/raw.h>
39131 #include <linux/tty.h>
39132 #include <linux/capability.h>
39133 +#include <linux/security.h>
39134 #include <linux/ptrace.h>
39135 #include <linux/device.h>
39136 #include <linux/highmem.h>
39139 #define DEVPORT_MINOR 4
39141 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39142 +extern const struct file_operations grsec_fops;
39145 static inline unsigned long size_inside_page(unsigned long start,
39146 unsigned long size)
39148 @@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
39150 while (cursor < to) {
39151 if (!devmem_is_allowed(pfn)) {
39152 +#ifdef CONFIG_GRKERNSEC_KMEM
39153 + gr_handle_mem_readwrite(from, to);
39156 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
39157 current->comm, from, to);
39161 cursor += PAGE_SIZE;
39162 @@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
39166 +#elif defined(CONFIG_GRKERNSEC_KMEM)
39167 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
39172 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
39174 @@ -124,7 +138,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
39177 while (count > 0) {
39178 - unsigned long remaining;
39179 + unsigned long remaining = 0;
39182 sz = size_inside_page(p, count);
39184 @@ -140,7 +155,24 @@ static ssize_t read_mem(struct file *file, char __user *buf,
39188 - remaining = copy_to_user(buf, ptr, sz);
39189 +#ifdef CONFIG_PAX_USERCOPY
39190 + temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
39192 + unxlate_dev_mem_ptr(p, ptr);
39195 + remaining = probe_kernel_read(temp, ptr, sz);
39201 + remaining = copy_to_user(buf, temp, sz);
39203 +#ifdef CONFIG_PAX_USERCOPY
39207 unxlate_dev_mem_ptr(p, ptr);
39210 @@ -380,9 +412,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39211 size_t count, loff_t *ppos)
39213 unsigned long p = *ppos;
39214 - ssize_t low_count, read, sz;
39215 + ssize_t low_count, read, sz, err = 0;
39216 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
39220 if (p < (unsigned long) high_memory) {
39221 @@ -404,6 +435,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39224 while (low_count > 0) {
39227 sz = size_inside_page(p, low_count);
39230 @@ -413,7 +446,23 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39232 kbuf = xlate_dev_kmem_ptr((void *)p);
39234 - if (copy_to_user(buf, kbuf, sz))
39235 +#ifdef CONFIG_PAX_USERCOPY
39236 + temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
39239 + err = probe_kernel_read(temp, kbuf, sz);
39245 + err = copy_to_user(buf, temp, sz);
39247 +#ifdef CONFIG_PAX_USERCOPY
39255 @@ -802,6 +851,9 @@ static const struct memdev {
39256 #ifdef CONFIG_PRINTK
39257 [11] = { "kmsg", 0644, &kmsg_fops, 0 },
39259 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39260 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, 0 },
39264 static int memory_open(struct inode *inode, struct file *filp)
39265 @@ -863,7 +915,7 @@ static int __init chr_dev_init(void)
39268 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
39269 - NULL, devlist[minor].name);
39270 + NULL, "%s", devlist[minor].name);
39274 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
39275 index 9df78e2..01ba9ae 100644
39276 --- a/drivers/char/nvram.c
39277 +++ b/drivers/char/nvram.c
39278 @@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
39280 spin_unlock_irq(&rtc_lock);
39282 - if (copy_to_user(buf, contents, tmp - contents))
39283 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
39287 diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
39288 index 0ea9986..e7b07e4 100644
39289 --- a/drivers/char/pcmcia/synclink_cs.c
39290 +++ b/drivers/char/pcmcia/synclink_cs.c
39291 @@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39293 if (debug_level >= DEBUG_LEVEL_INFO)
39294 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
39295 - __FILE__, __LINE__, info->device_name, port->count);
39296 + __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
39298 if (tty_port_close_start(port, tty, filp) == 0)
39300 @@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39302 if (debug_level >= DEBUG_LEVEL_INFO)
39303 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
39304 - tty->driver->name, port->count);
39305 + tty->driver->name, atomic_read(&port->count));
39308 /* Wait until the transmitter is empty.
39309 @@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39311 if (debug_level >= DEBUG_LEVEL_INFO)
39312 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
39313 - __FILE__, __LINE__, tty->driver->name, port->count);
39314 + __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
39316 /* If port is closing, signal caller to try again */
39317 if (port->flags & ASYNC_CLOSING){
39318 @@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39321 spin_lock(&port->lock);
39323 + atomic_inc(&port->count);
39324 spin_unlock(&port->lock);
39325 spin_unlock_irqrestore(&info->netlock, flags);
39327 - if (port->count == 1) {
39328 + if (atomic_read(&port->count) == 1) {
39329 /* 1st open on this device, init hardware */
39330 retval = startup(info, tty);
39332 @@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
39333 unsigned short new_crctype;
39335 /* return error if TTY interface open */
39336 - if (info->port.count)
39337 + if (atomic_read(&info->port.count))
39341 @@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
39343 /* arbitrate between network and tty opens */
39344 spin_lock_irqsave(&info->netlock, flags);
39345 - if (info->port.count != 0 || info->netcount != 0) {
39346 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
39347 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
39348 spin_unlock_irqrestore(&info->netlock, flags);
39350 @@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39351 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
39353 /* return error if TTY interface open */
39354 - if (info->port.count)
39355 + if (atomic_read(&info->port.count))
39358 if (cmd != SIOCWANDEV)
39359 diff --git a/drivers/char/random.c b/drivers/char/random.c
39360 index 9cd6968..6416f00 100644
39361 --- a/drivers/char/random.c
39362 +++ b/drivers/char/random.c
39363 @@ -289,9 +289,6 @@
39365 * To allow fractional bits to be tracked, the entropy_count field is
39366 * denominated in units of 1/8th bits.
39368 - * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
39369 - * credit_entropy_bits() needs to be 64 bits wide.
39371 #define ENTROPY_SHIFT 3
39372 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
39373 @@ -439,9 +436,9 @@ struct entropy_store {
39376 static void push_to_pool(struct work_struct *work);
39377 -static __u32 input_pool_data[INPUT_POOL_WORDS];
39378 -static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
39379 -static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
39380 +static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
39381 +static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39382 +static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39384 static struct entropy_store input_pool = {
39385 .poolinfo = &poolinfo_table[0],
39386 @@ -635,7 +632,7 @@ retry:
39387 /* The +2 corresponds to the /4 in the denominator */
39390 - unsigned int anfrac = min(pnfrac, pool_size/2);
39391 + u64 anfrac = min(pnfrac, pool_size/2);
39393 ((pool_size - entropy_count)*anfrac*3) >> s;
39395 @@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
39397 extract_buf(r, tmp);
39398 i = min_t(int, nbytes, EXTRACT_SIZE);
39399 - if (copy_to_user(buf, tmp, i)) {
39400 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
39404 @@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
39405 static int proc_do_uuid(struct ctl_table *table, int write,
39406 void __user *buffer, size_t *lenp, loff_t *ppos)
39408 - struct ctl_table fake_table;
39409 + ctl_table_no_const fake_table;
39410 unsigned char buf[64], tmp_uuid[16], *uuid;
39412 uuid = table->data;
39413 @@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
39414 static int proc_do_entropy(struct ctl_table *table, int write,
39415 void __user *buffer, size_t *lenp, loff_t *ppos)
39417 - struct ctl_table fake_table;
39418 + ctl_table_no_const fake_table;
39421 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
39422 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
39423 index e496dae..3db53b6 100644
39424 --- a/drivers/char/sonypi.c
39425 +++ b/drivers/char/sonypi.c
39428 #include <asm/uaccess.h>
39429 #include <asm/io.h>
39430 +#include <asm/local.h>
39432 #include <linux/sonypi.h>
39434 @@ -490,7 +491,7 @@ static struct sonypi_device {
39435 spinlock_t fifo_lock;
39436 wait_queue_head_t fifo_proc_list;
39437 struct fasync_struct *fifo_async;
39439 + local_t open_count;
39441 struct input_dev *input_jog_dev;
39442 struct input_dev *input_key_dev;
39443 @@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
39444 static int sonypi_misc_release(struct inode *inode, struct file *file)
39446 mutex_lock(&sonypi_device.lock);
39447 - sonypi_device.open_count--;
39448 + local_dec(&sonypi_device.open_count);
39449 mutex_unlock(&sonypi_device.lock);
39452 @@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
39454 mutex_lock(&sonypi_device.lock);
39455 /* Flush input queue on first open */
39456 - if (!sonypi_device.open_count)
39457 + if (!local_read(&sonypi_device.open_count))
39458 kfifo_reset(&sonypi_device.fifo);
39459 - sonypi_device.open_count++;
39460 + local_inc(&sonypi_device.open_count);
39461 mutex_unlock(&sonypi_device.lock);
39464 @@ -1491,7 +1492,7 @@ static struct platform_driver sonypi_driver = {
39466 static struct platform_device *sonypi_platform_device;
39468 -static struct dmi_system_id __initdata sonypi_dmi_table[] = {
39469 +static const struct dmi_system_id __initconst sonypi_dmi_table[] = {
39471 .ident = "Sony Vaio",
39473 diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
39474 index 565a947..dcdc06e 100644
39475 --- a/drivers/char/tpm/tpm_acpi.c
39476 +++ b/drivers/char/tpm/tpm_acpi.c
39477 @@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
39478 virt = acpi_os_map_iomem(start, len);
39480 kfree(log->bios_event_log);
39481 + log->bios_event_log = NULL;
39482 printk("%s: ERROR - Unable to map memory\n", __func__);
39486 - memcpy_fromio(log->bios_event_log, virt, len);
39487 + memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
39489 acpi_os_unmap_iomem(virt, len);
39491 diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
39492 index 3a56a13..f8cbd25 100644
39493 --- a/drivers/char/tpm/tpm_eventlog.c
39494 +++ b/drivers/char/tpm/tpm_eventlog.c
39495 @@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
39498 if ((event->event_type == 0 && event->event_size == 0) ||
39499 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
39500 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
39504 @@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
39507 if ((event->event_type == 0 && event->event_size == 0) ||
39508 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
39509 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
39513 @@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
39516 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
39517 - seq_putc(m, data[i]);
39518 + if (!seq_putc(m, data[i]))
39523 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
39524 index 50754d20..9561cdc 100644
39525 --- a/drivers/char/virtio_console.c
39526 +++ b/drivers/char/virtio_console.c
39527 @@ -685,7 +685,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
39531 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
39532 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
39536 @@ -789,7 +789,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
39537 if (!port_has_data(port) && !port->host_connected)
39540 - return fill_readbuf(port, ubuf, count, true);
39541 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
39544 static int wait_port_writable(struct port *port, bool nonblock)
39545 diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
39546 index 956b7e5..b655045 100644
39547 --- a/drivers/clk/clk-composite.c
39548 +++ b/drivers/clk/clk-composite.c
39549 @@ -197,7 +197,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
39551 struct clk_init_data init;
39552 struct clk_composite *composite;
39553 - struct clk_ops *clk_composite_ops;
39554 + clk_ops_no_const *clk_composite_ops;
39556 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
39558 diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
39559 index e4c7538..99c50cd 100644
39560 --- a/drivers/clk/samsung/clk.h
39561 +++ b/drivers/clk/samsung/clk.h
39562 @@ -260,7 +260,7 @@ struct samsung_gate_clock {
39563 #define GATE_DA(_id, dname, cname, pname, o, b, f, gf, a) \
39564 __GATE(_id, dname, cname, pname, o, b, f, gf, a)
39566 -#define PNAME(x) static const char *x[] __initdata
39567 +#define PNAME(x) static const char * const x[] __initconst
39570 * struct samsung_clk_reg_dump: register dump of clock controller registers.
39571 diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
39572 index dd3a78c..386d49c 100644
39573 --- a/drivers/clk/socfpga/clk-gate.c
39574 +++ b/drivers/clk/socfpga/clk-gate.c
39576 #include <linux/mfd/syscon.h>
39577 #include <linux/of.h>
39578 #include <linux/regmap.h>
39579 +#include <asm/pgtable.h>
39583 @@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
39587 -static struct clk_ops gateclk_ops = {
39588 +static clk_ops_no_const gateclk_ops __read_only = {
39589 .prepare = socfpga_clk_prepare,
39590 .recalc_rate = socfpga_clk_recalc_rate,
39591 .get_parent = socfpga_clk_get_parent,
39592 @@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
39593 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
39594 socfpga_clk->hw.bit_idx = clk_gate[1];
39596 - gateclk_ops.enable = clk_gate_ops.enable;
39597 - gateclk_ops.disable = clk_gate_ops.disable;
39598 + pax_open_kernel();
39599 + *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
39600 + *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
39601 + pax_close_kernel();
39604 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
39605 diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
39606 index de6da95..c98278b 100644
39607 --- a/drivers/clk/socfpga/clk-pll.c
39608 +++ b/drivers/clk/socfpga/clk-pll.c
39610 #include <linux/io.h>
39611 #include <linux/of.h>
39612 #include <linux/of_address.h>
39613 +#include <asm/pgtable.h>
39617 @@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
39618 CLK_MGR_PLL_CLK_SRC_MASK;
39621 -static struct clk_ops clk_pll_ops = {
39622 +static clk_ops_no_const clk_pll_ops __read_only = {
39623 .recalc_rate = clk_pll_recalc_rate,
39624 .get_parent = clk_pll_get_parent,
39626 @@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
39627 pll_clk->hw.hw.init = &init;
39629 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
39630 - clk_pll_ops.enable = clk_gate_ops.enable;
39631 - clk_pll_ops.disable = clk_gate_ops.disable;
39632 + pax_open_kernel();
39633 + *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
39634 + *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
39635 + pax_close_kernel();
39637 clk = clk_register(NULL, &pll_clk->hw.hw);
39638 if (WARN_ON(IS_ERR(clk))) {
39639 diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
39640 index b0c18ed..1713a80 100644
39641 --- a/drivers/cpufreq/acpi-cpufreq.c
39642 +++ b/drivers/cpufreq/acpi-cpufreq.c
39643 @@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39644 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
39645 per_cpu(acfreq_data, cpu) = data;
39647 - if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
39648 - acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39649 + if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
39650 + pax_open_kernel();
39651 + *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39652 + pax_close_kernel();
39655 result = acpi_processor_register_performance(data->acpi_data, cpu);
39657 @@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39658 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
39660 case ACPI_ADR_SPACE_FIXED_HARDWARE:
39661 - acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39662 + pax_open_kernel();
39663 + *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39664 + pax_close_kernel();
39668 @@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
39672 - acpi_cpufreq_driver.boost_supported = true;
39673 - acpi_cpufreq_driver.boost_enabled = boost_state(0);
39674 + pax_open_kernel();
39675 + *(bool *)&acpi_cpufreq_driver.boost_supported = true;
39676 + *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
39677 + pax_close_kernel();
39679 cpu_notifier_register_begin();
39681 diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
39682 index bab67db..91af7e3 100644
39683 --- a/drivers/cpufreq/cpufreq-dt.c
39684 +++ b/drivers/cpufreq/cpufreq-dt.c
39685 @@ -392,7 +392,9 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
39686 if (!IS_ERR(cpu_reg))
39687 regulator_put(cpu_reg);
39689 - dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39690 + pax_open_kernel();
39691 + *(void **)&dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39692 + pax_close_kernel();
39694 ret = cpufreq_register_driver(&dt_cpufreq_driver);
39696 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
39697 index 8ae655c..3141442 100644
39698 --- a/drivers/cpufreq/cpufreq.c
39699 +++ b/drivers/cpufreq/cpufreq.c
39700 @@ -2108,7 +2108,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
39703 mutex_lock(&cpufreq_governor_mutex);
39704 - list_del(&governor->governor_list);
39705 + pax_list_del(&governor->governor_list);
39706 mutex_unlock(&cpufreq_governor_mutex);
39709 @@ -2323,7 +2323,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
39713 -static struct notifier_block __refdata cpufreq_cpu_notifier = {
39714 +static struct notifier_block cpufreq_cpu_notifier = {
39715 .notifier_call = cpufreq_cpu_callback,
39718 @@ -2363,13 +2363,17 @@ int cpufreq_boost_trigger_state(int state)
39721 write_lock_irqsave(&cpufreq_driver_lock, flags);
39722 - cpufreq_driver->boost_enabled = state;
39723 + pax_open_kernel();
39724 + *(bool *)&cpufreq_driver->boost_enabled = state;
39725 + pax_close_kernel();
39726 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39728 ret = cpufreq_driver->set_boost(state);
39730 write_lock_irqsave(&cpufreq_driver_lock, flags);
39731 - cpufreq_driver->boost_enabled = !state;
39732 + pax_open_kernel();
39733 + *(bool *)&cpufreq_driver->boost_enabled = !state;
39734 + pax_close_kernel();
39735 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39737 pr_err("%s: Cannot %s BOOST\n",
39738 @@ -2434,16 +2438,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39739 cpufreq_driver = driver_data;
39740 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39742 - if (driver_data->setpolicy)
39743 - driver_data->flags |= CPUFREQ_CONST_LOOPS;
39744 + if (driver_data->setpolicy) {
39745 + pax_open_kernel();
39746 + *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
39747 + pax_close_kernel();
39750 if (cpufreq_boost_supported()) {
39752 * Check if driver provides function to enable boost -
39753 * if not, use cpufreq_boost_set_sw as default
39755 - if (!cpufreq_driver->set_boost)
39756 - cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39757 + if (!cpufreq_driver->set_boost) {
39758 + pax_open_kernel();
39759 + *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39760 + pax_close_kernel();
39763 ret = cpufreq_sysfs_create_file(&boost.attr);
39765 diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
39766 index 1b44496..b80ff5e 100644
39767 --- a/drivers/cpufreq/cpufreq_governor.c
39768 +++ b/drivers/cpufreq/cpufreq_governor.c
39769 @@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39770 struct dbs_data *dbs_data;
39771 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
39772 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
39773 - struct od_ops *od_ops = NULL;
39774 + const struct od_ops *od_ops = NULL;
39775 struct od_dbs_tuners *od_tuners = NULL;
39776 struct cs_dbs_tuners *cs_tuners = NULL;
39777 struct cpu_dbs_common_info *cpu_cdbs;
39778 @@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39780 if ((cdata->governor == GOV_CONSERVATIVE) &&
39781 (!policy->governor->initialized)) {
39782 - struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39783 + const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39785 cpufreq_register_notifier(cs_ops->notifier_block,
39786 CPUFREQ_TRANSITION_NOTIFIER);
39787 @@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39789 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
39790 (policy->governor->initialized == 1)) {
39791 - struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39792 + const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39794 cpufreq_unregister_notifier(cs_ops->notifier_block,
39795 CPUFREQ_TRANSITION_NOTIFIER);
39796 diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
39797 index cc401d1..8197340 100644
39798 --- a/drivers/cpufreq/cpufreq_governor.h
39799 +++ b/drivers/cpufreq/cpufreq_governor.h
39800 @@ -212,7 +212,7 @@ struct common_dbs_data {
39801 void (*exit)(struct dbs_data *dbs_data);
39803 /* Governor specific ops, see below */
39805 + const void *gov_ops;
39808 /* Governor Per policy data */
39809 @@ -232,7 +232,7 @@ struct od_ops {
39810 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
39811 unsigned int freq_next, unsigned int relation);
39812 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
39817 struct notifier_block *notifier_block;
39818 diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
39819 index ad3f38f..8f086cd 100644
39820 --- a/drivers/cpufreq/cpufreq_ondemand.c
39821 +++ b/drivers/cpufreq/cpufreq_ondemand.c
39822 @@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
39824 define_get_cpu_dbs_routines(od_cpu_dbs_info);
39826 -static struct od_ops od_ops = {
39827 +static struct od_ops od_ops __read_only = {
39828 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
39829 .powersave_bias_target = generic_powersave_bias_target,
39830 .freq_increase = dbs_freq_increase,
39831 @@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
39832 (struct cpufreq_policy *, unsigned int, unsigned int),
39833 unsigned int powersave_bias)
39835 - od_ops.powersave_bias_target = f;
39836 + pax_open_kernel();
39837 + *(void **)&od_ops.powersave_bias_target = f;
39838 + pax_close_kernel();
39839 od_set_powersave_bias(powersave_bias);
39841 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
39843 void od_unregister_powersave_bias_handler(void)
39845 - od_ops.powersave_bias_target = generic_powersave_bias_target;
39846 + pax_open_kernel();
39847 + *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
39848 + pax_close_kernel();
39849 od_set_powersave_bias(0);
39851 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
39852 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
39853 index c45d274..0f469f7 100644
39854 --- a/drivers/cpufreq/intel_pstate.c
39855 +++ b/drivers/cpufreq/intel_pstate.c
39856 @@ -134,10 +134,10 @@ struct pstate_funcs {
39857 struct cpu_defaults {
39858 struct pstate_adjust_policy pid_policy;
39859 struct pstate_funcs funcs;
39863 static struct pstate_adjust_policy pid_params;
39864 -static struct pstate_funcs pstate_funcs;
39865 +static struct pstate_funcs *pstate_funcs;
39866 static int hwp_active;
39868 struct perf_limits {
39869 @@ -721,18 +721,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
39871 cpu->pstate.current_pstate = pstate;
39873 - pstate_funcs.set(cpu, pstate);
39874 + pstate_funcs->set(cpu, pstate);
39877 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
39879 - cpu->pstate.min_pstate = pstate_funcs.get_min();
39880 - cpu->pstate.max_pstate = pstate_funcs.get_max();
39881 - cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
39882 - cpu->pstate.scaling = pstate_funcs.get_scaling();
39883 + cpu->pstate.min_pstate = pstate_funcs->get_min();
39884 + cpu->pstate.max_pstate = pstate_funcs->get_max();
39885 + cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
39886 + cpu->pstate.scaling = pstate_funcs->get_scaling();
39888 - if (pstate_funcs.get_vid)
39889 - pstate_funcs.get_vid(cpu);
39890 + if (pstate_funcs->get_vid)
39891 + pstate_funcs->get_vid(cpu);
39892 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
39895 @@ -1056,15 +1056,15 @@ static unsigned int force_load;
39897 static int intel_pstate_msrs_not_valid(void)
39899 - if (!pstate_funcs.get_max() ||
39900 - !pstate_funcs.get_min() ||
39901 - !pstate_funcs.get_turbo())
39902 + if (!pstate_funcs->get_max() ||
39903 + !pstate_funcs->get_min() ||
39904 + !pstate_funcs->get_turbo())
39910 -static void copy_pid_params(struct pstate_adjust_policy *policy)
39911 +static void copy_pid_params(const struct pstate_adjust_policy *policy)
39913 pid_params.sample_rate_ms = policy->sample_rate_ms;
39914 pid_params.p_gain_pct = policy->p_gain_pct;
39915 @@ -1076,12 +1076,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
39917 static void copy_cpu_funcs(struct pstate_funcs *funcs)
39919 - pstate_funcs.get_max = funcs->get_max;
39920 - pstate_funcs.get_min = funcs->get_min;
39921 - pstate_funcs.get_turbo = funcs->get_turbo;
39922 - pstate_funcs.get_scaling = funcs->get_scaling;
39923 - pstate_funcs.set = funcs->set;
39924 - pstate_funcs.get_vid = funcs->get_vid;
39925 + pstate_funcs = funcs;
39928 #if IS_ENABLED(CONFIG_ACPI)
39929 diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
39930 index 529cfd9..0e28fff 100644
39931 --- a/drivers/cpufreq/p4-clockmod.c
39932 +++ b/drivers/cpufreq/p4-clockmod.c
39933 @@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39934 case 0x0F: /* Core Duo */
39935 case 0x16: /* Celeron Core */
39936 case 0x1C: /* Atom */
39937 - p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39938 + pax_open_kernel();
39939 + *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39940 + pax_close_kernel();
39941 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
39942 case 0x0D: /* Pentium M (Dothan) */
39943 - p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39944 + pax_open_kernel();
39945 + *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39946 + pax_close_kernel();
39948 case 0x09: /* Pentium M (Banias) */
39949 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
39950 @@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39952 /* on P-4s, the TSC runs with constant frequency independent whether
39953 * throttling is active or not. */
39954 - p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39955 + pax_open_kernel();
39956 + *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39957 + pax_close_kernel();
39959 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
39960 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
39961 diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
39962 index 9bb42ba..b01b4a2 100644
39963 --- a/drivers/cpufreq/sparc-us3-cpufreq.c
39964 +++ b/drivers/cpufreq/sparc-us3-cpufreq.c
39965 @@ -18,14 +18,12 @@
39966 #include <asm/head.h>
39967 #include <asm/timer.h>
39969 -static struct cpufreq_driver *cpufreq_us3_driver;
39971 struct us3_freq_percpu_info {
39972 struct cpufreq_frequency_table table[4];
39975 /* Indexed by cpu number. */
39976 -static struct us3_freq_percpu_info *us3_freq_table;
39977 +static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
39979 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
39980 * in the Safari config register.
39981 @@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
39983 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
39985 - if (cpufreq_us3_driver)
39986 - us3_freq_target(policy, 0);
39987 + us3_freq_target(policy, 0);
39992 +static int __init us3_freq_init(void);
39993 +static void __exit us3_freq_exit(void);
39995 +static struct cpufreq_driver cpufreq_us3_driver = {
39996 + .init = us3_freq_cpu_init,
39997 + .verify = cpufreq_generic_frequency_table_verify,
39998 + .target_index = us3_freq_target,
39999 + .get = us3_freq_get,
40000 + .exit = us3_freq_cpu_exit,
40001 + .name = "UltraSPARC-III",
40005 static int __init us3_freq_init(void)
40007 unsigned long manuf, impl, ver;
40010 if (tlb_type != cheetah && tlb_type != cheetah_plus)
40012 @@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
40013 (impl == CHEETAH_IMPL ||
40014 impl == CHEETAH_PLUS_IMPL ||
40015 impl == JAGUAR_IMPL ||
40016 - impl == PANTHER_IMPL)) {
40017 - struct cpufreq_driver *driver;
40020 - driver = kzalloc(sizeof(*driver), GFP_KERNEL);
40024 - us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
40026 - if (!us3_freq_table)
40029 - driver->init = us3_freq_cpu_init;
40030 - driver->verify = cpufreq_generic_frequency_table_verify;
40031 - driver->target_index = us3_freq_target;
40032 - driver->get = us3_freq_get;
40033 - driver->exit = us3_freq_cpu_exit;
40034 - strcpy(driver->name, "UltraSPARC-III");
40036 - cpufreq_us3_driver = driver;
40037 - ret = cpufreq_register_driver(driver);
40046 - cpufreq_us3_driver = NULL;
40048 - kfree(us3_freq_table);
40049 - us3_freq_table = NULL;
40052 + impl == PANTHER_IMPL))
40053 + return cpufreq_register_driver(&cpufreq_us3_driver);
40058 static void __exit us3_freq_exit(void)
40060 - if (cpufreq_us3_driver) {
40061 - cpufreq_unregister_driver(cpufreq_us3_driver);
40062 - kfree(cpufreq_us3_driver);
40063 - cpufreq_us3_driver = NULL;
40064 - kfree(us3_freq_table);
40065 - us3_freq_table = NULL;
40067 + cpufreq_unregister_driver(&cpufreq_us3_driver);
40070 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
40071 diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
40072 index 7d4a315..21bb886 100644
40073 --- a/drivers/cpufreq/speedstep-centrino.c
40074 +++ b/drivers/cpufreq/speedstep-centrino.c
40075 @@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
40076 !cpu_has(cpu, X86_FEATURE_EST))
40079 - if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
40080 - centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
40081 + if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
40082 + pax_open_kernel();
40083 + *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
40084 + pax_close_kernel();
40087 if (policy->cpu != 0)
40089 diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
40090 index 5db1478..e90e25e 100644
40091 --- a/drivers/cpuidle/driver.c
40092 +++ b/drivers/cpuidle/driver.c
40093 @@ -193,7 +193,7 @@ static int poll_idle(struct cpuidle_device *dev,
40095 static void poll_idle_init(struct cpuidle_driver *drv)
40097 - struct cpuidle_state *state = &drv->states[0];
40098 + cpuidle_state_no_const *state = &drv->states[0];
40100 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
40101 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
40102 diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
40103 index fb9f511..213e6cc 100644
40104 --- a/drivers/cpuidle/governor.c
40105 +++ b/drivers/cpuidle/governor.c
40106 @@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
40107 mutex_lock(&cpuidle_lock);
40108 if (__cpuidle_find_governor(gov->name) == NULL) {
40110 - list_add_tail(&gov->governor_list, &cpuidle_governors);
40111 + pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
40112 if (!cpuidle_curr_governor ||
40113 cpuidle_curr_governor->rating < gov->rating)
40114 cpuidle_switch_governor(gov);
40115 diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
40116 index 832a2c3..1794080 100644
40117 --- a/drivers/cpuidle/sysfs.c
40118 +++ b/drivers/cpuidle/sysfs.c
40119 @@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
40123 -static struct attribute_group cpuidle_attr_group = {
40124 +static attribute_group_no_const cpuidle_attr_group = {
40125 .attrs = cpuidle_default_attrs,
40128 diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
40129 index 8d2a772..33826c9 100644
40130 --- a/drivers/crypto/hifn_795x.c
40131 +++ b/drivers/crypto/hifn_795x.c
40132 @@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
40133 MODULE_PARM_DESC(hifn_pll_ref,
40134 "PLL reference clock (pci[freq] or ext[freq], default ext)");
40136 -static atomic_t hifn_dev_number;
40137 +static atomic_unchecked_t hifn_dev_number;
40139 #define ACRYPTO_OP_DECRYPT 0
40140 #define ACRYPTO_OP_ENCRYPT 1
40141 @@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
40142 goto err_out_disable_pci_device;
40144 snprintf(name, sizeof(name), "hifn%d",
40145 - atomic_inc_return(&hifn_dev_number)-1);
40146 + atomic_inc_return_unchecked(&hifn_dev_number)-1);
40148 err = pci_request_regions(pdev, name);
40150 diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
40151 index 4630709..0a70e46 100644
40152 --- a/drivers/crypto/omap-des.c
40153 +++ b/drivers/crypto/omap-des.c
40154 @@ -536,9 +536,6 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
40155 dmaengine_terminate_all(dd->dma_lch_in);
40156 dmaengine_terminate_all(dd->dma_lch_out);
40158 - dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
40159 - dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
40164 diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
40165 index ca1b362..01cae6a 100644
40166 --- a/drivers/devfreq/devfreq.c
40167 +++ b/drivers/devfreq/devfreq.c
40168 @@ -672,7 +672,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
40172 - list_add(&governor->node, &devfreq_governor_list);
40173 + pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
40175 list_for_each_entry(devfreq, &devfreq_list, node) {
40177 @@ -760,7 +760,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
40181 - list_del(&governor->node);
40182 + pax_list_del((struct list_head *)&governor->node);
40184 mutex_unlock(&devfreq_list_lock);
40186 diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
40187 index 10fcaba..326f709 100644
40188 --- a/drivers/dma/sh/shdma-base.c
40189 +++ b/drivers/dma/sh/shdma-base.c
40190 @@ -227,8 +227,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
40191 schan->slave_id = -EINVAL;
40194 - schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
40195 - sdev->desc_size, GFP_KERNEL);
40196 + schan->desc = kcalloc(sdev->desc_size,
40197 + NR_DESCS_PER_CHANNEL, GFP_KERNEL);
40198 if (!schan->desc) {
40201 diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
40202 index 11707df..2ea96f7 100644
40203 --- a/drivers/dma/sh/shdmac.c
40204 +++ b/drivers/dma/sh/shdmac.c
40205 @@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
40209 -static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
40210 +static struct notifier_block sh_dmae_nmi_notifier = {
40211 .notifier_call = sh_dmae_nmi_handler,
40213 /* Run before NMI debug handler and KGDB */
40214 diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
40215 index 592af5f..bb1d583 100644
40216 --- a/drivers/edac/edac_device.c
40217 +++ b/drivers/edac/edac_device.c
40218 @@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
40220 int edac_device_alloc_index(void)
40222 - static atomic_t device_indexes = ATOMIC_INIT(0);
40223 + static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
40225 - return atomic_inc_return(&device_indexes) - 1;
40226 + return atomic_inc_return_unchecked(&device_indexes) - 1;
40228 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
40230 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
40231 index 112d63a..5443a61 100644
40232 --- a/drivers/edac/edac_mc_sysfs.c
40233 +++ b/drivers/edac/edac_mc_sysfs.c
40234 @@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
40235 struct dev_ch_attribute {
40236 struct device_attribute attr;
40241 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
40242 static struct dev_ch_attribute dev_attr_legacy_##_name = \
40243 diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
40244 index 2cf44b4d..6dd2dc7 100644
40245 --- a/drivers/edac/edac_pci.c
40246 +++ b/drivers/edac/edac_pci.c
40249 static DEFINE_MUTEX(edac_pci_ctls_mutex);
40250 static LIST_HEAD(edac_pci_list);
40251 -static atomic_t pci_indexes = ATOMIC_INIT(0);
40252 +static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
40255 * edac_pci_alloc_ctl_info
40256 @@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
40258 int edac_pci_alloc_index(void)
40260 - return atomic_inc_return(&pci_indexes) - 1;
40261 + return atomic_inc_return_unchecked(&pci_indexes) - 1;
40263 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
40265 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
40266 index 24d877f..4e30133 100644
40267 --- a/drivers/edac/edac_pci_sysfs.c
40268 +++ b/drivers/edac/edac_pci_sysfs.c
40269 @@ -23,8 +23,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
40270 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
40271 static int edac_pci_poll_msec = 1000; /* one second workq period */
40273 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
40274 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
40275 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
40276 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
40278 static struct kobject *edac_pci_top_main_kobj;
40279 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
40280 @@ -232,7 +232,7 @@ struct edac_pci_dev_attribute {
40282 ssize_t(*show) (void *, char *);
40283 ssize_t(*store) (void *, const char *, size_t);
40287 /* Set of show/store abstract level functions for PCI Parity object */
40288 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
40289 @@ -576,7 +576,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40290 edac_printk(KERN_CRIT, EDAC_PCI,
40291 "Signaled System Error on %s\n",
40293 - atomic_inc(&pci_nonparity_count);
40294 + atomic_inc_unchecked(&pci_nonparity_count);
40297 if (status & (PCI_STATUS_PARITY)) {
40298 @@ -584,7 +584,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40299 "Master Data Parity Error on %s\n",
40302 - atomic_inc(&pci_parity_count);
40303 + atomic_inc_unchecked(&pci_parity_count);
40306 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40307 @@ -592,7 +592,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40308 "Detected Parity Error on %s\n",
40311 - atomic_inc(&pci_parity_count);
40312 + atomic_inc_unchecked(&pci_parity_count);
40316 @@ -615,7 +615,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40317 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
40318 "Signaled System Error on %s\n",
40320 - atomic_inc(&pci_nonparity_count);
40321 + atomic_inc_unchecked(&pci_nonparity_count);
40324 if (status & (PCI_STATUS_PARITY)) {
40325 @@ -623,7 +623,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40326 "Master Data Parity Error on "
40327 "%s\n", pci_name(dev));
40329 - atomic_inc(&pci_parity_count);
40330 + atomic_inc_unchecked(&pci_parity_count);
40333 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40334 @@ -631,7 +631,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40335 "Detected Parity Error on %s\n",
40338 - atomic_inc(&pci_parity_count);
40339 + atomic_inc_unchecked(&pci_parity_count);
40343 @@ -669,7 +669,7 @@ void edac_pci_do_parity_check(void)
40344 if (!check_pci_errors)
40347 - before_count = atomic_read(&pci_parity_count);
40348 + before_count = atomic_read_unchecked(&pci_parity_count);
40350 /* scan all PCI devices looking for a Parity Error on devices and
40352 @@ -681,7 +681,7 @@ void edac_pci_do_parity_check(void)
40353 /* Only if operator has selected panic on PCI Error */
40354 if (edac_pci_get_panic_on_pe()) {
40355 /* If the count is different 'after' from 'before' */
40356 - if (before_count != atomic_read(&pci_parity_count))
40357 + if (before_count != atomic_read_unchecked(&pci_parity_count))
40358 panic("EDAC: PCI Parity Error");
40361 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
40362 index c2359a1..8bd119d 100644
40363 --- a/drivers/edac/mce_amd.h
40364 +++ b/drivers/edac/mce_amd.h
40365 @@ -74,7 +74,7 @@ struct amd_decoder_ops {
40366 bool (*mc0_mce)(u16, u8);
40367 bool (*mc1_mce)(u16, u8);
40368 bool (*mc2_mce)(u16, u8);
40372 void amd_report_gart_errors(bool);
40373 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
40374 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
40375 index 57ea7f4..af06b76 100644
40376 --- a/drivers/firewire/core-card.c
40377 +++ b/drivers/firewire/core-card.c
40378 @@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
40379 const struct fw_card_driver *driver,
40380 struct device *device)
40382 - static atomic_t index = ATOMIC_INIT(-1);
40383 + static atomic_unchecked_t index = ATOMIC_INIT(-1);
40385 - card->index = atomic_inc_return(&index);
40386 + card->index = atomic_inc_return_unchecked(&index);
40387 card->driver = driver;
40388 card->device = device;
40389 card->current_tlabel = 0;
40390 @@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
40392 void fw_core_remove_card(struct fw_card *card)
40394 - struct fw_card_driver dummy_driver = dummy_driver_template;
40395 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
40397 card->driver->update_phy_reg(card, 4,
40398 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
40399 diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
40400 index f9e3aee..269dbdb 100644
40401 --- a/drivers/firewire/core-device.c
40402 +++ b/drivers/firewire/core-device.c
40403 @@ -256,7 +256,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
40404 struct config_rom_attribute {
40405 struct device_attribute attr;
40410 static ssize_t show_immediate(struct device *dev,
40411 struct device_attribute *dattr, char *buf)
40412 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
40413 index d6a09b9..18e90dd 100644
40414 --- a/drivers/firewire/core-transaction.c
40415 +++ b/drivers/firewire/core-transaction.c
40417 #include <linux/timer.h>
40418 #include <linux/types.h>
40419 #include <linux/workqueue.h>
40420 +#include <linux/sched.h>
40422 #include <asm/byteorder.h>
40424 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
40425 index e1480ff6..1a429bd 100644
40426 --- a/drivers/firewire/core.h
40427 +++ b/drivers/firewire/core.h
40428 @@ -111,6 +111,7 @@ struct fw_card_driver {
40430 int (*stop_iso)(struct fw_iso_context *ctx);
40432 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
40434 void fw_card_initialize(struct fw_card *card,
40435 const struct fw_card_driver *driver, struct device *device);
40436 diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
40437 index f51d376..b118e40 100644
40438 --- a/drivers/firewire/ohci.c
40439 +++ b/drivers/firewire/ohci.c
40440 @@ -2049,10 +2049,12 @@ static void bus_reset_work(struct work_struct *work)
40441 be32_to_cpu(ohci->next_header));
40444 +#ifndef CONFIG_GRKERNSEC
40445 if (param_remote_dma) {
40446 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
40447 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
40451 spin_unlock_irq(&ohci->lock);
40453 @@ -2584,8 +2586,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
40454 unsigned long flags;
40457 +#ifndef CONFIG_GRKERNSEC
40458 if (param_remote_dma)
40463 * FIXME: Make sure this bitmask is cleared when we clear the busReset
40464 diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
40465 index 94a58a0..f5eba42 100644
40466 --- a/drivers/firmware/dmi-id.c
40467 +++ b/drivers/firmware/dmi-id.c
40469 struct dmi_device_attribute{
40470 struct device_attribute dev_attr;
40474 #define to_dmi_dev_attr(_dev_attr) \
40475 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
40477 diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
40478 index 4fd9961..52d60ce 100644
40479 --- a/drivers/firmware/efi/cper.c
40480 +++ b/drivers/firmware/efi/cper.c
40481 @@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
40483 u64 cper_next_record_id(void)
40485 - static atomic64_t seq;
40486 + static atomic64_unchecked_t seq;
40488 - if (!atomic64_read(&seq))
40489 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
40490 + if (!atomic64_read_unchecked(&seq))
40491 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
40493 - return atomic64_inc_return(&seq);
40494 + return atomic64_inc_return_unchecked(&seq);
40496 EXPORT_SYMBOL_GPL(cper_next_record_id);
40498 diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
40499 index e14363d..c3d5d84 100644
40500 --- a/drivers/firmware/efi/efi.c
40501 +++ b/drivers/firmware/efi/efi.c
40502 @@ -159,14 +159,16 @@ static struct attribute_group efi_subsys_attr_group = {
40505 static struct efivars generic_efivars;
40506 -static struct efivar_operations generic_ops;
40507 +static efivar_operations_no_const generic_ops __read_only;
40509 static int generic_ops_register(void)
40511 - generic_ops.get_variable = efi.get_variable;
40512 - generic_ops.set_variable = efi.set_variable;
40513 - generic_ops.get_next_variable = efi.get_next_variable;
40514 - generic_ops.query_variable_store = efi_query_variable_store;
40515 + pax_open_kernel();
40516 + *(void **)&generic_ops.get_variable = efi.get_variable;
40517 + *(void **)&generic_ops.set_variable = efi.set_variable;
40518 + *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
40519 + *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
40520 + pax_close_kernel();
40522 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
40524 diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
40525 index 7b2e049..a253334 100644
40526 --- a/drivers/firmware/efi/efivars.c
40527 +++ b/drivers/firmware/efi/efivars.c
40528 @@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
40530 create_efivars_bin_attributes(void)
40532 - struct bin_attribute *attr;
40533 + bin_attribute_no_const *attr;
40537 diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
40538 index 5c55227..97f4978 100644
40539 --- a/drivers/firmware/efi/runtime-map.c
40540 +++ b/drivers/firmware/efi/runtime-map.c
40541 @@ -97,7 +97,7 @@ static void map_release(struct kobject *kobj)
40545 -static struct kobj_type __refdata map_ktype = {
40546 +static const struct kobj_type __refconst map_ktype = {
40547 .sysfs_ops = &map_attr_ops,
40548 .default_attrs = def_attrs,
40549 .release = map_release,
40550 diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
40551 index f1ab05e..ab51228 100644
40552 --- a/drivers/firmware/google/gsmi.c
40553 +++ b/drivers/firmware/google/gsmi.c
40554 @@ -709,7 +709,7 @@ static u32 __init hash_oem_table_id(char s[8])
40555 return local_hash_64(input, 32);
40558 -static struct dmi_system_id gsmi_dmi_table[] __initdata = {
40559 +static const struct dmi_system_id gsmi_dmi_table[] __initconst = {
40561 .ident = "Google Board",
40563 diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
40564 index 2f569aa..26e4f39 100644
40565 --- a/drivers/firmware/google/memconsole.c
40566 +++ b/drivers/firmware/google/memconsole.c
40567 @@ -136,7 +136,7 @@ static bool __init found_memconsole(void)
40571 -static struct dmi_system_id memconsole_dmi_table[] __initdata = {
40572 +static const struct dmi_system_id memconsole_dmi_table[] __initconst = {
40574 .ident = "Google Board",
40576 @@ -155,7 +155,10 @@ static int __init memconsole_init(void)
40577 if (!found_memconsole())
40580 - memconsole_bin_attr.size = memconsole_length;
40581 + pax_open_kernel();
40582 + *(size_t *)&memconsole_bin_attr.size = memconsole_length;
40583 + pax_close_kernel();
40585 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
40588 diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
40589 index cc016c61..d35279e 100644
40590 --- a/drivers/firmware/memmap.c
40591 +++ b/drivers/firmware/memmap.c
40592 @@ -124,7 +124,7 @@ static void __meminit release_firmware_map_entry(struct kobject *kobj)
40596 -static struct kobj_type __refdata memmap_ktype = {
40597 +static const struct kobj_type __refconst memmap_ktype = {
40598 .release = release_firmware_map_entry,
40599 .sysfs_ops = &memmap_attr_ops,
40600 .default_attrs = def_attrs,
40601 diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
40602 index 3cfcfc6..09d6f117 100644
40603 --- a/drivers/gpio/gpio-em.c
40604 +++ b/drivers/gpio/gpio-em.c
40605 @@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
40606 struct em_gio_priv *p;
40607 struct resource *io[2], *irq[2];
40608 struct gpio_chip *gpio_chip;
40609 - struct irq_chip *irq_chip;
40610 + irq_chip_no_const *irq_chip;
40611 const char *name = dev_name(&pdev->dev);
40614 diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
40615 index 4ba7ed5..1536b5d 100644
40616 --- a/drivers/gpio/gpio-ich.c
40617 +++ b/drivers/gpio/gpio-ich.c
40618 @@ -94,7 +94,7 @@ struct ichx_desc {
40619 * this option allows driver caching written output values
40621 bool use_outlvl_cache;
40627 diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
40628 index b232397..a3ccece 100644
40629 --- a/drivers/gpio/gpio-omap.c
40630 +++ b/drivers/gpio/gpio-omap.c
40631 @@ -1137,7 +1137,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
40632 const struct omap_gpio_platform_data *pdata;
40633 struct resource *res;
40634 struct gpio_bank *bank;
40635 - struct irq_chip *irqc;
40636 + irq_chip_no_const *irqc;
40639 match = of_match_device(of_match_ptr(omap_gpio_match), dev);
40640 diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
40641 index 1e14a6c..0442450 100644
40642 --- a/drivers/gpio/gpio-rcar.c
40643 +++ b/drivers/gpio/gpio-rcar.c
40644 @@ -379,7 +379,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
40645 struct gpio_rcar_priv *p;
40646 struct resource *io, *irq;
40647 struct gpio_chip *gpio_chip;
40648 - struct irq_chip *irq_chip;
40649 + irq_chip_no_const *irq_chip;
40650 struct device *dev = &pdev->dev;
40651 const char *name = dev_name(dev);
40653 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
40654 index c1caa45..f0f97d2 100644
40655 --- a/drivers/gpio/gpio-vr41xx.c
40656 +++ b/drivers/gpio/gpio-vr41xx.c
40657 @@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
40658 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
40659 maskl, pendl, maskh, pendh);
40661 - atomic_inc(&irq_err_count);
40662 + atomic_inc_unchecked(&irq_err_count);
40666 diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
40667 index 6bc612b..3932464 100644
40668 --- a/drivers/gpio/gpiolib.c
40669 +++ b/drivers/gpio/gpiolib.c
40670 @@ -558,8 +558,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
40673 if (gpiochip->irqchip) {
40674 - gpiochip->irqchip->irq_request_resources = NULL;
40675 - gpiochip->irqchip->irq_release_resources = NULL;
40676 + pax_open_kernel();
40677 + *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
40678 + *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
40679 + pax_close_kernel();
40680 gpiochip->irqchip = NULL;
40683 @@ -625,8 +627,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
40684 gpiochip->irqchip = NULL;
40687 - irqchip->irq_request_resources = gpiochip_irq_reqres;
40688 - irqchip->irq_release_resources = gpiochip_irq_relres;
40690 + pax_open_kernel();
40691 + *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
40692 + *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
40693 + pax_close_kernel();
40696 * Prepare the mapping since the irqchip shall be orthogonal to
40697 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40698 index 488f51d..301d462 100644
40699 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40700 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40701 @@ -118,7 +118,7 @@ struct device_queue_manager_ops {
40702 enum cache_policy alternate_policy,
40703 void __user *alternate_aperture_base,
40704 uint64_t alternate_aperture_size);
40709 * struct device_queue_manager
40710 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40711 index 5940531..a75b0e5 100644
40712 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40713 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40714 @@ -62,7 +62,7 @@ struct kernel_queue_ops {
40716 void (*submit_packet)(struct kernel_queue *kq);
40717 void (*rollback_packet)(struct kernel_queue *kq);
40721 struct kernel_queue {
40722 struct kernel_queue_ops ops;
40723 diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
40724 index 9b23525..65f4110 100644
40725 --- a/drivers/gpu/drm/drm_context.c
40726 +++ b/drivers/gpu/drm/drm_context.c
40727 @@ -53,6 +53,9 @@ struct drm_ctx_list {
40729 void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
40731 + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40734 mutex_lock(&dev->struct_mutex);
40735 idr_remove(&dev->ctx_idr, ctx_handle);
40736 mutex_unlock(&dev->struct_mutex);
40737 @@ -87,6 +90,9 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
40739 int drm_legacy_ctxbitmap_init(struct drm_device * dev)
40741 + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40744 idr_init(&dev->ctx_idr);
40747 @@ -101,6 +107,9 @@ int drm_legacy_ctxbitmap_init(struct drm_device * dev)
40749 void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
40751 + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40754 mutex_lock(&dev->struct_mutex);
40755 idr_destroy(&dev->ctx_idr);
40756 mutex_unlock(&dev->struct_mutex);
40757 @@ -119,11 +128,14 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
40759 struct drm_ctx_list *pos, *tmp;
40761 + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40764 mutex_lock(&dev->ctxlist_mutex);
40766 list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
40767 if (pos->tag == file &&
40768 - pos->handle != DRM_KERNEL_CONTEXT) {
40769 + _DRM_LOCKING_CONTEXT(pos->handle) != DRM_KERNEL_CONTEXT) {
40770 if (dev->driver->context_dtor)
40771 dev->driver->context_dtor(dev, pos->handle);
40773 @@ -161,6 +173,9 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data,
40774 struct drm_local_map *map;
40775 struct drm_map_list *_entry;
40777 + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40780 mutex_lock(&dev->struct_mutex);
40782 map = idr_find(&dev->ctx_idr, request->ctx_id);
40783 @@ -205,6 +220,9 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data,
40784 struct drm_local_map *map = NULL;
40785 struct drm_map_list *r_list = NULL;
40787 + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40790 mutex_lock(&dev->struct_mutex);
40791 list_for_each_entry(r_list, &dev->maplist, head) {
40793 @@ -277,7 +295,13 @@ static int drm_context_switch_complete(struct drm_device *dev,
40795 dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
40797 - if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
40798 + if (file_priv->master->lock.hw_lock == NULL) {
40800 + "Device has been unregistered. Hard exit. Process %d\n",
40801 + task_pid_nr(current));
40802 + send_sig(SIGTERM, current, 0);
40804 + } else if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
40805 DRM_ERROR("Lock isn't held after context switch\n");
40808 @@ -305,6 +329,9 @@ int drm_legacy_resctx(struct drm_device *dev, void *data,
40809 struct drm_ctx ctx;
40812 + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40815 if (res->count >= DRM_RESERVED_CONTEXTS) {
40816 memset(&ctx, 0, sizeof(ctx));
40817 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
40818 @@ -335,8 +362,11 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
40819 struct drm_ctx_list *ctx_entry;
40820 struct drm_ctx *ctx = data;
40822 + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40825 ctx->handle = drm_legacy_ctxbitmap_next(dev);
40826 - if (ctx->handle == DRM_KERNEL_CONTEXT) {
40827 + if (_DRM_LOCKING_CONTEXT(ctx->handle) == DRM_KERNEL_CONTEXT) {
40828 /* Skip kernel's context and get a new one. */
40829 ctx->handle = drm_legacy_ctxbitmap_next(dev);
40831 @@ -378,6 +408,9 @@ int drm_legacy_getctx(struct drm_device *dev, void *data,
40833 struct drm_ctx *ctx = data;
40835 + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40838 /* This is 0, because we don't handle any context flags */
40841 @@ -400,6 +433,9 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data,
40843 struct drm_ctx *ctx = data;
40845 + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40848 DRM_DEBUG("%d\n", ctx->handle);
40849 return drm_context_switch(dev, dev->last_context, ctx->handle);
40851 @@ -420,6 +456,9 @@ int drm_legacy_newctx(struct drm_device *dev, void *data,
40853 struct drm_ctx *ctx = data;
40855 + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40858 DRM_DEBUG("%d\n", ctx->handle);
40859 drm_context_switch_complete(dev, file_priv, ctx->handle);
40861 @@ -442,8 +481,11 @@ int drm_legacy_rmctx(struct drm_device *dev, void *data,
40863 struct drm_ctx *ctx = data;
40865 + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40868 DRM_DEBUG("%d\n", ctx->handle);
40869 - if (ctx->handle != DRM_KERNEL_CONTEXT) {
40870 + if (_DRM_LOCKING_CONTEXT(ctx->handle) != DRM_KERNEL_CONTEXT) {
40871 if (dev->driver->context_dtor)
40872 dev->driver->context_dtor(dev, ctx->handle);
40873 drm_legacy_ctxbitmap_free(dev, ctx->handle);
40874 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
40875 index 3007b44..420b4a3 100644
40876 --- a/drivers/gpu/drm/drm_crtc.c
40877 +++ b/drivers/gpu/drm/drm_crtc.c
40878 @@ -4176,7 +4176,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
40882 - if (copy_to_user(&enum_ptr[copied].name,
40883 + if (copy_to_user(enum_ptr[copied].name,
40884 &prop_enum->name, DRM_PROP_NAME_LEN)) {
40887 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
40888 index 48f7359..8c3b594 100644
40889 --- a/drivers/gpu/drm/drm_drv.c
40890 +++ b/drivers/gpu/drm/drm_drv.c
40891 @@ -448,7 +448,7 @@ void drm_unplug_dev(struct drm_device *dev)
40893 drm_device_set_unplugged(dev);
40895 - if (dev->open_count == 0) {
40896 + if (local_read(&dev->open_count) == 0) {
40899 mutex_unlock(&drm_global_mutex);
40900 @@ -596,10 +596,13 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
40901 if (drm_ht_create(&dev->map_hash, 12))
40904 - ret = drm_legacy_ctxbitmap_init(dev);
40906 - DRM_ERROR("Cannot allocate memory for context bitmap.\n");
40908 + if (drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT)) {
40909 + ret = drm_legacy_ctxbitmap_init(dev);
40912 + "Cannot allocate memory for context bitmap.\n");
40917 if (drm_core_check_feature(dev, DRIVER_GEM)) {
40918 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
40919 index 076dd60..e4a4ba7 100644
40920 --- a/drivers/gpu/drm/drm_fops.c
40921 +++ b/drivers/gpu/drm/drm_fops.c
40922 @@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
40923 return PTR_ERR(minor);
40926 - if (!dev->open_count++)
40927 + if (local_inc_return(&dev->open_count) == 1)
40930 /* share address_space across all char-devs of a single device */
40931 @@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
40935 - dev->open_count--;
40936 + local_dec(&dev->open_count);
40937 drm_minor_release(minor);
40940 @@ -376,7 +376,7 @@ int drm_release(struct inode *inode, struct file *filp)
40942 mutex_lock(&drm_global_mutex);
40944 - DRM_DEBUG("open_count = %d\n", dev->open_count);
40945 + DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
40947 mutex_lock(&dev->struct_mutex);
40948 list_del(&file_priv->lhead);
40949 @@ -389,10 +389,10 @@ int drm_release(struct inode *inode, struct file *filp)
40950 * Begin inline drm_release
40953 - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
40954 + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
40955 task_pid_nr(current),
40956 (long)old_encode_dev(file_priv->minor->kdev->devt),
40957 - dev->open_count);
40958 + local_read(&dev->open_count));
40960 /* Release any auth tokens that might point to this file_priv,
40961 (do that under the drm_global_mutex) */
40962 @@ -465,7 +465,7 @@ int drm_release(struct inode *inode, struct file *filp)
40963 * End inline drm_release
40966 - if (!--dev->open_count) {
40967 + if (local_dec_and_test(&dev->open_count)) {
40968 retcode = drm_lastclose(dev);
40969 if (drm_device_is_unplugged(dev))
40971 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
40972 index 3d2e91c..d31c4c9 100644
40973 --- a/drivers/gpu/drm/drm_global.c
40974 +++ b/drivers/gpu/drm/drm_global.c
40976 struct drm_global_item {
40977 struct mutex mutex;
40980 + atomic_t refcount;
40983 static struct drm_global_item glob[DRM_GLOBAL_NUM];
40984 @@ -49,7 +49,7 @@ void drm_global_init(void)
40985 struct drm_global_item *item = &glob[i];
40986 mutex_init(&item->mutex);
40987 item->object = NULL;
40988 - item->refcount = 0;
40989 + atomic_set(&item->refcount, 0);
40993 @@ -59,7 +59,7 @@ void drm_global_release(void)
40994 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
40995 struct drm_global_item *item = &glob[i];
40996 BUG_ON(item->object != NULL);
40997 - BUG_ON(item->refcount != 0);
40998 + BUG_ON(atomic_read(&item->refcount) != 0);
41002 @@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
41003 struct drm_global_item *item = &glob[ref->global_type];
41005 mutex_lock(&item->mutex);
41006 - if (item->refcount == 0) {
41007 + if (atomic_read(&item->refcount) == 0) {
41008 item->object = kzalloc(ref->size, GFP_KERNEL);
41009 if (unlikely(item->object == NULL)) {
41011 @@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
41015 - ++item->refcount;
41016 + atomic_inc(&item->refcount);
41017 ref->object = item->object;
41018 mutex_unlock(&item->mutex);
41020 @@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
41021 struct drm_global_item *item = &glob[ref->global_type];
41023 mutex_lock(&item->mutex);
41024 - BUG_ON(item->refcount == 0);
41025 + BUG_ON(atomic_read(&item->refcount) == 0);
41026 BUG_ON(ref->object != item->object);
41027 - if (--item->refcount == 0) {
41028 + if (atomic_dec_and_test(&item->refcount)) {
41030 item->object = NULL;
41032 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
41033 index cbb4fc0..5c756cb9 100644
41034 --- a/drivers/gpu/drm/drm_info.c
41035 +++ b/drivers/gpu/drm/drm_info.c
41036 @@ -77,10 +77,13 @@ int drm_vm_info(struct seq_file *m, void *data)
41037 struct drm_local_map *map;
41038 struct drm_map_list *r_list;
41040 - /* Hardcoded from _DRM_FRAME_BUFFER,
41041 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
41042 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
41043 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
41044 + static const char * const types[] = {
41045 + [_DRM_FRAME_BUFFER] = "FB",
41046 + [_DRM_REGISTERS] = "REG",
41047 + [_DRM_SHM] = "SHM",
41048 + [_DRM_AGP] = "AGP",
41049 + [_DRM_SCATTER_GATHER] = "SG",
41050 + [_DRM_CONSISTENT] = "PCI"};
41054 @@ -91,7 +94,7 @@ int drm_vm_info(struct seq_file *m, void *data)
41058 - if (map->type < 0 || map->type > 5)
41059 + if (map->type >= ARRAY_SIZE(types))
41062 type = types[map->type];
41063 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
41064 index aa8bbb4..0f62630 100644
41065 --- a/drivers/gpu/drm/drm_ioc32.c
41066 +++ b/drivers/gpu/drm/drm_ioc32.c
41067 @@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
41068 request = compat_alloc_user_space(nbytes);
41069 if (!access_ok(VERIFY_WRITE, request, nbytes))
41071 - list = (struct drm_buf_desc *) (request + 1);
41072 + list = (struct drm_buf_desc __user *) (request + 1);
41074 if (__put_user(count, &request->count)
41075 || __put_user(list, &request->list))
41076 @@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
41077 request = compat_alloc_user_space(nbytes);
41078 if (!access_ok(VERIFY_WRITE, request, nbytes))
41080 - list = (struct drm_buf_pub *) (request + 1);
41081 + list = (struct drm_buf_pub __user *) (request + 1);
41083 if (__put_user(count, &request->count)
41084 || __put_user(list, &request->list))
41085 @@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
41089 -static drm_ioctl_compat_t *drm_compat_ioctls[] = {
41090 +static drm_ioctl_compat_t drm_compat_ioctls[] = {
41091 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
41092 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
41093 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
41094 @@ -1062,7 +1062,6 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
41095 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41097 unsigned int nr = DRM_IOCTL_NR(cmd);
41098 - drm_ioctl_compat_t *fn;
41101 /* Assume that ioctls without an explicit compat routine will just
41102 @@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41103 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
41104 return drm_ioctl(filp, cmd, arg);
41106 - fn = drm_compat_ioctls[nr];
41109 - ret = (*fn) (filp, cmd, arg);
41110 + if (drm_compat_ioctls[nr] != NULL)
41111 + ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
41113 ret = drm_ioctl(filp, cmd, arg);
41115 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
41116 index 266dcd6..d0194d9 100644
41117 --- a/drivers/gpu/drm/drm_ioctl.c
41118 +++ b/drivers/gpu/drm/drm_ioctl.c
41119 @@ -663,7 +663,7 @@ long drm_ioctl(struct file *filp,
41120 struct drm_file *file_priv = filp->private_data;
41121 struct drm_device *dev;
41122 const struct drm_ioctl_desc *ioctl = NULL;
41123 - drm_ioctl_t *func;
41124 + drm_ioctl_no_const_t func;
41125 unsigned int nr = DRM_IOCTL_NR(cmd);
41126 int retcode = -EINVAL;
41127 char stack_kdata[128];
41128 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
41129 index f861361..b61d4c7 100644
41130 --- a/drivers/gpu/drm/drm_lock.c
41131 +++ b/drivers/gpu/drm/drm_lock.c
41132 @@ -61,9 +61,12 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
41133 struct drm_master *master = file_priv->master;
41136 + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
41139 ++file_priv->lock_count;
41141 - if (lock->context == DRM_KERNEL_CONTEXT) {
41142 + if (_DRM_LOCKING_CONTEXT(lock->context) == DRM_KERNEL_CONTEXT) {
41143 DRM_ERROR("Process %d using kernel context %d\n",
41144 task_pid_nr(current), lock->context);
41146 @@ -153,12 +156,23 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
41147 struct drm_lock *lock = data;
41148 struct drm_master *master = file_priv->master;
41150 - if (lock->context == DRM_KERNEL_CONTEXT) {
41151 + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
41154 + if (_DRM_LOCKING_CONTEXT(lock->context) == DRM_KERNEL_CONTEXT) {
41155 DRM_ERROR("Process %d using kernel context %d\n",
41156 task_pid_nr(current), lock->context);
41160 + if (!master->lock.hw_lock) {
41162 + "Device has been unregistered. Hard exit. Process %d\n",
41163 + task_pid_nr(current));
41164 + send_sig(SIGTERM, current, 0);
41168 if (drm_legacy_lock_free(&master->lock, lock->context)) {
41169 /* FIXME: Should really bail out here. */
41171 diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
41172 index d4813e0..6c1ab4d 100644
41173 --- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
41174 +++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
41175 @@ -825,10 +825,16 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
41176 u32 pipeconf_reg = PIPEACONF;
41177 u32 dspcntr_reg = DSPACNTR;
41179 - u32 pipeconf = dev_priv->pipeconf[pipe];
41180 - u32 dspcntr = dev_priv->dspcntr[pipe];
41183 u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
41188 + pipeconf = dev_priv->pipeconf[pipe];
41189 + dspcntr = dev_priv->dspcntr[pipe];
41192 pipeconf_reg = PIPECCONF;
41193 dspcntr_reg = DSPCCNTR;
41194 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
41195 index 93ec5dc..82acbaf 100644
41196 --- a/drivers/gpu/drm/i810/i810_drv.h
41197 +++ b/drivers/gpu/drm/i810/i810_drv.h
41198 @@ -110,8 +110,8 @@ typedef struct drm_i810_private {
41201 wait_queue_head_t irq_queue;
41202 - atomic_t irq_received;
41203 - atomic_t irq_emitted;
41204 + atomic_unchecked_t irq_received;
41205 + atomic_unchecked_t irq_emitted;
41208 } drm_i810_private_t;
41209 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
41210 index 68e0c85..3303192 100644
41211 --- a/drivers/gpu/drm/i915/i915_dma.c
41212 +++ b/drivers/gpu/drm/i915/i915_dma.c
41213 @@ -162,6 +162,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
41214 value = INTEL_INFO(dev)->eu_total;
41217 + case I915_PARAM_HAS_LEGACY_CONTEXT:
41218 + value = drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT);
41221 DRM_DEBUG("Unknown parameter %d\n", param->param);
41222 @@ -376,7 +378,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
41223 * locking inversion with the driver load path. And the access here is
41224 * completely racy anyway. So don't bother with locking for now.
41226 - return dev->open_count == 0;
41227 + return local_read(&dev->open_count) == 0;
41230 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
41231 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41232 index a3190e79..86b06cb 100644
41233 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41234 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41235 @@ -936,12 +936,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
41237 validate_exec_list(struct drm_device *dev,
41238 struct drm_i915_gem_exec_object2 *exec,
41240 + unsigned int count)
41242 unsigned relocs_total = 0;
41243 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
41244 unsigned invalid_flags;
41248 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
41249 if (USES_FULL_PPGTT(dev))
41250 diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
41251 index 176de63..b50b66a 100644
41252 --- a/drivers/gpu/drm/i915/i915_ioc32.c
41253 +++ b/drivers/gpu/drm/i915/i915_ioc32.c
41254 @@ -62,7 +62,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
41255 || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
41256 || __put_user(batchbuffer32.num_cliprects,
41257 &batchbuffer->num_cliprects)
41258 - || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
41259 + || __put_user((struct drm_clip_rect __user *)(unsigned long)batchbuffer32.cliprects,
41260 &batchbuffer->cliprects))
41263 @@ -91,13 +91,13 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
41265 cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
41266 if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
41267 - || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
41268 + || __put_user((char __user *)(unsigned long)cmdbuffer32.buf,
41270 || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
41271 || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
41272 || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
41273 || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
41274 - || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
41275 + || __put_user((struct drm_clip_rect __user *)(unsigned long)cmdbuffer32.cliprects,
41276 &cmdbuffer->cliprects))
41279 @@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
41280 (unsigned long)request);
41283 -static drm_ioctl_compat_t *i915_compat_ioctls[] = {
41284 +static drm_ioctl_compat_t i915_compat_ioctls[] = {
41285 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
41286 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
41287 [DRM_I915_GETPARAM] = compat_i915_getparam,
41288 @@ -201,17 +201,13 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
41289 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41291 unsigned int nr = DRM_IOCTL_NR(cmd);
41292 - drm_ioctl_compat_t *fn = NULL;
41295 if (nr < DRM_COMMAND_BASE)
41296 return drm_compat_ioctl(filp, cmd, arg);
41298 - if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
41299 - fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
41302 - ret = (*fn) (filp, cmd, arg);
41303 + if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls) && i915_compat_ioctls[nr - DRM_COMMAND_BASE])
41304 + ret = (*i915_compat_ioctls[nr - DRM_COMMAND_BASE])(filp, cmd, arg);
41306 ret = drm_ioctl(filp, cmd, arg);
41308 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
41309 index d0f3cbc..f3ab4cc 100644
41310 --- a/drivers/gpu/drm/i915/intel_display.c
41311 +++ b/drivers/gpu/drm/i915/intel_display.c
41312 @@ -13604,13 +13604,13 @@ struct intel_quirk {
41313 int subsystem_vendor;
41314 int subsystem_device;
41315 void (*hook)(struct drm_device *dev);
41319 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
41320 struct intel_dmi_quirk {
41321 void (*hook)(struct drm_device *dev);
41322 const struct dmi_system_id (*dmi_id_list)[];
41326 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
41328 @@ -13618,18 +13618,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
41332 -static const struct intel_dmi_quirk intel_dmi_quirks[] = {
41333 +static const struct dmi_system_id intel_dmi_quirks_table[] = {
41335 - .dmi_id_list = &(const struct dmi_system_id[]) {
41337 - .callback = intel_dmi_reverse_brightness,
41338 - .ident = "NCR Corporation",
41339 - .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
41340 - DMI_MATCH(DMI_PRODUCT_NAME, ""),
41343 - { } /* terminating entry */
41344 + .callback = intel_dmi_reverse_brightness,
41345 + .ident = "NCR Corporation",
41346 + .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
41347 + DMI_MATCH(DMI_PRODUCT_NAME, ""),
41350 + { } /* terminating entry */
41353 +static const struct intel_dmi_quirk intel_dmi_quirks[] = {
41355 + .dmi_id_list = &intel_dmi_quirks_table,
41356 .hook = quirk_invert_brightness,
41359 diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
41360 index 74f505b..21f6914 100644
41361 --- a/drivers/gpu/drm/imx/imx-drm-core.c
41362 +++ b/drivers/gpu/drm/imx/imx-drm-core.c
41363 @@ -355,7 +355,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
41364 if (imxdrm->pipes >= MAX_CRTC)
41367 - if (imxdrm->drm->open_count)
41368 + if (local_read(&imxdrm->drm->open_count))
41371 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
41372 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
41373 index b4a20149..219ab78 100644
41374 --- a/drivers/gpu/drm/mga/mga_drv.h
41375 +++ b/drivers/gpu/drm/mga/mga_drv.h
41376 @@ -122,9 +122,9 @@ typedef struct drm_mga_private {
41380 - atomic_t vbl_received; /**< Number of vblanks received. */
41381 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
41382 wait_queue_head_t fence_queue;
41383 - atomic_t last_fence_retired;
41384 + atomic_unchecked_t last_fence_retired;
41385 u32 next_fence_to_post;
41387 unsigned int fb_cpp;
41388 diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
41389 index 729bfd5..14bae78 100644
41390 --- a/drivers/gpu/drm/mga/mga_ioc32.c
41391 +++ b/drivers/gpu/drm/mga/mga_ioc32.c
41392 @@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
41396 -drm_ioctl_compat_t *mga_compat_ioctls[] = {
41397 +drm_ioctl_compat_t mga_compat_ioctls[] = {
41398 [DRM_MGA_INIT] = compat_mga_init,
41399 [DRM_MGA_GETPARAM] = compat_mga_getparam,
41400 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
41401 @@ -208,17 +208,13 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
41402 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41404 unsigned int nr = DRM_IOCTL_NR(cmd);
41405 - drm_ioctl_compat_t *fn = NULL;
41408 if (nr < DRM_COMMAND_BASE)
41409 return drm_compat_ioctl(filp, cmd, arg);
41411 - if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
41412 - fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
41415 - ret = (*fn) (filp, cmd, arg);
41416 + if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls) && mga_compat_ioctls[nr - DRM_COMMAND_BASE])
41417 + ret = (*mga_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
41419 ret = drm_ioctl(filp, cmd, arg);
41421 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
41422 index 1b071b8..de8601a 100644
41423 --- a/drivers/gpu/drm/mga/mga_irq.c
41424 +++ b/drivers/gpu/drm/mga/mga_irq.c
41425 @@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
41429 - return atomic_read(&dev_priv->vbl_received);
41430 + return atomic_read_unchecked(&dev_priv->vbl_received);
41434 @@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
41435 /* VBLANK interrupt */
41436 if (status & MGA_VLINEPEN) {
41437 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
41438 - atomic_inc(&dev_priv->vbl_received);
41439 + atomic_inc_unchecked(&dev_priv->vbl_received);
41440 drm_handle_vblank(dev, 0);
41443 @@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
41444 if ((prim_start & ~0x03) != (prim_end & ~0x03))
41445 MGA_WRITE(MGA_PRIMEND, prim_end);
41447 - atomic_inc(&dev_priv->last_fence_retired);
41448 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
41449 wake_up(&dev_priv->fence_queue);
41452 @@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
41455 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
41456 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
41457 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
41458 - *sequence) <= (1 << 23)));
41460 *sequence = cur_fence;
41461 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
41462 index 0190b69..60c3eaf 100644
41463 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
41464 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
41465 @@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
41468 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
41472 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
41474 diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
41475 index 8904933..9624b38 100644
41476 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c
41477 +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
41478 @@ -941,7 +941,8 @@ static struct drm_driver
41482 - DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
41483 + DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
41484 + DRIVER_KMS_LEGACY_CONTEXT,
41486 .load = nouveau_drm_load,
41487 .unload = nouveau_drm_unload,
41488 diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
41489 index dd72652..1fd2368 100644
41490 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h
41491 +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
41492 @@ -123,7 +123,6 @@ struct nouveau_drm {
41493 struct drm_global_reference mem_global_ref;
41494 struct ttm_bo_global_ref bo_global_ref;
41495 struct ttm_bo_device bdev;
41496 - atomic_t validate_sequence;
41497 int (*move)(struct nouveau_channel *,
41498 struct ttm_buffer_object *,
41499 struct ttm_mem_reg *, struct ttm_mem_reg *);
41500 diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41501 index 462679a..88e32a7 100644
41502 --- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41503 +++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41504 @@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
41507 unsigned int nr = DRM_IOCTL_NR(cmd);
41508 - drm_ioctl_compat_t *fn = NULL;
41509 + drm_ioctl_compat_t fn = NULL;
41512 if (nr < DRM_COMMAND_BASE)
41513 diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
41514 index 18f4497..10f6025 100644
41515 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
41516 +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
41517 @@ -130,11 +130,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41520 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
41521 - nouveau_vram_manager_init,
41522 - nouveau_vram_manager_fini,
41523 - nouveau_vram_manager_new,
41524 - nouveau_vram_manager_del,
41525 - nouveau_vram_manager_debug
41526 + .init = nouveau_vram_manager_init,
41527 + .takedown = nouveau_vram_manager_fini,
41528 + .get_node = nouveau_vram_manager_new,
41529 + .put_node = nouveau_vram_manager_del,
41530 + .debug = nouveau_vram_manager_debug
41534 @@ -198,11 +198,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41537 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
41538 - nouveau_gart_manager_init,
41539 - nouveau_gart_manager_fini,
41540 - nouveau_gart_manager_new,
41541 - nouveau_gart_manager_del,
41542 - nouveau_gart_manager_debug
41543 + .init = nouveau_gart_manager_init,
41544 + .takedown = nouveau_gart_manager_fini,
41545 + .get_node = nouveau_gart_manager_new,
41546 + .put_node = nouveau_gart_manager_del,
41547 + .debug = nouveau_gart_manager_debug
41551 @@ -271,11 +271,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41554 const struct ttm_mem_type_manager_func nv04_gart_manager = {
41555 - nv04_gart_manager_init,
41556 - nv04_gart_manager_fini,
41557 - nv04_gart_manager_new,
41558 - nv04_gart_manager_del,
41559 - nv04_gart_manager_debug
41560 + .init = nv04_gart_manager_init,
41561 + .takedown = nv04_gart_manager_fini,
41562 + .get_node = nv04_gart_manager_new,
41563 + .put_node = nv04_gart_manager_del,
41564 + .debug = nv04_gart_manager_debug
41568 diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
41569 index c7592ec..dd45ebc 100644
41570 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c
41571 +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
41572 @@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
41573 * locking inversion with the driver load path. And the access here is
41574 * completely racy anyway. So don't bother with locking for now.
41576 - return dev->open_count == 0;
41577 + return local_read(&dev->open_count) == 0;
41580 static const struct vga_switcheroo_client_ops
41581 diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
41582 index 9782364..89bd954 100644
41583 --- a/drivers/gpu/drm/qxl/qxl_cmd.c
41584 +++ b/drivers/gpu/drm/qxl/qxl_cmd.c
41585 @@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
41588 mutex_lock(&qdev->async_io_mutex);
41589 - irq_num = atomic_read(&qdev->irq_received_io_cmd);
41590 + irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41591 if (qdev->last_sent_io_cmd > irq_num) {
41593 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41594 - atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41595 + atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41597 ret = wait_event_timeout(qdev->io_cmd_event,
41598 - atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41599 + atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41600 /* 0 is timeout, just bail the "hw" has gone away */
41603 - irq_num = atomic_read(&qdev->irq_received_io_cmd);
41604 + irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41607 qdev->last_sent_io_cmd = irq_num + 1;
41609 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41610 - atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41611 + atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41613 ret = wait_event_timeout(qdev->io_cmd_event,
41614 - atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41615 + atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41619 diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
41620 index 6911b8c..89d6867 100644
41621 --- a/drivers/gpu/drm/qxl/qxl_debugfs.c
41622 +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
41623 @@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
41624 struct drm_info_node *node = (struct drm_info_node *) m->private;
41625 struct qxl_device *qdev = node->minor->dev->dev_private;
41627 - seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
41628 - seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
41629 - seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
41630 - seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
41631 + seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
41632 + seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
41633 + seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
41634 + seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
41635 seq_printf(m, "%d\n", qdev->irq_received_error);
41638 diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
41639 index 7c6cafe..460f542 100644
41640 --- a/drivers/gpu/drm/qxl/qxl_drv.h
41641 +++ b/drivers/gpu/drm/qxl/qxl_drv.h
41642 @@ -290,10 +290,10 @@ struct qxl_device {
41643 unsigned int last_sent_io_cmd;
41645 /* interrupt handling */
41646 - atomic_t irq_received;
41647 - atomic_t irq_received_display;
41648 - atomic_t irq_received_cursor;
41649 - atomic_t irq_received_io_cmd;
41650 + atomic_unchecked_t irq_received;
41651 + atomic_unchecked_t irq_received_display;
41652 + atomic_unchecked_t irq_received_cursor;
41653 + atomic_unchecked_t irq_received_io_cmd;
41654 unsigned irq_received_error;
41655 wait_queue_head_t display_event;
41656 wait_queue_head_t cursor_event;
41657 diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
41658 index b110883..dd06418 100644
41659 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c
41660 +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
41661 @@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41663 /* TODO copy slow path code from i915 */
41664 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
41665 - unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
41666 + unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
41669 struct qxl_drawable *draw = fb_cmd;
41670 @@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41671 struct drm_qxl_reloc reloc;
41673 if (copy_from_user(&reloc,
41674 - &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
41675 + &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
41679 @@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
41681 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
41683 - struct drm_qxl_command *commands =
41684 - (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
41685 + struct drm_qxl_command __user *commands =
41686 + (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
41688 - if (copy_from_user(&user_cmd, &commands[cmd_num],
41689 + if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
41693 diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
41694 index 0bf1e20..42a7310 100644
41695 --- a/drivers/gpu/drm/qxl/qxl_irq.c
41696 +++ b/drivers/gpu/drm/qxl/qxl_irq.c
41697 @@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
41701 - atomic_inc(&qdev->irq_received);
41702 + atomic_inc_unchecked(&qdev->irq_received);
41704 if (pending & QXL_INTERRUPT_DISPLAY) {
41705 - atomic_inc(&qdev->irq_received_display);
41706 + atomic_inc_unchecked(&qdev->irq_received_display);
41707 wake_up_all(&qdev->display_event);
41708 qxl_queue_garbage_collect(qdev, false);
41710 if (pending & QXL_INTERRUPT_CURSOR) {
41711 - atomic_inc(&qdev->irq_received_cursor);
41712 + atomic_inc_unchecked(&qdev->irq_received_cursor);
41713 wake_up_all(&qdev->cursor_event);
41715 if (pending & QXL_INTERRUPT_IO_CMD) {
41716 - atomic_inc(&qdev->irq_received_io_cmd);
41717 + atomic_inc_unchecked(&qdev->irq_received_io_cmd);
41718 wake_up_all(&qdev->io_cmd_event);
41720 if (pending & QXL_INTERRUPT_ERROR) {
41721 @@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
41722 init_waitqueue_head(&qdev->io_cmd_event);
41723 INIT_WORK(&qdev->client_monitors_config_work,
41724 qxl_client_monitors_config_work_func);
41725 - atomic_set(&qdev->irq_received, 0);
41726 - atomic_set(&qdev->irq_received_display, 0);
41727 - atomic_set(&qdev->irq_received_cursor, 0);
41728 - atomic_set(&qdev->irq_received_io_cmd, 0);
41729 + atomic_set_unchecked(&qdev->irq_received, 0);
41730 + atomic_set_unchecked(&qdev->irq_received_display, 0);
41731 + atomic_set_unchecked(&qdev->irq_received_cursor, 0);
41732 + atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
41733 qdev->irq_received_error = 0;
41734 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
41735 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
41736 diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
41737 index 0cbc4c9..0e46686 100644
41738 --- a/drivers/gpu/drm/qxl/qxl_ttm.c
41739 +++ b/drivers/gpu/drm/qxl/qxl_ttm.c
41740 @@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
41744 -static struct vm_operations_struct qxl_ttm_vm_ops;
41745 +static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
41746 static const struct vm_operations_struct *ttm_vm_ops;
41748 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41749 @@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
41751 if (unlikely(ttm_vm_ops == NULL)) {
41752 ttm_vm_ops = vma->vm_ops;
41753 + pax_open_kernel();
41754 qxl_ttm_vm_ops = *ttm_vm_ops;
41755 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
41756 + pax_close_kernel();
41758 vma->vm_ops = &qxl_ttm_vm_ops;
41760 @@ -464,25 +466,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
41761 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
41763 #if defined(CONFIG_DEBUG_FS)
41764 - static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
41765 - static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
41767 + static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
41769 + .name = "qxl_mem_mm",
41770 + .show = &qxl_mm_dump_table,
41773 + .name = "qxl_surf_mm",
41774 + .show = &qxl_mm_dump_table,
41778 - for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
41780 - sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
41782 - sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
41783 - qxl_mem_types_list[i].name = qxl_mem_types_names[i];
41784 - qxl_mem_types_list[i].show = &qxl_mm_dump_table;
41785 - qxl_mem_types_list[i].driver_features = 0;
41787 - qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41789 - qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41790 + pax_open_kernel();
41791 + *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41792 + *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41793 + pax_close_kernel();
41796 - return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
41797 + return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
41801 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
41802 index 2c45ac9..5d740f8 100644
41803 --- a/drivers/gpu/drm/r128/r128_cce.c
41804 +++ b/drivers/gpu/drm/r128/r128_cce.c
41805 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
41807 /* GH: Simple idle check.
41809 - atomic_set(&dev_priv->idle_count, 0);
41810 + atomic_set_unchecked(&dev_priv->idle_count, 0);
41812 /* We don't support anything other than bus-mastering ring mode,
41813 * but the ring can be in either AGP or PCI space for the ring
41814 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
41815 index 723e5d6..102dbaf 100644
41816 --- a/drivers/gpu/drm/r128/r128_drv.h
41817 +++ b/drivers/gpu/drm/r128/r128_drv.h
41818 @@ -93,14 +93,14 @@ typedef struct drm_r128_private {
41820 unsigned long cce_buffers_offset;
41822 - atomic_t idle_count;
41823 + atomic_unchecked_t idle_count;
41828 u32 crtc_offset_cntl;
41830 - atomic_t vbl_received;
41831 + atomic_unchecked_t vbl_received;
41834 unsigned int front_offset;
41835 diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
41836 index 663f38c..ec159a1 100644
41837 --- a/drivers/gpu/drm/r128/r128_ioc32.c
41838 +++ b/drivers/gpu/drm/r128/r128_ioc32.c
41839 @@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
41840 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
41843 -drm_ioctl_compat_t *r128_compat_ioctls[] = {
41844 +drm_ioctl_compat_t r128_compat_ioctls[] = {
41845 [DRM_R128_INIT] = compat_r128_init,
41846 [DRM_R128_DEPTH] = compat_r128_depth,
41847 [DRM_R128_STIPPLE] = compat_r128_stipple,
41848 @@ -197,17 +197,13 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
41849 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41851 unsigned int nr = DRM_IOCTL_NR(cmd);
41852 - drm_ioctl_compat_t *fn = NULL;
41855 if (nr < DRM_COMMAND_BASE)
41856 return drm_compat_ioctl(filp, cmd, arg);
41858 - if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
41859 - fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41862 - ret = (*fn) (filp, cmd, arg);
41863 + if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls) && r128_compat_ioctls[nr - DRM_COMMAND_BASE])
41864 + ret = (*r128_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
41866 ret = drm_ioctl(filp, cmd, arg);
41868 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
41869 index c2ae496..30b5993 100644
41870 --- a/drivers/gpu/drm/r128/r128_irq.c
41871 +++ b/drivers/gpu/drm/r128/r128_irq.c
41872 @@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
41876 - return atomic_read(&dev_priv->vbl_received);
41877 + return atomic_read_unchecked(&dev_priv->vbl_received);
41880 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41881 @@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41882 /* VBLANK interrupt */
41883 if (status & R128_CRTC_VBLANK_INT) {
41884 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
41885 - atomic_inc(&dev_priv->vbl_received);
41886 + atomic_inc_unchecked(&dev_priv->vbl_received);
41887 drm_handle_vblank(dev, 0);
41888 return IRQ_HANDLED;
41890 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
41891 index 8fd2d9f..18c9660 100644
41892 --- a/drivers/gpu/drm/r128/r128_state.c
41893 +++ b/drivers/gpu/drm/r128/r128_state.c
41894 @@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
41896 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
41898 - if (atomic_read(&dev_priv->idle_count) == 0)
41899 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
41900 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
41902 - atomic_set(&dev_priv->idle_count, 0);
41903 + atomic_set_unchecked(&dev_priv->idle_count, 0);
41907 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
41908 index b928c17..e5d9400 100644
41909 --- a/drivers/gpu/drm/radeon/mkregtable.c
41910 +++ b/drivers/gpu/drm/radeon/mkregtable.c
41911 @@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
41913 regmatch_t match[4];
41921 struct offset *offset;
41922 char last_reg_s[10];
41924 + unsigned long last_reg;
41927 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
41928 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
41929 index a7fdfa4..04a3964 100644
41930 --- a/drivers/gpu/drm/radeon/radeon_device.c
41931 +++ b/drivers/gpu/drm/radeon/radeon_device.c
41932 @@ -1247,7 +1247,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
41933 * locking inversion with the driver load path. And the access here is
41934 * completely racy anyway. So don't bother with locking for now.
41936 - return dev->open_count == 0;
41937 + return local_read(&dev->open_count) == 0;
41940 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
41941 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
41942 index 46bd393..6ae4719 100644
41943 --- a/drivers/gpu/drm/radeon/radeon_drv.h
41944 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
41945 @@ -264,7 +264,7 @@ typedef struct drm_radeon_private {
41948 wait_queue_head_t swi_queue;
41949 - atomic_t swi_emitted;
41950 + atomic_unchecked_t swi_emitted;
41952 uint32_t irq_enable_reg;
41953 uint32_t r500_disp_irq_reg;
41954 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
41955 index 0b98ea1..a3c770f 100644
41956 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
41957 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
41958 @@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41959 request = compat_alloc_user_space(sizeof(*request));
41960 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
41961 || __put_user(req32.param, &request->param)
41962 - || __put_user((void __user *)(unsigned long)req32.value,
41963 + || __put_user((unsigned long)req32.value,
41967 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41968 #define compat_radeon_cp_setparam NULL
41969 #endif /* X86_64 || IA64 */
41971 -static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41972 +static drm_ioctl_compat_t radeon_compat_ioctls[] = {
41973 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
41974 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
41975 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
41976 @@ -393,17 +393,13 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41977 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41979 unsigned int nr = DRM_IOCTL_NR(cmd);
41980 - drm_ioctl_compat_t *fn = NULL;
41983 if (nr < DRM_COMMAND_BASE)
41984 return drm_compat_ioctl(filp, cmd, arg);
41986 - if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
41987 - fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41990 - ret = (*fn) (filp, cmd, arg);
41991 + if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls) && radeon_compat_ioctls[nr - DRM_COMMAND_BASE])
41992 + ret = (*radeon_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
41994 ret = drm_ioctl(filp, cmd, arg);
41996 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
41997 index 244b19b..c19226d 100644
41998 --- a/drivers/gpu/drm/radeon/radeon_irq.c
41999 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
42000 @@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
42004 - atomic_inc(&dev_priv->swi_emitted);
42005 - ret = atomic_read(&dev_priv->swi_emitted);
42006 + atomic_inc_unchecked(&dev_priv->swi_emitted);
42007 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
42010 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
42011 @@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
42012 drm_radeon_private_t *dev_priv =
42013 (drm_radeon_private_t *) dev->dev_private;
42015 - atomic_set(&dev_priv->swi_emitted, 0);
42016 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
42017 init_waitqueue_head(&dev_priv->swi_queue);
42019 dev->max_vblank_count = 0x001fffff;
42020 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
42021 index 15aee72..cda326e 100644
42022 --- a/drivers/gpu/drm/radeon/radeon_state.c
42023 +++ b/drivers/gpu/drm/radeon/radeon_state.c
42024 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
42025 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
42026 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
42028 - if (copy_from_user(&depth_boxes, clear->depth_boxes,
42029 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
42030 sarea_priv->nbox * sizeof(depth_boxes[0])))
42033 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
42035 drm_radeon_private_t *dev_priv = dev->dev_private;
42036 drm_radeon_getparam_t *param = data;
42040 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
42042 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
42043 index edafd3c..3af7c9c 100644
42044 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
42045 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
42046 @@ -961,7 +961,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
42047 man->size = size >> PAGE_SHIFT;
42050 -static struct vm_operations_struct radeon_ttm_vm_ops;
42051 +static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
42052 static const struct vm_operations_struct *ttm_vm_ops = NULL;
42054 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42055 @@ -1002,8 +1002,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
42057 if (unlikely(ttm_vm_ops == NULL)) {
42058 ttm_vm_ops = vma->vm_ops;
42059 + pax_open_kernel();
42060 radeon_ttm_vm_ops = *ttm_vm_ops;
42061 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
42062 + pax_close_kernel();
42064 vma->vm_ops = &radeon_ttm_vm_ops;
42066 diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
42067 index a287e4f..df1d5dd 100644
42068 --- a/drivers/gpu/drm/tegra/dc.c
42069 +++ b/drivers/gpu/drm/tegra/dc.c
42070 @@ -1594,7 +1594,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
42073 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
42074 - dc->debugfs_files[i].data = dc;
42075 + *(void **)&dc->debugfs_files[i].data = dc;
42077 err = drm_debugfs_create_files(dc->debugfs_files,
42078 ARRAY_SIZE(debugfs_files),
42079 diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
42080 index ed970f6..4eeea42 100644
42081 --- a/drivers/gpu/drm/tegra/dsi.c
42082 +++ b/drivers/gpu/drm/tegra/dsi.c
42083 @@ -62,7 +62,7 @@ struct tegra_dsi {
42084 struct clk *clk_lp;
42087 - struct drm_info_list *debugfs_files;
42088 + drm_info_list_no_const *debugfs_files;
42089 struct drm_minor *minor;
42090 struct dentry *debugfs;
42092 diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
42093 index 06ab178..b5324e4 100644
42094 --- a/drivers/gpu/drm/tegra/hdmi.c
42095 +++ b/drivers/gpu/drm/tegra/hdmi.c
42096 @@ -64,7 +64,7 @@ struct tegra_hdmi {
42100 - struct drm_info_list *debugfs_files;
42101 + drm_info_list_no_const *debugfs_files;
42102 struct drm_minor *minor;
42103 struct dentry *debugfs;
42105 diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
42106 index aa0bd054..aea6a01 100644
42107 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
42108 +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
42109 @@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
42112 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
42114 - ttm_bo_man_takedown,
42115 - ttm_bo_man_get_node,
42116 - ttm_bo_man_put_node,
42118 + .init = ttm_bo_man_init,
42119 + .takedown = ttm_bo_man_takedown,
42120 + .get_node = ttm_bo_man_get_node,
42121 + .put_node = ttm_bo_man_put_node,
42122 + .debug = ttm_bo_man_debug
42124 EXPORT_SYMBOL(ttm_bo_manager_func);
42125 diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
42126 index a1803fb..c53f6b0 100644
42127 --- a/drivers/gpu/drm/ttm/ttm_memory.c
42128 +++ b/drivers/gpu/drm/ttm/ttm_memory.c
42129 @@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
42131 glob->zone_kernel = zone;
42132 ret = kobject_init_and_add(
42133 - &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
42134 + &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
42135 if (unlikely(ret != 0)) {
42136 kobject_put(&zone->kobj);
42138 @@ -348,7 +348,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
42140 glob->zone_dma32 = zone;
42141 ret = kobject_init_and_add(
42142 - &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
42143 + &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
42144 if (unlikely(ret != 0)) {
42145 kobject_put(&zone->kobj);
42147 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
42148 index 025c429..314062f 100644
42149 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
42150 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
42153 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
42154 #define SMALL_ALLOCATION 16
42155 -#define FREE_ALL_PAGES (~0U)
42156 +#define FREE_ALL_PAGES (~0UL)
42157 /* times are in msecs */
42158 #define PAGE_FREE_INTERVAL 1000
42160 @@ -299,15 +299,14 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
42161 * @free_all: If set to true will free all pages in pool
42162 * @use_static: Safe to use static buffer
42164 -static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
42165 +static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free,
42168 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
42169 unsigned long irq_flags;
42171 struct page **pages_to_free;
42172 - unsigned freed_pages = 0,
42173 - npages_to_free = nr_free;
42174 + unsigned long freed_pages = 0, npages_to_free = nr_free;
42176 if (NUM_PAGES_TO_ALLOC < nr_free)
42177 npages_to_free = NUM_PAGES_TO_ALLOC;
42178 @@ -371,7 +370,8 @@ restart:
42179 __list_del(&p->lru, &pool->list);
42181 ttm_pool_update_free_locked(pool, freed_pages);
42182 - nr_free -= freed_pages;
42183 + if (likely(nr_free != FREE_ALL_PAGES))
42184 + nr_free -= freed_pages;
42187 spin_unlock_irqrestore(&pool->lock, irq_flags);
42188 @@ -399,7 +399,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42190 unsigned pool_offset;
42191 struct ttm_page_pool *pool;
42192 - int shrink_pages = sc->nr_to_scan;
42193 + unsigned long shrink_pages = sc->nr_to_scan;
42194 unsigned long freed = 0;
42196 if (!mutex_trylock(&lock))
42197 @@ -407,7 +407,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42198 pool_offset = ++start_pool % NUM_POOLS;
42199 /* select start pool in round robin fashion */
42200 for (i = 0; i < NUM_POOLS; ++i) {
42201 - unsigned nr_free = shrink_pages;
42202 + unsigned long nr_free = shrink_pages;
42203 if (shrink_pages == 0)
42205 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
42206 @@ -673,7 +673,7 @@ out:
42209 /* Put all pages in pages list to correct pool to wait for reuse */
42210 -static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
42211 +static void ttm_put_pages(struct page **pages, unsigned long npages, int flags,
42212 enum ttm_caching_state cstate)
42214 unsigned long irq_flags;
42215 @@ -728,7 +728,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
42216 struct list_head plist;
42217 struct page *p = NULL;
42218 gfp_t gfp_flags = GFP_USER;
42220 + unsigned long count;
42223 /* set zero flag for page allocation if required */
42224 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
42225 index 01e1d27..aaa018a 100644
42226 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
42227 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
42230 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
42231 #define SMALL_ALLOCATION 4
42232 -#define FREE_ALL_PAGES (~0U)
42233 +#define FREE_ALL_PAGES (~0UL)
42234 /* times are in msecs */
42235 #define IS_UNDEFINED (0)
42236 #define IS_WC (1<<1)
42237 @@ -413,7 +413,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
42238 * @nr_free: If set to true will free all pages in pool
42239 * @use_static: Safe to use static buffer
42241 -static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
42242 +static unsigned long ttm_dma_page_pool_free(struct dma_pool *pool, unsigned long nr_free,
42245 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
42246 @@ -421,8 +421,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
42247 struct dma_page *dma_p, *tmp;
42248 struct page **pages_to_free;
42249 struct list_head d_pages;
42250 - unsigned freed_pages = 0,
42251 - npages_to_free = nr_free;
42252 + unsigned long freed_pages = 0, npages_to_free = nr_free;
42254 if (NUM_PAGES_TO_ALLOC < nr_free)
42255 npages_to_free = NUM_PAGES_TO_ALLOC;
42256 @@ -499,7 +498,8 @@ restart:
42257 /* remove range of pages from the pool */
42259 ttm_pool_update_free_locked(pool, freed_pages);
42260 - nr_free -= freed_pages;
42261 + if (likely(nr_free != FREE_ALL_PAGES))
42262 + nr_free -= freed_pages;
42265 spin_unlock_irqrestore(&pool->lock, irq_flags);
42266 @@ -936,7 +936,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
42267 struct dma_page *d_page, *next;
42268 enum pool_type type;
42269 bool is_cached = false;
42270 - unsigned count = 0, i, npages = 0;
42271 + unsigned long count = 0, i, npages = 0;
42272 unsigned long irq_flags;
42274 type = ttm_to_type(ttm->page_flags, ttm->caching_state);
42275 @@ -1012,7 +1012,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42276 static unsigned start_pool;
42278 unsigned pool_offset;
42279 - unsigned shrink_pages = sc->nr_to_scan;
42280 + unsigned long shrink_pages = sc->nr_to_scan;
42281 struct device_pools *p;
42282 unsigned long freed = 0;
42284 @@ -1025,7 +1025,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42286 pool_offset = ++start_pool % _manager->npools;
42287 list_for_each_entry(p, &_manager->pools, pools) {
42288 - unsigned nr_free;
42289 + unsigned long nr_free;
42293 @@ -1039,7 +1039,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42294 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
42295 freed += nr_free - shrink_pages;
42297 - pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
42298 + pr_debug("%s: (%s:%d) Asked to shrink %lu, have %lu more to go\n",
42299 p->pool->dev_name, p->pool->name, current->pid,
42300 nr_free, shrink_pages);
42302 diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
42303 index 5fc16ce..1bd84ec 100644
42304 --- a/drivers/gpu/drm/udl/udl_fb.c
42305 +++ b/drivers/gpu/drm/udl/udl_fb.c
42306 @@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
42307 fb_deferred_io_cleanup(info);
42308 kfree(info->fbdefio);
42309 info->fbdefio = NULL;
42310 - info->fbops->fb_mmap = udl_fb_mmap;
42313 pr_warn("released /dev/fb%d user=%d count=%d\n",
42314 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
42315 index ef8c500..01030c8 100644
42316 --- a/drivers/gpu/drm/via/via_drv.h
42317 +++ b/drivers/gpu/drm/via/via_drv.h
42318 @@ -53,7 +53,7 @@ typedef struct drm_via_ring_buffer {
42319 typedef uint32_t maskarray_t[5];
42321 typedef struct drm_via_irq {
42322 - atomic_t irq_received;
42323 + atomic_unchecked_t irq_received;
42324 uint32_t pending_mask;
42325 uint32_t enable_mask;
42326 wait_queue_head_t irq_queue;
42327 @@ -77,7 +77,7 @@ typedef struct drm_via_private {
42328 struct timeval last_vblank;
42329 int last_vblank_valid;
42330 unsigned usec_per_vblank;
42331 - atomic_t vbl_received;
42332 + atomic_unchecked_t vbl_received;
42333 drm_via_state_t hc_state;
42334 char pci_buf[VIA_PCI_BUF_SIZE];
42335 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
42336 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
42337 index 1319433..a993b0c 100644
42338 --- a/drivers/gpu/drm/via/via_irq.c
42339 +++ b/drivers/gpu/drm/via/via_irq.c
42340 @@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
42344 - return atomic_read(&dev_priv->vbl_received);
42345 + return atomic_read_unchecked(&dev_priv->vbl_received);
42348 irqreturn_t via_driver_irq_handler(int irq, void *arg)
42349 @@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42351 status = VIA_READ(VIA_REG_INTERRUPT);
42352 if (status & VIA_IRQ_VBLANK_PENDING) {
42353 - atomic_inc(&dev_priv->vbl_received);
42354 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
42355 + atomic_inc_unchecked(&dev_priv->vbl_received);
42356 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
42357 do_gettimeofday(&cur_vblank);
42358 if (dev_priv->last_vblank_valid) {
42359 dev_priv->usec_per_vblank =
42360 @@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42361 dev_priv->last_vblank = cur_vblank;
42362 dev_priv->last_vblank_valid = 1;
42364 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
42365 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
42366 DRM_DEBUG("US per vblank is: %u\n",
42367 dev_priv->usec_per_vblank);
42369 @@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42371 for (i = 0; i < dev_priv->num_irqs; ++i) {
42372 if (status & cur_irq->pending_mask) {
42373 - atomic_inc(&cur_irq->irq_received);
42374 + atomic_inc_unchecked(&cur_irq->irq_received);
42375 wake_up(&cur_irq->irq_queue);
42377 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
42378 @@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
42379 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
42380 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
42382 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
42383 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
42385 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
42386 (((cur_irq_sequence =
42387 - atomic_read(&cur_irq->irq_received)) -
42388 + atomic_read_unchecked(&cur_irq->irq_received)) -
42389 *sequence) <= (1 << 23)));
42391 *sequence = cur_irq_sequence;
42392 @@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
42395 for (i = 0; i < dev_priv->num_irqs; ++i) {
42396 - atomic_set(&cur_irq->irq_received, 0);
42397 + atomic_set_unchecked(&cur_irq->irq_received, 0);
42398 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
42399 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
42400 init_waitqueue_head(&cur_irq->irq_queue);
42401 @@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
42402 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
42403 case VIA_IRQ_RELATIVE:
42404 irqwait->request.sequence +=
42405 - atomic_read(&cur_irq->irq_received);
42406 + atomic_read_unchecked(&cur_irq->irq_received);
42407 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
42408 case VIA_IRQ_ABSOLUTE:
42410 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42411 index d26a6da..5fa41ed 100644
42412 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42413 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42414 @@ -447,7 +447,7 @@ struct vmw_private {
42415 * Fencing and IRQs.
42418 - atomic_t marker_seq;
42419 + atomic_unchecked_t marker_seq;
42420 wait_queue_head_t fence_queue;
42421 wait_queue_head_t fifo_queue;
42422 spinlock_t waiter_lock;
42423 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42424 index 39f2b03..d1b0a64 100644
42425 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42426 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42427 @@ -152,7 +152,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
42428 (unsigned int) min,
42429 (unsigned int) fifo->capabilities);
42431 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
42432 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
42433 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
42434 vmw_marker_queue_init(&fifo->marker_queue);
42435 return vmw_fifo_send_fence(dev_priv, &dummy);
42436 @@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
42438 iowrite32(bytes, fifo_mem +
42439 SVGA_FIFO_RESERVED);
42440 - return fifo_mem + (next_cmd >> 2);
42441 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
42443 need_bounce = true;
42445 @@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
42447 fm = vmw_fifo_reserve(dev_priv, bytes);
42448 if (unlikely(fm == NULL)) {
42449 - *seqno = atomic_read(&dev_priv->marker_seq);
42450 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
42452 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
42454 @@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
42458 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
42459 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
42460 } while (*seqno == 0);
42462 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
42463 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42464 index 170b61b..fec7348 100644
42465 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42466 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42467 @@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
42470 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
42471 - vmw_gmrid_man_init,
42472 - vmw_gmrid_man_takedown,
42473 - vmw_gmrid_man_get_node,
42474 - vmw_gmrid_man_put_node,
42475 - vmw_gmrid_man_debug
42476 + .init = vmw_gmrid_man_init,
42477 + .takedown = vmw_gmrid_man_takedown,
42478 + .get_node = vmw_gmrid_man_get_node,
42479 + .put_node = vmw_gmrid_man_put_node,
42480 + .debug = vmw_gmrid_man_debug
42482 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42483 index 69c8ce2..cacb0ab 100644
42484 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42485 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42486 @@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
42489 num_clips = arg->num_clips;
42490 - clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
42491 + clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
42493 if (unlikely(num_clips == 0))
42495 @@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
42498 num_clips = arg->num_clips;
42499 - clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
42500 + clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
42502 if (unlikely(num_clips == 0))
42504 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42505 index 9fe9827..0aa2fc0 100644
42506 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42507 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42508 @@ -102,7 +102,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
42509 * emitted. Then the fence is stale and signaled.
42512 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
42513 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
42517 @@ -133,7 +133,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
42520 down_read(&fifo_state->rwsem);
42521 - signal_seq = atomic_read(&dev_priv->marker_seq);
42522 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
42526 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42527 index efd1ffd..0ae13ca 100644
42528 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42529 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42530 @@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
42531 while (!vmw_lag_lt(queue, us)) {
42532 spin_lock(&queue->lock);
42533 if (list_empty(&queue->head))
42534 - seqno = atomic_read(&dev_priv->marker_seq);
42535 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
42537 marker = list_first_entry(&queue->head,
42538 struct vmw_marker, head);
42539 diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
42540 index 37ac7b5..d52a5c9 100644
42541 --- a/drivers/gpu/vga/vga_switcheroo.c
42542 +++ b/drivers/gpu/vga/vga_switcheroo.c
42543 @@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
42545 /* this version is for the case where the power switch is separate
42546 to the device being powered down. */
42547 -int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
42548 +int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
42550 /* copy over all the bus versions */
42551 if (dev->bus && dev->bus->pm) {
42552 @@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
42556 -int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
42557 +int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
42559 /* copy over all the bus versions */
42560 if (dev->bus && dev->bus->pm) {
42561 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
42562 index 722a925..594c312 100644
42563 --- a/drivers/hid/hid-core.c
42564 +++ b/drivers/hid/hid-core.c
42565 @@ -2552,7 +2552,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
42567 int hid_add_device(struct hid_device *hdev)
42569 - static atomic_t id = ATOMIC_INIT(0);
42570 + static atomic_unchecked_t id = ATOMIC_INIT(0);
42573 if (WARN_ON(hdev->status & HID_STAT_ADDED))
42574 @@ -2595,7 +2595,7 @@ int hid_add_device(struct hid_device *hdev)
42575 /* XXX hack, any other cleaner solution after the driver core
42576 * is converted to allow more than 20 bytes as the device name? */
42577 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
42578 - hdev->vendor, hdev->product, atomic_inc_return(&id));
42579 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
42581 hid_debug_register(hdev, dev_name(&hdev->dev));
42582 ret = device_add(&hdev->dev);
42583 diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
42584 index 5614fee..8301fbf 100644
42585 --- a/drivers/hid/hid-sensor-custom.c
42586 +++ b/drivers/hid/hid-sensor-custom.c
42587 @@ -34,7 +34,7 @@ struct hid_sensor_custom_field {
42589 char group_name[HID_CUSTOM_NAME_LENGTH];
42590 struct hid_sensor_hub_attribute_info attribute;
42591 - struct device_attribute sd_attrs[HID_CUSTOM_MAX_CORE_ATTRS];
42592 + device_attribute_no_const sd_attrs[HID_CUSTOM_MAX_CORE_ATTRS];
42593 char attr_name[HID_CUSTOM_TOTAL_ATTRS][HID_CUSTOM_NAME_LENGTH];
42594 struct attribute *attrs[HID_CUSTOM_TOTAL_ATTRS];
42595 struct attribute_group hid_custom_attribute_group;
42596 @@ -590,7 +590,7 @@ static int hid_sensor_custom_add_attributes(struct hid_sensor_custom
42598 while (j < HID_CUSTOM_TOTAL_ATTRS &&
42599 hid_custom_attrs[j].name) {
42600 - struct device_attribute *device_attr;
42601 + device_attribute_no_const *device_attr;
42603 device_attr = &sensor_inst->fields[i].sd_attrs[j];
42605 diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
42606 index c13fb5b..55a3802 100644
42607 --- a/drivers/hid/hid-wiimote-debug.c
42608 +++ b/drivers/hid/hid-wiimote-debug.c
42609 @@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
42610 else if (size == 0)
42613 - if (copy_to_user(u, buf, size))
42614 + if (size > sizeof(buf) || copy_to_user(u, buf, size))
42618 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
42619 index 54da66d..aa3a3d7 100644
42620 --- a/drivers/hv/channel.c
42621 +++ b/drivers/hv/channel.c
42622 @@ -373,7 +373,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
42625 next_gpadl_handle =
42626 - (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
42627 + (atomic_inc_return_unchecked(&vmbus_connection.next_gpadl_handle) - 1);
42629 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
42631 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
42632 index d3943bc..3de28a9 100644
42633 --- a/drivers/hv/hv.c
42634 +++ b/drivers/hv/hv.c
42635 @@ -118,7 +118,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
42636 u64 output_address = (output) ? virt_to_phys(output) : 0;
42637 u32 output_address_hi = output_address >> 32;
42638 u32 output_address_lo = output_address & 0xFFFFFFFF;
42639 - void *hypercall_page = hv_context.hypercall_page;
42640 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
42642 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
42643 "=a"(hv_status_lo) : "d" (control_hi),
42644 @@ -164,7 +164,7 @@ int hv_init(void)
42645 /* See if the hypercall page is already set */
42646 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
42648 - virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
42649 + virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
42653 diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
42654 index cb5b7dc..6052f22 100644
42655 --- a/drivers/hv/hv_balloon.c
42656 +++ b/drivers/hv/hv_balloon.c
42657 @@ -469,7 +469,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
42659 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
42660 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
42661 -static atomic_t trans_id = ATOMIC_INIT(0);
42662 +static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
42664 static int dm_ring_size = (5 * PAGE_SIZE);
42666 @@ -941,7 +941,7 @@ static void hot_add_req(struct work_struct *dummy)
42667 pr_info("Memory hot add failed\n");
42669 dm->state = DM_INITIALIZED;
42670 - resp.hdr.trans_id = atomic_inc_return(&trans_id);
42671 + resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42672 vmbus_sendpacket(dm->dev->channel, &resp,
42673 sizeof(struct dm_hot_add_response),
42674 (unsigned long)NULL,
42675 @@ -1022,7 +1022,7 @@ static void post_status(struct hv_dynmem_device *dm)
42676 memset(&status, 0, sizeof(struct dm_status));
42677 status.hdr.type = DM_STATUS_REPORT;
42678 status.hdr.size = sizeof(struct dm_status);
42679 - status.hdr.trans_id = atomic_inc_return(&trans_id);
42680 + status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42683 * The host expects the guest to report free and committed memory.
42684 @@ -1046,7 +1046,7 @@ static void post_status(struct hv_dynmem_device *dm)
42685 * send the status. This can happen if we were interrupted
42686 * after we picked our transaction ID.
42688 - if (status.hdr.trans_id != atomic_read(&trans_id))
42689 + if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
42693 @@ -1191,7 +1191,7 @@ static void balloon_up(struct work_struct *dummy)
42697 - bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
42698 + bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42699 ret = vmbus_sendpacket(dm_device.dev->channel,
42702 @@ -1237,7 +1237,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
42704 memset(&resp, 0, sizeof(struct dm_unballoon_response));
42705 resp.hdr.type = DM_UNBALLOON_RESPONSE;
42706 - resp.hdr.trans_id = atomic_inc_return(&trans_id);
42707 + resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42708 resp.hdr.size = sizeof(struct dm_unballoon_response);
42710 vmbus_sendpacket(dm_device.dev->channel, &resp,
42711 @@ -1298,7 +1298,7 @@ static void version_resp(struct hv_dynmem_device *dm,
42712 memset(&version_req, 0, sizeof(struct dm_version_request));
42713 version_req.hdr.type = DM_VERSION_REQUEST;
42714 version_req.hdr.size = sizeof(struct dm_version_request);
42715 - version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42716 + version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42717 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
42718 version_req.is_last_attempt = 1;
42720 @@ -1471,7 +1471,7 @@ static int balloon_probe(struct hv_device *dev,
42721 memset(&version_req, 0, sizeof(struct dm_version_request));
42722 version_req.hdr.type = DM_VERSION_REQUEST;
42723 version_req.hdr.size = sizeof(struct dm_version_request);
42724 - version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42725 + version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42726 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
42727 version_req.is_last_attempt = 0;
42729 @@ -1502,7 +1502,7 @@ static int balloon_probe(struct hv_device *dev,
42730 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
42731 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
42732 cap_msg.hdr.size = sizeof(struct dm_capabilities);
42733 - cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
42734 + cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42736 cap_msg.caps.cap_bits.balloon = 1;
42737 cap_msg.caps.cap_bits.hot_add = 1;
42738 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
42739 index 887287a..238a626 100644
42740 --- a/drivers/hv/hyperv_vmbus.h
42741 +++ b/drivers/hv/hyperv_vmbus.h
42742 @@ -645,7 +645,7 @@ enum vmbus_connect_state {
42743 struct vmbus_connection {
42744 enum vmbus_connect_state conn_state;
42746 - atomic_t next_gpadl_handle;
42747 + atomic_unchecked_t next_gpadl_handle;
42750 * Represents channel interrupts. Each bit position represents a
42751 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
42752 index 579bdf9..0dac21d5 100644
42753 --- a/drivers/hwmon/acpi_power_meter.c
42754 +++ b/drivers/hwmon/acpi_power_meter.c
42755 @@ -116,7 +116,7 @@ struct sensor_template {
42756 struct device_attribute *devattr,
42757 const char *buf, size_t count);
42762 /* Averaging interval */
42763 static int update_avg_interval(struct acpi_power_meter_resource *resource)
42764 @@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
42765 struct sensor_template *attrs)
42767 struct device *dev = &resource->acpi_dev->dev;
42768 - struct sensor_device_attribute *sensors =
42769 + sensor_device_attribute_no_const *sensors =
42770 &resource->sensors[resource->num_sensors];
42773 @@ -973,7 +973,7 @@ static int __init enable_cap_knobs(const struct dmi_system_id *d)
42777 -static struct dmi_system_id __initdata pm_dmi_table[] = {
42778 +static const struct dmi_system_id __initconst pm_dmi_table[] = {
42780 enable_cap_knobs, "IBM Active Energy Manager",
42782 diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
42783 index 0af63da..05a183a 100644
42784 --- a/drivers/hwmon/applesmc.c
42785 +++ b/drivers/hwmon/applesmc.c
42786 @@ -1105,7 +1105,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
42788 struct applesmc_node_group *grp;
42789 struct applesmc_dev_attr *node;
42790 - struct attribute *attr;
42791 + attribute_no_const *attr;
42794 for (grp = groups; grp->format; grp++) {
42795 diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
42796 index cccef87..06ce8ec 100644
42797 --- a/drivers/hwmon/asus_atk0110.c
42798 +++ b/drivers/hwmon/asus_atk0110.c
42799 @@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
42800 struct atk_sensor_data {
42801 struct list_head list;
42802 struct atk_data *data;
42803 - struct device_attribute label_attr;
42804 - struct device_attribute input_attr;
42805 - struct device_attribute limit1_attr;
42806 - struct device_attribute limit2_attr;
42807 + device_attribute_no_const label_attr;
42808 + device_attribute_no_const input_attr;
42809 + device_attribute_no_const limit1_attr;
42810 + device_attribute_no_const limit2_attr;
42811 char label_attr_name[ATTR_NAME_SIZE];
42812 char input_attr_name[ATTR_NAME_SIZE];
42813 char limit1_attr_name[ATTR_NAME_SIZE];
42814 @@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
42815 static struct device_attribute atk_name_attr =
42816 __ATTR(name, 0444, atk_name_show, NULL);
42818 -static void atk_init_attribute(struct device_attribute *attr, char *name,
42819 +static void atk_init_attribute(device_attribute_no_const *attr, char *name,
42820 sysfs_show_func show)
42822 sysfs_attr_init(&attr->attr);
42823 diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
42824 index ed303ba..e24bd26f 100644
42825 --- a/drivers/hwmon/coretemp.c
42826 +++ b/drivers/hwmon/coretemp.c
42827 @@ -782,7 +782,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
42831 -static struct notifier_block coretemp_cpu_notifier __refdata = {
42832 +static struct notifier_block coretemp_cpu_notifier = {
42833 .notifier_call = coretemp_cpu_callback,
42836 diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
42837 index 7a8a6fb..015c1fd 100644
42838 --- a/drivers/hwmon/ibmaem.c
42839 +++ b/drivers/hwmon/ibmaem.c
42840 @@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
42841 struct aem_rw_sensor_template *rw)
42843 struct device *dev = &data->pdev->dev;
42844 - struct sensor_device_attribute *sensors = data->sensors;
42845 + sensor_device_attribute_no_const *sensors = data->sensors;
42848 /* Set up read-only sensors */
42849 diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
42850 index 17ae2eb..21b71dd 100644
42851 --- a/drivers/hwmon/iio_hwmon.c
42852 +++ b/drivers/hwmon/iio_hwmon.c
42853 @@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42855 struct device *dev = &pdev->dev;
42856 struct iio_hwmon_state *st;
42857 - struct sensor_device_attribute *a;
42858 + sensor_device_attribute_no_const *a;
42860 int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
42861 enum iio_chan_type type;
42862 diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
42863 index 37f0170..414ec2c 100644
42864 --- a/drivers/hwmon/nct6683.c
42865 +++ b/drivers/hwmon/nct6683.c
42866 @@ -397,11 +397,11 @@ static struct attribute_group *
42867 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42870 - struct sensor_device_attribute_2 *a2;
42871 - struct sensor_device_attribute *a;
42872 + sensor_device_attribute_2_no_const *a2;
42873 + sensor_device_attribute_no_const *a;
42874 struct sensor_device_template **t;
42875 struct sensor_device_attr_u *su;
42876 - struct attribute_group *group;
42877 + attribute_group_no_const *group;
42878 struct attribute **attrs;
42881 diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
42882 index bd1c99d..2fa55ad 100644
42883 --- a/drivers/hwmon/nct6775.c
42884 +++ b/drivers/hwmon/nct6775.c
42885 @@ -953,10 +953,10 @@ static struct attribute_group *
42886 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42889 - struct attribute_group *group;
42890 + attribute_group_no_const *group;
42891 struct sensor_device_attr_u *su;
42892 - struct sensor_device_attribute *a;
42893 - struct sensor_device_attribute_2 *a2;
42894 + sensor_device_attribute_no_const *a;
42895 + sensor_device_attribute_2_no_const *a2;
42896 struct attribute **attrs;
42897 struct sensor_device_template **t;
42899 diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
42900 index f2e47c7..45d7941 100644
42901 --- a/drivers/hwmon/pmbus/pmbus_core.c
42902 +++ b/drivers/hwmon/pmbus/pmbus_core.c
42903 @@ -816,7 +816,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
42907 -static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42908 +static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
42911 ssize_t (*show)(struct device *dev,
42912 @@ -833,7 +833,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42913 dev_attr->store = store;
42916 -static void pmbus_attr_init(struct sensor_device_attribute *a,
42917 +static void pmbus_attr_init(sensor_device_attribute_no_const *a,
42920 ssize_t (*show)(struct device *dev,
42921 @@ -855,7 +855,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
42924 struct pmbus_boolean *boolean;
42925 - struct sensor_device_attribute *a;
42926 + sensor_device_attribute_no_const *a;
42928 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
42930 @@ -880,7 +880,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
42931 bool update, bool readonly)
42933 struct pmbus_sensor *sensor;
42934 - struct device_attribute *a;
42935 + device_attribute_no_const *a;
42937 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
42939 @@ -911,7 +911,7 @@ static int pmbus_add_label(struct pmbus_data *data,
42940 const char *lstring, int index)
42942 struct pmbus_label *label;
42943 - struct device_attribute *a;
42944 + device_attribute_no_const *a;
42946 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
42948 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
42949 index d4f0935..7420593 100644
42950 --- a/drivers/hwmon/sht15.c
42951 +++ b/drivers/hwmon/sht15.c
42952 @@ -169,7 +169,7 @@ struct sht15_data {
42954 bool supply_uv_valid;
42955 struct work_struct update_supply_work;
42956 - atomic_t interrupt_handled;
42957 + atomic_unchecked_t interrupt_handled;
42961 @@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
42962 ret = gpio_direction_input(data->pdata->gpio_data);
42965 - atomic_set(&data->interrupt_handled, 0);
42966 + atomic_set_unchecked(&data->interrupt_handled, 0);
42968 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42969 if (gpio_get_value(data->pdata->gpio_data) == 0) {
42970 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
42971 /* Only relevant if the interrupt hasn't occurred. */
42972 - if (!atomic_read(&data->interrupt_handled))
42973 + if (!atomic_read_unchecked(&data->interrupt_handled))
42974 schedule_work(&data->read_work);
42976 ret = wait_event_timeout(data->wait_queue,
42977 @@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
42979 /* First disable the interrupt */
42980 disable_irq_nosync(irq);
42981 - atomic_inc(&data->interrupt_handled);
42982 + atomic_inc_unchecked(&data->interrupt_handled);
42983 /* Then schedule a reading work struct */
42984 if (data->state != SHT15_READING_NOTHING)
42985 schedule_work(&data->read_work);
42986 @@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
42987 * If not, then start the interrupt again - care here as could
42988 * have gone low in meantime so verify it hasn't!
42990 - atomic_set(&data->interrupt_handled, 0);
42991 + atomic_set_unchecked(&data->interrupt_handled, 0);
42992 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42993 /* If still not occurred or another handler was scheduled */
42994 if (gpio_get_value(data->pdata->gpio_data)
42995 - || atomic_read(&data->interrupt_handled))
42996 + || atomic_read_unchecked(&data->interrupt_handled))
43000 diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
43001 index ac91c07..8e69663 100644
43002 --- a/drivers/hwmon/via-cputemp.c
43003 +++ b/drivers/hwmon/via-cputemp.c
43004 @@ -295,7 +295,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
43008 -static struct notifier_block via_cputemp_cpu_notifier __refdata = {
43009 +static struct notifier_block via_cputemp_cpu_notifier = {
43010 .notifier_call = via_cputemp_cpu_callback,
43013 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
43014 index 65e3240..e6c511d 100644
43015 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
43016 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
43018 extern struct i2c_adapter amd756_smbus;
43020 static struct i2c_adapter *s4882_adapter;
43021 -static struct i2c_algorithm *s4882_algo;
43022 +static i2c_algorithm_no_const *s4882_algo;
43024 /* Wrapper access functions for multiplexed SMBus */
43025 static DEFINE_MUTEX(amd756_lock);
43026 diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
43027 index b19a310..d6eece0 100644
43028 --- a/drivers/i2c/busses/i2c-diolan-u2c.c
43029 +++ b/drivers/i2c/busses/i2c-diolan-u2c.c
43030 @@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
43033 /* Send command to device, and get response. */
43034 -static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
43035 +static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
43039 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
43040 index 88eda09..cf40434 100644
43041 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
43042 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
43044 extern struct i2c_adapter *nforce2_smbus;
43046 static struct i2c_adapter *s4985_adapter;
43047 -static struct i2c_algorithm *s4985_algo;
43048 +static i2c_algorithm_no_const *s4985_algo;
43050 /* Wrapper access functions for multiplexed SMBus */
43051 static DEFINE_MUTEX(nforce2_lock);
43052 diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
43053 index 71c7a39..71dd3e0 100644
43054 --- a/drivers/i2c/i2c-dev.c
43055 +++ b/drivers/i2c/i2c-dev.c
43056 @@ -272,7 +272,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
43060 - data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
43061 + data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
43062 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
43063 if (IS_ERR(rdwr_pa[i].buf)) {
43064 res = PTR_ERR(rdwr_pa[i].buf);
43065 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
43066 index 0b510ba..4fbb5085 100644
43067 --- a/drivers/ide/ide-cd.c
43068 +++ b/drivers/ide/ide-cd.c
43069 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
43070 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
43071 if ((unsigned long)buf & alignment
43072 || blk_rq_bytes(rq) & q->dma_pad_mask
43073 - || object_is_on_stack(buf))
43074 + || object_starts_on_stack(buf))
43078 diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
43079 index 4df97f6..c751151 100644
43080 --- a/drivers/iio/industrialio-core.c
43081 +++ b/drivers/iio/industrialio-core.c
43082 @@ -570,7 +570,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
43086 -int __iio_device_attr_init(struct device_attribute *dev_attr,
43087 +int __iio_device_attr_init(device_attribute_no_const *dev_attr,
43088 const char *postfix,
43089 struct iio_chan_spec const *chan,
43090 ssize_t (*readfunc)(struct device *dev,
43091 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
43092 index 0271608..81998c5 100644
43093 --- a/drivers/infiniband/core/cm.c
43094 +++ b/drivers/infiniband/core/cm.c
43095 @@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
43097 struct cm_counter_group {
43098 struct kobject obj;
43099 - atomic_long_t counter[CM_ATTR_COUNT];
43100 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
43103 struct cm_counter_attribute {
43104 @@ -1397,7 +1397,7 @@ static void cm_dup_req_handler(struct cm_work *work,
43105 struct ib_mad_send_buf *msg = NULL;
43108 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43109 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43110 counter[CM_REQ_COUNTER]);
43112 /* Quick state check to discard duplicate REQs. */
43113 @@ -1784,7 +1784,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
43117 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43118 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43119 counter[CM_REP_COUNTER]);
43120 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
43122 @@ -1951,7 +1951,7 @@ static int cm_rtu_handler(struct cm_work *work)
43123 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
43124 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
43125 spin_unlock_irq(&cm_id_priv->lock);
43126 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43127 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43128 counter[CM_RTU_COUNTER]);
43131 @@ -2134,7 +2134,7 @@ static int cm_dreq_handler(struct cm_work *work)
43132 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
43133 dreq_msg->local_comm_id);
43135 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43136 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43137 counter[CM_DREQ_COUNTER]);
43138 cm_issue_drep(work->port, work->mad_recv_wc);
43140 @@ -2159,7 +2159,7 @@ static int cm_dreq_handler(struct cm_work *work)
43141 case IB_CM_MRA_REP_RCVD:
43143 case IB_CM_TIMEWAIT:
43144 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43145 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43146 counter[CM_DREQ_COUNTER]);
43147 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
43149 @@ -2173,7 +2173,7 @@ static int cm_dreq_handler(struct cm_work *work)
43152 case IB_CM_DREQ_RCVD:
43153 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43154 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43155 counter[CM_DREQ_COUNTER]);
43158 @@ -2540,7 +2540,7 @@ static int cm_mra_handler(struct cm_work *work)
43159 ib_modify_mad(cm_id_priv->av.port->mad_agent,
43160 cm_id_priv->msg, timeout)) {
43161 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
43162 - atomic_long_inc(&work->port->
43163 + atomic_long_inc_unchecked(&work->port->
43164 counter_group[CM_RECV_DUPLICATES].
43165 counter[CM_MRA_COUNTER]);
43167 @@ -2549,7 +2549,7 @@ static int cm_mra_handler(struct cm_work *work)
43169 case IB_CM_MRA_REQ_RCVD:
43170 case IB_CM_MRA_REP_RCVD:
43171 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43172 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43173 counter[CM_MRA_COUNTER]);
43176 @@ -2711,7 +2711,7 @@ static int cm_lap_handler(struct cm_work *work)
43177 case IB_CM_LAP_IDLE:
43179 case IB_CM_MRA_LAP_SENT:
43180 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43181 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43182 counter[CM_LAP_COUNTER]);
43183 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
43185 @@ -2727,7 +2727,7 @@ static int cm_lap_handler(struct cm_work *work)
43188 case IB_CM_LAP_RCVD:
43189 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43190 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43191 counter[CM_LAP_COUNTER]);
43194 @@ -3011,7 +3011,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
43195 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
43196 if (cur_cm_id_priv) {
43197 spin_unlock_irq(&cm.lock);
43198 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43199 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43200 counter[CM_SIDR_REQ_COUNTER]);
43201 goto out; /* Duplicate message. */
43203 @@ -3223,10 +3223,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
43204 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
43207 - atomic_long_add(1 + msg->retries,
43208 + atomic_long_add_unchecked(1 + msg->retries,
43209 &port->counter_group[CM_XMIT].counter[attr_index]);
43211 - atomic_long_add(msg->retries,
43212 + atomic_long_add_unchecked(msg->retries,
43213 &port->counter_group[CM_XMIT_RETRIES].
43214 counter[attr_index]);
43216 @@ -3436,7 +3436,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
43219 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
43220 - atomic_long_inc(&port->counter_group[CM_RECV].
43221 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
43222 counter[attr_id - CM_ATTR_ID_OFFSET]);
43224 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
43225 @@ -3667,7 +3667,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
43226 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
43228 return sprintf(buf, "%ld\n",
43229 - atomic_long_read(&group->counter[cm_attr->index]));
43230 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
43233 static const struct sysfs_ops cm_counter_ops = {
43234 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
43235 index 9f5ad7c..588cd84 100644
43236 --- a/drivers/infiniband/core/fmr_pool.c
43237 +++ b/drivers/infiniband/core/fmr_pool.c
43238 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
43240 struct task_struct *thread;
43242 - atomic_t req_ser;
43243 - atomic_t flush_ser;
43244 + atomic_unchecked_t req_ser;
43245 + atomic_unchecked_t flush_ser;
43247 wait_queue_head_t force_wait;
43249 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
43250 struct ib_fmr_pool *pool = pool_ptr;
43253 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
43254 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
43255 ib_fmr_batch_release(pool);
43257 - atomic_inc(&pool->flush_ser);
43258 + atomic_inc_unchecked(&pool->flush_ser);
43259 wake_up_interruptible(&pool->force_wait);
43261 if (pool->flush_function)
43262 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
43265 set_current_state(TASK_INTERRUPTIBLE);
43266 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
43267 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
43268 !kthread_should_stop())
43270 __set_current_state(TASK_RUNNING);
43271 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
43272 pool->dirty_watermark = params->dirty_watermark;
43273 pool->dirty_len = 0;
43274 spin_lock_init(&pool->pool_lock);
43275 - atomic_set(&pool->req_ser, 0);
43276 - atomic_set(&pool->flush_ser, 0);
43277 + atomic_set_unchecked(&pool->req_ser, 0);
43278 + atomic_set_unchecked(&pool->flush_ser, 0);
43279 init_waitqueue_head(&pool->force_wait);
43281 pool->thread = kthread_run(ib_fmr_cleanup_thread,
43282 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
43284 spin_unlock_irq(&pool->pool_lock);
43286 - serial = atomic_inc_return(&pool->req_ser);
43287 + serial = atomic_inc_return_unchecked(&pool->req_ser);
43288 wake_up_process(pool->thread);
43290 if (wait_event_interruptible(pool->force_wait,
43291 - atomic_read(&pool->flush_ser) - serial >= 0))
43292 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
43296 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
43298 list_add_tail(&fmr->list, &pool->dirty_list);
43299 if (++pool->dirty_len >= pool->dirty_watermark) {
43300 - atomic_inc(&pool->req_ser);
43301 + atomic_inc_unchecked(&pool->req_ser);
43302 wake_up_process(pool->thread);
43305 diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
43306 index a9f0489..27a161b 100644
43307 --- a/drivers/infiniband/core/uverbs_cmd.c
43308 +++ b/drivers/infiniband/core/uverbs_cmd.c
43309 @@ -951,6 +951,9 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
43310 if (copy_from_user(&cmd, buf, sizeof cmd))
43313 + if (!access_ok_noprefault(VERIFY_READ, cmd.start, cmd.length))
43316 INIT_UDATA(&udata, buf + sizeof cmd,
43317 (unsigned long) cmd.response + sizeof resp,
43318 in_len - sizeof cmd, out_len - sizeof resp);
43319 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
43320 index cff815b..75576dd 100644
43321 --- a/drivers/infiniband/hw/cxgb4/mem.c
43322 +++ b/drivers/infiniband/hw/cxgb4/mem.c
43323 @@ -256,7 +256,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
43325 struct fw_ri_tpte tpt;
43327 - static atomic_t key;
43328 + static atomic_unchecked_t key;
43330 if (c4iw_fatal_error(rdev))
43332 @@ -277,7 +277,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
43333 if (rdev->stats.stag.cur > rdev->stats.stag.max)
43334 rdev->stats.stag.max = rdev->stats.stag.cur;
43335 mutex_unlock(&rdev->stats.lock);
43336 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
43337 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
43339 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
43340 __func__, stag_state, type, pdid, stag_idx);
43341 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
43342 index 79b3dbc..96e5fcc 100644
43343 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
43344 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
43345 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
43346 struct ib_atomic_eth *ateth;
43347 struct ipath_ack_entry *e;
43349 - atomic64_t *maddr;
43350 + atomic64_unchecked_t *maddr;
43354 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
43355 IB_ACCESS_REMOTE_ATOMIC)))
43356 goto nack_acc_unlck;
43357 /* Perform atomic OP and save result. */
43358 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
43359 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
43360 sdata = be64_to_cpu(ateth->swap_data);
43361 e = &qp->s_ack_queue[qp->r_head_ack_queue];
43362 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
43363 - (u64) atomic64_add_return(sdata, maddr) - sdata :
43364 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
43365 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
43366 be64_to_cpu(ateth->compare_data),
43368 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
43369 index 1f95bba..9530f87 100644
43370 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
43371 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
43372 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
43373 unsigned long flags;
43376 - atomic64_t *maddr;
43377 + atomic64_unchecked_t *maddr;
43378 enum ib_wc_status send_status;
43381 @@ -382,11 +382,11 @@ again:
43382 IB_ACCESS_REMOTE_ATOMIC)))
43384 /* Perform atomic OP and save result. */
43385 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
43386 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
43387 sdata = wqe->wr.wr.atomic.compare_add;
43388 *(u64 *) sqp->s_sge.sge.vaddr =
43389 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
43390 - (u64) atomic64_add_return(sdata, maddr) - sdata :
43391 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
43392 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
43393 sdata, wqe->wr.wr.atomic.swap);
43395 diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
43396 index 9cd2b00..7486df4 100644
43397 --- a/drivers/infiniband/hw/mlx4/mad.c
43398 +++ b/drivers/infiniband/hw/mlx4/mad.c
43399 @@ -106,7 +106,7 @@ __be64 mlx4_ib_gen_node_guid(void)
43401 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
43403 - return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
43404 + return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
43405 cpu_to_be64(0xff00000000000000LL);
43408 diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
43409 index ed327e6..ca1739e0 100644
43410 --- a/drivers/infiniband/hw/mlx4/mcg.c
43411 +++ b/drivers/infiniband/hw/mlx4/mcg.c
43412 @@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
43416 - atomic_set(&ctx->tid, 0);
43417 + atomic_set_unchecked(&ctx->tid, 0);
43418 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
43419 ctx->mcg_wq = create_singlethread_workqueue(name);
43421 diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
43422 index fce39343..9d8fdff 100644
43423 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
43424 +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
43425 @@ -435,7 +435,7 @@ struct mlx4_ib_demux_ctx {
43426 struct list_head mcg_mgid0_list;
43427 struct workqueue_struct *mcg_wq;
43428 struct mlx4_ib_demux_pv_ctx **tun;
43430 + atomic_unchecked_t tid;
43431 int flushing; /* flushing the work queue */
43434 diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
43435 index 9d3e5c1..6f166df 100644
43436 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c
43437 +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
43438 @@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
43439 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
43442 -int mthca_QUERY_FW(struct mthca_dev *dev)
43443 +int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
43445 struct mthca_mailbox *mailbox;
43447 @@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43451 -int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43452 +int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43455 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
43456 @@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
43457 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
43460 -int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43461 +int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43464 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
43465 @@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
43469 -int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
43470 +int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
43471 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
43472 void *in_mad, void *response_mad)
43474 diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
43475 index ded76c1..0cf0a08 100644
43476 --- a/drivers/infiniband/hw/mthca/mthca_main.c
43477 +++ b/drivers/infiniband/hw/mthca/mthca_main.c
43478 @@ -692,7 +692,7 @@ err_close:
43482 -static int mthca_setup_hca(struct mthca_dev *dev)
43483 +static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
43487 diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
43488 index ed9a989..6aa5dc2 100644
43489 --- a/drivers/infiniband/hw/mthca/mthca_mr.c
43490 +++ b/drivers/infiniband/hw/mthca/mthca_mr.c
43491 @@ -81,7 +81,7 @@ struct mthca_mpt_entry {
43492 * through the bitmaps)
43495 -static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
43496 +static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
43500 @@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
43504 -int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
43505 +int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
43506 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
43508 struct mthca_mailbox *mailbox;
43509 @@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
43510 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
43513 -int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
43514 +int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
43515 u64 *buffer_list, int buffer_size_shift,
43516 int list_len, u64 iova, u64 total_size,
43517 u32 access, struct mthca_mr *mr)
43518 diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
43519 index 415f8e1..e34214e 100644
43520 --- a/drivers/infiniband/hw/mthca/mthca_provider.c
43521 +++ b/drivers/infiniband/hw/mthca/mthca_provider.c
43522 @@ -764,7 +764,7 @@ unlock:
43526 -static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
43527 +static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
43529 struct mthca_dev *dev = to_mdev(ibcq->device);
43530 struct mthca_cq *cq = to_mcq(ibcq);
43531 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
43532 index 9f9d5c5..3c19aac 100644
43533 --- a/drivers/infiniband/hw/nes/nes.c
43534 +++ b/drivers/infiniband/hw/nes/nes.c
43535 @@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
43536 LIST_HEAD(nes_adapter_list);
43537 static LIST_HEAD(nes_dev_list);
43539 -atomic_t qps_destroyed;
43540 +atomic_unchecked_t qps_destroyed;
43542 static unsigned int ee_flsh_adapter;
43543 static unsigned int sysfs_nonidx_addr;
43544 @@ -279,7 +279,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
43545 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
43546 struct nes_adapter *nesadapter = nesdev->nesadapter;
43548 - atomic_inc(&qps_destroyed);
43549 + atomic_inc_unchecked(&qps_destroyed);
43551 /* Free the control structures */
43553 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
43554 index bd9d132..70d84f4 100644
43555 --- a/drivers/infiniband/hw/nes/nes.h
43556 +++ b/drivers/infiniband/hw/nes/nes.h
43557 @@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
43558 extern unsigned int wqm_quanta;
43559 extern struct list_head nes_adapter_list;
43561 -extern atomic_t cm_connects;
43562 -extern atomic_t cm_accepts;
43563 -extern atomic_t cm_disconnects;
43564 -extern atomic_t cm_closes;
43565 -extern atomic_t cm_connecteds;
43566 -extern atomic_t cm_connect_reqs;
43567 -extern atomic_t cm_rejects;
43568 -extern atomic_t mod_qp_timouts;
43569 -extern atomic_t qps_created;
43570 -extern atomic_t qps_destroyed;
43571 -extern atomic_t sw_qps_destroyed;
43572 +extern atomic_unchecked_t cm_connects;
43573 +extern atomic_unchecked_t cm_accepts;
43574 +extern atomic_unchecked_t cm_disconnects;
43575 +extern atomic_unchecked_t cm_closes;
43576 +extern atomic_unchecked_t cm_connecteds;
43577 +extern atomic_unchecked_t cm_connect_reqs;
43578 +extern atomic_unchecked_t cm_rejects;
43579 +extern atomic_unchecked_t mod_qp_timouts;
43580 +extern atomic_unchecked_t qps_created;
43581 +extern atomic_unchecked_t qps_destroyed;
43582 +extern atomic_unchecked_t sw_qps_destroyed;
43583 extern u32 mh_detected;
43584 extern u32 mh_pauses_sent;
43585 extern u32 cm_packets_sent;
43586 @@ -199,16 +199,16 @@ extern u32 cm_packets_created;
43587 extern u32 cm_packets_received;
43588 extern u32 cm_packets_dropped;
43589 extern u32 cm_packets_retrans;
43590 -extern atomic_t cm_listens_created;
43591 -extern atomic_t cm_listens_destroyed;
43592 +extern atomic_unchecked_t cm_listens_created;
43593 +extern atomic_unchecked_t cm_listens_destroyed;
43594 extern u32 cm_backlog_drops;
43595 -extern atomic_t cm_loopbacks;
43596 -extern atomic_t cm_nodes_created;
43597 -extern atomic_t cm_nodes_destroyed;
43598 -extern atomic_t cm_accel_dropped_pkts;
43599 -extern atomic_t cm_resets_recvd;
43600 -extern atomic_t pau_qps_created;
43601 -extern atomic_t pau_qps_destroyed;
43602 +extern atomic_unchecked_t cm_loopbacks;
43603 +extern atomic_unchecked_t cm_nodes_created;
43604 +extern atomic_unchecked_t cm_nodes_destroyed;
43605 +extern atomic_unchecked_t cm_accel_dropped_pkts;
43606 +extern atomic_unchecked_t cm_resets_recvd;
43607 +extern atomic_unchecked_t pau_qps_created;
43608 +extern atomic_unchecked_t pau_qps_destroyed;
43610 extern u32 int_mod_timer_init;
43611 extern u32 int_mod_cq_depth_256;
43612 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
43613 index 72b4341..2600332 100644
43614 --- a/drivers/infiniband/hw/nes/nes_cm.c
43615 +++ b/drivers/infiniband/hw/nes/nes_cm.c
43616 @@ -69,14 +69,14 @@ u32 cm_packets_dropped;
43617 u32 cm_packets_retrans;
43618 u32 cm_packets_created;
43619 u32 cm_packets_received;
43620 -atomic_t cm_listens_created;
43621 -atomic_t cm_listens_destroyed;
43622 +atomic_unchecked_t cm_listens_created;
43623 +atomic_unchecked_t cm_listens_destroyed;
43624 u32 cm_backlog_drops;
43625 -atomic_t cm_loopbacks;
43626 -atomic_t cm_nodes_created;
43627 -atomic_t cm_nodes_destroyed;
43628 -atomic_t cm_accel_dropped_pkts;
43629 -atomic_t cm_resets_recvd;
43630 +atomic_unchecked_t cm_loopbacks;
43631 +atomic_unchecked_t cm_nodes_created;
43632 +atomic_unchecked_t cm_nodes_destroyed;
43633 +atomic_unchecked_t cm_accel_dropped_pkts;
43634 +atomic_unchecked_t cm_resets_recvd;
43636 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
43637 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
43638 @@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
43639 /* instance of function pointers for client API */
43640 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
43641 static struct nes_cm_ops nes_cm_api = {
43642 - mini_cm_accelerated,
43644 - mini_cm_del_listen,
43649 - mini_cm_recv_pkt,
43650 - mini_cm_dealloc_core,
43653 + .accelerated = mini_cm_accelerated,
43654 + .listen = mini_cm_listen,
43655 + .stop_listener = mini_cm_del_listen,
43656 + .connect = mini_cm_connect,
43657 + .close = mini_cm_close,
43658 + .accept = mini_cm_accept,
43659 + .reject = mini_cm_reject,
43660 + .recv_pkt = mini_cm_recv_pkt,
43661 + .destroy_cm_core = mini_cm_dealloc_core,
43662 + .get = mini_cm_get,
43663 + .set = mini_cm_set
43666 static struct nes_cm_core *g_cm_core;
43668 -atomic_t cm_connects;
43669 -atomic_t cm_accepts;
43670 -atomic_t cm_disconnects;
43671 -atomic_t cm_closes;
43672 -atomic_t cm_connecteds;
43673 -atomic_t cm_connect_reqs;
43674 -atomic_t cm_rejects;
43675 +atomic_unchecked_t cm_connects;
43676 +atomic_unchecked_t cm_accepts;
43677 +atomic_unchecked_t cm_disconnects;
43678 +atomic_unchecked_t cm_closes;
43679 +atomic_unchecked_t cm_connecteds;
43680 +atomic_unchecked_t cm_connect_reqs;
43681 +atomic_unchecked_t cm_rejects;
43683 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
43685 @@ -1461,7 +1461,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
43689 - atomic_inc(&cm_listens_destroyed);
43690 + atomic_inc_unchecked(&cm_listens_destroyed);
43692 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
43694 @@ -1667,7 +1667,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
43697 add_hte_node(cm_core, cm_node);
43698 - atomic_inc(&cm_nodes_created);
43699 + atomic_inc_unchecked(&cm_nodes_created);
43703 @@ -1728,7 +1728,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
43706 atomic_dec(&cm_core->node_cnt);
43707 - atomic_inc(&cm_nodes_destroyed);
43708 + atomic_inc_unchecked(&cm_nodes_destroyed);
43709 nesqp = cm_node->nesqp;
43711 nesqp->cm_node = NULL;
43712 @@ -1792,7 +1792,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
43714 static void drop_packet(struct sk_buff *skb)
43716 - atomic_inc(&cm_accel_dropped_pkts);
43717 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
43718 dev_kfree_skb_any(skb);
43721 @@ -1855,7 +1855,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
43724 int reset = 0; /* whether to send reset in case of err.. */
43725 - atomic_inc(&cm_resets_recvd);
43726 + atomic_inc_unchecked(&cm_resets_recvd);
43727 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
43728 " refcnt=%d\n", cm_node, cm_node->state,
43729 atomic_read(&cm_node->ref_count));
43730 @@ -2523,7 +2523,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
43731 rem_ref_cm_node(cm_node->cm_core, cm_node);
43734 - atomic_inc(&cm_loopbacks);
43735 + atomic_inc_unchecked(&cm_loopbacks);
43736 loopbackremotenode->loopbackpartner = cm_node;
43737 loopbackremotenode->tcp_cntxt.rcv_wscale =
43738 NES_CM_DEFAULT_RCV_WND_SCALE;
43739 @@ -2804,7 +2804,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
43740 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
43742 rem_ref_cm_node(cm_core, cm_node);
43743 - atomic_inc(&cm_accel_dropped_pkts);
43744 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
43745 dev_kfree_skb_any(skb);
43748 @@ -3112,7 +3112,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43750 if ((cm_id) && (cm_id->event_handler)) {
43751 if (issue_disconn) {
43752 - atomic_inc(&cm_disconnects);
43753 + atomic_inc_unchecked(&cm_disconnects);
43754 cm_event.event = IW_CM_EVENT_DISCONNECT;
43755 cm_event.status = disconn_status;
43756 cm_event.local_addr = cm_id->local_addr;
43757 @@ -3134,7 +3134,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43761 - atomic_inc(&cm_closes);
43762 + atomic_inc_unchecked(&cm_closes);
43763 nes_disconnect(nesqp, 1);
43765 cm_id->provider_data = nesqp;
43766 @@ -3272,7 +3272,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43768 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
43769 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
43770 - atomic_inc(&cm_accepts);
43771 + atomic_inc_unchecked(&cm_accepts);
43773 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
43774 netdev_refcnt_read(nesvnic->netdev));
43775 @@ -3470,7 +3470,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
43776 struct nes_cm_core *cm_core;
43779 - atomic_inc(&cm_rejects);
43780 + atomic_inc_unchecked(&cm_rejects);
43781 cm_node = (struct nes_cm_node *)cm_id->provider_data;
43782 loopback = cm_node->loopbackpartner;
43783 cm_core = cm_node->cm_core;
43784 @@ -3535,7 +3535,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43785 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
43786 ntohs(laddr->sin_port));
43788 - atomic_inc(&cm_connects);
43789 + atomic_inc_unchecked(&cm_connects);
43790 nesqp->active_conn = 1;
43792 /* cache the cm_id in the qp */
43793 @@ -3680,7 +3680,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
43794 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
43797 - atomic_inc(&cm_listens_created);
43798 + atomic_inc_unchecked(&cm_listens_created);
43801 cm_id->add_ref(cm_id);
43802 @@ -3787,7 +3787,7 @@ static void cm_event_connected(struct nes_cm_event *event)
43804 if (nesqp->destroyed)
43806 - atomic_inc(&cm_connecteds);
43807 + atomic_inc_unchecked(&cm_connecteds);
43808 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
43809 " local port 0x%04X. jiffies = %lu.\n",
43810 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
43811 @@ -3972,7 +3972,7 @@ static void cm_event_reset(struct nes_cm_event *event)
43813 cm_id->add_ref(cm_id);
43814 ret = cm_id->event_handler(cm_id, &cm_event);
43815 - atomic_inc(&cm_closes);
43816 + atomic_inc_unchecked(&cm_closes);
43817 cm_event.event = IW_CM_EVENT_CLOSE;
43818 cm_event.status = 0;
43819 cm_event.provider_data = cm_id->provider_data;
43820 @@ -4012,7 +4012,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
43822 cm_id = cm_node->cm_id;
43824 - atomic_inc(&cm_connect_reqs);
43825 + atomic_inc_unchecked(&cm_connect_reqs);
43826 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43827 cm_node, cm_id, jiffies);
43829 @@ -4061,7 +4061,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
43831 cm_id = cm_node->cm_id;
43833 - atomic_inc(&cm_connect_reqs);
43834 + atomic_inc_unchecked(&cm_connect_reqs);
43835 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43836 cm_node, cm_id, jiffies);
43838 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
43839 index 4166452..fc952c3 100644
43840 --- a/drivers/infiniband/hw/nes/nes_mgt.c
43841 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
43844 #include "nes_mgt.h"
43846 -atomic_t pau_qps_created;
43847 -atomic_t pau_qps_destroyed;
43848 +atomic_unchecked_t pau_qps_created;
43849 +atomic_unchecked_t pau_qps_destroyed;
43851 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
43853 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
43855 struct sk_buff *skb;
43856 unsigned long flags;
43857 - atomic_inc(&pau_qps_destroyed);
43858 + atomic_inc_unchecked(&pau_qps_destroyed);
43860 /* Free packets that have not yet been forwarded */
43861 /* Lock is acquired by skb_dequeue when removing the skb */
43862 @@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
43863 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
43864 skb_queue_head_init(&nesqp->pau_list);
43865 spin_lock_init(&nesqp->pau_lock);
43866 - atomic_inc(&pau_qps_created);
43867 + atomic_inc_unchecked(&pau_qps_created);
43868 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
43871 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
43872 index 70acda9..a96de9d 100644
43873 --- a/drivers/infiniband/hw/nes/nes_nic.c
43874 +++ b/drivers/infiniband/hw/nes/nes_nic.c
43875 @@ -1274,39 +1274,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
43876 target_stat_values[++index] = mh_detected;
43877 target_stat_values[++index] = mh_pauses_sent;
43878 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
43879 - target_stat_values[++index] = atomic_read(&cm_connects);
43880 - target_stat_values[++index] = atomic_read(&cm_accepts);
43881 - target_stat_values[++index] = atomic_read(&cm_disconnects);
43882 - target_stat_values[++index] = atomic_read(&cm_connecteds);
43883 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
43884 - target_stat_values[++index] = atomic_read(&cm_rejects);
43885 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
43886 - target_stat_values[++index] = atomic_read(&qps_created);
43887 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
43888 - target_stat_values[++index] = atomic_read(&qps_destroyed);
43889 - target_stat_values[++index] = atomic_read(&cm_closes);
43890 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
43891 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
43892 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
43893 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
43894 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
43895 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
43896 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
43897 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
43898 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
43899 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
43900 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
43901 target_stat_values[++index] = cm_packets_sent;
43902 target_stat_values[++index] = cm_packets_bounced;
43903 target_stat_values[++index] = cm_packets_created;
43904 target_stat_values[++index] = cm_packets_received;
43905 target_stat_values[++index] = cm_packets_dropped;
43906 target_stat_values[++index] = cm_packets_retrans;
43907 - target_stat_values[++index] = atomic_read(&cm_listens_created);
43908 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
43909 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
43910 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
43911 target_stat_values[++index] = cm_backlog_drops;
43912 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
43913 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
43914 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
43915 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
43916 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
43917 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
43918 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
43919 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
43920 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
43921 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
43922 target_stat_values[++index] = nesadapter->free_4kpbl;
43923 target_stat_values[++index] = nesadapter->free_256pbl;
43924 target_stat_values[++index] = int_mod_timer_init;
43925 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
43926 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
43927 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
43928 - target_stat_values[++index] = atomic_read(&pau_qps_created);
43929 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
43930 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
43931 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
43935 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
43936 index c0d0296..3185f57 100644
43937 --- a/drivers/infiniband/hw/nes/nes_verbs.c
43938 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
43941 #include <rdma/ib_umem.h>
43943 -atomic_t mod_qp_timouts;
43944 -atomic_t qps_created;
43945 -atomic_t sw_qps_destroyed;
43946 +atomic_unchecked_t mod_qp_timouts;
43947 +atomic_unchecked_t qps_created;
43948 +atomic_unchecked_t sw_qps_destroyed;
43950 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
43952 @@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
43953 if (init_attr->create_flags)
43954 return ERR_PTR(-EINVAL);
43956 - atomic_inc(&qps_created);
43957 + atomic_inc_unchecked(&qps_created);
43958 switch (init_attr->qp_type) {
43960 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
43961 @@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
43962 struct iw_cm_event cm_event;
43965 - atomic_inc(&sw_qps_destroyed);
43966 + atomic_inc_unchecked(&sw_qps_destroyed);
43967 nesqp->destroyed = 1;
43969 /* Blow away the connection if it exists. */
43970 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
43971 index 7df16f7..7e1b21e 100644
43972 --- a/drivers/infiniband/hw/qib/qib.h
43973 +++ b/drivers/infiniband/hw/qib/qib.h
43975 #include <linux/kref.h>
43976 #include <linux/sched.h>
43977 #include <linux/kthread.h>
43978 +#include <linux/slab.h>
43980 #include "qib_common.h"
43981 #include "qib_verbs.h"
43982 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43983 index cdc7df4..a2fdfdb 100644
43984 --- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43985 +++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43986 @@ -156,7 +156,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
43987 nla_total_size(2); /* IFLA_IPOIB_UMCAST */
43990 -static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
43991 +static struct rtnl_link_ops ipoib_link_ops = {
43993 .maxtype = IFLA_IPOIB_MAX,
43994 .policy = ipoib_policy,
43995 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
43996 index e853a21..56fc5a8 100644
43997 --- a/drivers/input/gameport/gameport.c
43998 +++ b/drivers/input/gameport/gameport.c
43999 @@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
44001 static void gameport_init_port(struct gameport *gameport)
44003 - static atomic_t gameport_no = ATOMIC_INIT(-1);
44004 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(-1);
44006 __module_get(THIS_MODULE);
44008 mutex_init(&gameport->drv_mutex);
44009 device_initialize(&gameport->dev);
44010 dev_set_name(&gameport->dev, "gameport%lu",
44011 - (unsigned long)atomic_inc_return(&gameport_no));
44012 + (unsigned long)atomic_inc_return_unchecked(&gameport_no));
44013 gameport->dev.bus = &gameport_bus;
44014 gameport->dev.release = gameport_release_port;
44015 if (gameport->parent)
44016 diff --git a/drivers/input/input.c b/drivers/input/input.c
44017 index cc357f1..ee42fbc 100644
44018 --- a/drivers/input/input.c
44019 +++ b/drivers/input/input.c
44020 @@ -1781,7 +1781,7 @@ EXPORT_SYMBOL_GPL(input_class);
44022 struct input_dev *input_allocate_device(void)
44024 - static atomic_t input_no = ATOMIC_INIT(-1);
44025 + static atomic_unchecked_t input_no = ATOMIC_INIT(-1);
44026 struct input_dev *dev;
44028 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
44029 @@ -1796,7 +1796,7 @@ struct input_dev *input_allocate_device(void)
44030 INIT_LIST_HEAD(&dev->node);
44032 dev_set_name(&dev->dev, "input%lu",
44033 - (unsigned long)atomic_inc_return(&input_no));
44034 + (unsigned long)atomic_inc_return_unchecked(&input_no));
44036 __module_get(THIS_MODULE);
44038 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
44039 index 4a95b22..874c182 100644
44040 --- a/drivers/input/joystick/sidewinder.c
44041 +++ b/drivers/input/joystick/sidewinder.c
44043 #include <linux/kernel.h>
44044 #include <linux/module.h>
44045 #include <linux/slab.h>
44046 +#include <linux/sched.h>
44047 #include <linux/input.h>
44048 #include <linux/gameport.h>
44049 #include <linux/jiffies.h>
44050 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
44051 index 61c7611..e1bfa38 100644
44052 --- a/drivers/input/joystick/xpad.c
44053 +++ b/drivers/input/joystick/xpad.c
44054 @@ -905,7 +905,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
44056 static int xpad_led_probe(struct usb_xpad *xpad)
44058 - static atomic_t led_seq = ATOMIC_INIT(-1);
44059 + static atomic_unchecked_t led_seq = ATOMIC_INIT(-1);
44060 unsigned long led_no;
44061 struct xpad_led *led;
44062 struct led_classdev *led_cdev;
44063 @@ -918,7 +918,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
44067 - led_no = atomic_inc_return(&led_seq);
44068 + led_no = atomic_inc_return_unchecked(&led_seq);
44070 snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
44072 diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
44073 index ac1fa5f..5f7502c 100644
44074 --- a/drivers/input/misc/ims-pcu.c
44075 +++ b/drivers/input/misc/ims-pcu.c
44076 @@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
44078 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44080 - static atomic_t device_no = ATOMIC_INIT(-1);
44081 + static atomic_unchecked_t device_no = ATOMIC_INIT(-1);
44083 const struct ims_pcu_device_info *info;
44085 @@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44088 /* Device appears to be operable, complete initialization */
44089 - pcu->device_no = atomic_inc_return(&device_no);
44090 + pcu->device_no = atomic_inc_return_unchecked(&device_no);
44093 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
44094 diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
44095 index ad5a5a1..5eac214 100644
44096 --- a/drivers/input/mouse/psmouse.h
44097 +++ b/drivers/input/mouse/psmouse.h
44098 @@ -125,7 +125,7 @@ struct psmouse_attribute {
44099 ssize_t (*set)(struct psmouse *psmouse, void *data,
44100 const char *buf, size_t count);
44104 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
44106 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
44107 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
44108 index b604564..3f14ae4 100644
44109 --- a/drivers/input/mousedev.c
44110 +++ b/drivers/input/mousedev.c
44111 @@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
44113 spin_unlock_irq(&client->packet_lock);
44115 - if (copy_to_user(buffer, data, count))
44116 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
44120 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
44121 index a05a517..323a2fd 100644
44122 --- a/drivers/input/serio/serio.c
44123 +++ b/drivers/input/serio/serio.c
44124 @@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
44126 static void serio_init_port(struct serio *serio)
44128 - static atomic_t serio_no = ATOMIC_INIT(-1);
44129 + static atomic_unchecked_t serio_no = ATOMIC_INIT(-1);
44131 __module_get(THIS_MODULE);
44133 @@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
44134 mutex_init(&serio->drv_mutex);
44135 device_initialize(&serio->dev);
44136 dev_set_name(&serio->dev, "serio%lu",
44137 - (unsigned long)atomic_inc_return(&serio_no));
44138 + (unsigned long)atomic_inc_return_unchecked(&serio_no));
44139 serio->dev.bus = &serio_bus;
44140 serio->dev.release = serio_release_port;
44141 serio->dev.groups = serio_device_attr_groups;
44142 diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
44143 index 71ef5d6..93380a9 100644
44144 --- a/drivers/input/serio/serio_raw.c
44145 +++ b/drivers/input/serio/serio_raw.c
44146 @@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
44148 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
44150 - static atomic_t serio_raw_no = ATOMIC_INIT(-1);
44151 + static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(-1);
44152 struct serio_raw *serio_raw;
44155 @@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
44158 snprintf(serio_raw->name, sizeof(serio_raw->name),
44159 - "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no));
44160 + "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no));
44161 kref_init(&serio_raw->kref);
44162 INIT_LIST_HEAD(&serio_raw->client_list);
44163 init_waitqueue_head(&serio_raw->wait);
44164 diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
44165 index 92e2243..8fd9092 100644
44166 --- a/drivers/input/touchscreen/htcpen.c
44167 +++ b/drivers/input/touchscreen/htcpen.c
44168 @@ -219,7 +219,7 @@ static struct isa_driver htcpen_isa_driver = {
44172 -static struct dmi_system_id htcshift_dmi_table[] __initdata = {
44173 +static const struct dmi_system_id htcshift_dmi_table[] __initconst = {
44177 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
44178 index ca9f4ed..b860ff1 100644
44179 --- a/drivers/iommu/amd_iommu.c
44180 +++ b/drivers/iommu/amd_iommu.c
44181 @@ -829,11 +829,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
44183 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
44185 + phys_addr_t physaddr;
44186 WARN_ON(address & 0x7ULL);
44188 memset(cmd, 0, sizeof(*cmd));
44189 - cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
44190 - cmd->data[1] = upper_32_bits(__pa(address));
44192 +#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
44193 + if (object_starts_on_stack((void *)address)) {
44194 + void *adjbuf = (void *)address - current->stack + current->lowmem_stack;
44195 + physaddr = __pa((u64)adjbuf);
44198 + physaddr = __pa(address);
44200 + cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK;
44201 + cmd->data[1] = upper_32_bits(physaddr);
44203 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
44205 diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
44206 index 65075ef..53823f9 100644
44207 --- a/drivers/iommu/arm-smmu.c
44208 +++ b/drivers/iommu/arm-smmu.c
44209 @@ -331,7 +331,7 @@ enum arm_smmu_domain_stage {
44211 struct arm_smmu_domain {
44212 struct arm_smmu_device *smmu;
44213 - struct io_pgtable_ops *pgtbl_ops;
44214 + struct io_pgtable *pgtbl;
44215 spinlock_t pgtbl_lock;
44216 struct arm_smmu_cfg cfg;
44217 enum arm_smmu_domain_stage stage;
44218 @@ -817,7 +817,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44220 int irq, start, ret = 0;
44221 unsigned long ias, oas;
44222 - struct io_pgtable_ops *pgtbl_ops;
44223 + struct io_pgtable *pgtbl;
44224 struct io_pgtable_cfg pgtbl_cfg;
44225 enum io_pgtable_fmt fmt;
44226 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
44227 @@ -902,14 +902,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44230 smmu_domain->smmu = smmu;
44231 - pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
44232 - if (!pgtbl_ops) {
44233 + pgtbl = alloc_io_pgtable(fmt, &pgtbl_cfg, smmu_domain);
44236 goto out_clear_smmu;
44239 /* Update our support page sizes to reflect the page table format */
44240 - arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
44241 + pax_open_kernel();
44242 + *(unsigned long *)&arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
44243 + pax_close_kernel();
44245 /* Initialise the context bank with our page table cfg */
44246 arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
44247 @@ -930,7 +932,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44248 mutex_unlock(&smmu_domain->init_mutex);
44250 /* Publish page table ops for map/unmap */
44251 - smmu_domain->pgtbl_ops = pgtbl_ops;
44252 + smmu_domain->pgtbl = pgtbl;
44256 @@ -963,8 +965,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
44257 free_irq(irq, domain);
44260 - if (smmu_domain->pgtbl_ops)
44261 - free_io_pgtable_ops(smmu_domain->pgtbl_ops);
44262 + free_io_pgtable(smmu_domain->pgtbl);
44264 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
44266 @@ -1190,13 +1191,13 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
44268 unsigned long flags;
44269 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
44270 - struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44271 + struct io_pgtable *iop = smmu_domain->pgtbl;
44277 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
44278 - ret = ops->map(ops, iova, paddr, size, prot);
44279 + ret = iop->ops->map(iop, iova, paddr, size, prot);
44280 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
44283 @@ -1207,13 +1208,13 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
44285 unsigned long flags;
44286 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
44287 - struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44288 + struct io_pgtable *iop = smmu_domain->pgtbl;
44294 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
44295 - ret = ops->unmap(ops, iova, size);
44296 + ret = iop->ops->unmap(iop, iova, size);
44297 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
44300 @@ -1224,7 +1225,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
44301 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
44302 struct arm_smmu_device *smmu = smmu_domain->smmu;
44303 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
44304 - struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44305 + struct io_pgtable *iop = smmu_domain->pgtbl;
44306 struct device *dev = smmu->dev;
44307 void __iomem *cb_base;
44309 @@ -1247,7 +1248,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
44311 "iova to phys timed out on 0x%pad. Falling back to software table walk.\n",
44313 - return ops->iova_to_phys(ops, iova);
44314 + return iop->ops->iova_to_phys(iop, iova);
44317 phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
44318 @@ -1268,9 +1269,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
44320 unsigned long flags;
44321 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
44322 - struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44323 + struct io_pgtable *iop = smmu_domain->pgtbl;
44329 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
44330 @@ -1278,7 +1279,7 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
44331 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
44332 ret = arm_smmu_iova_to_phys_hard(domain, iova);
44334 - ret = ops->iova_to_phys(ops, iova);
44335 + ret = iop->ops->iova_to_phys(iop, iova);
44338 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
44339 @@ -1668,7 +1669,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
44340 size |= SZ_64K | SZ_512M;
44343 - arm_smmu_ops.pgsize_bitmap &= size;
44344 + pax_open_kernel();
44345 + *(unsigned long *)&arm_smmu_ops.pgsize_bitmap &= size;
44346 + pax_close_kernel();
44347 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
44349 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
44350 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
44351 index 5ecfaf2..c87c4b1 100644
44352 --- a/drivers/iommu/intel-iommu.c
44353 +++ b/drivers/iommu/intel-iommu.c
44354 @@ -1756,8 +1756,9 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
44356 static void domain_exit(struct dmar_domain *domain)
44358 + struct dmar_drhd_unit *drhd;
44359 + struct intel_iommu *iommu;
44360 struct page *freelist = NULL;
44363 /* Domain 0 is reserved, so dont process it */
44365 @@ -1777,8 +1778,10 @@ static void domain_exit(struct dmar_domain *domain)
44367 /* clear attached or cached domains */
44369 - for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
44370 - iommu_detach_domain(domain, g_iommus[i]);
44371 + for_each_active_iommu(iommu, drhd)
44372 + if (domain_type_is_vm(domain) ||
44373 + test_bit(iommu->seq_id, domain->iommu_bmp))
44374 + iommu_detach_domain(domain, iommu);
44377 dma_free_pagelist(freelist);
44378 diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
44379 index 4e46021..f0a24fef 100644
44380 --- a/drivers/iommu/io-pgtable-arm.c
44381 +++ b/drivers/iommu/io-pgtable-arm.c
44383 #define io_pgtable_to_data(x) \
44384 container_of((x), struct arm_lpae_io_pgtable, iop)
44386 -#define io_pgtable_ops_to_pgtable(x) \
44387 - container_of((x), struct io_pgtable, ops)
44389 -#define io_pgtable_ops_to_data(x) \
44390 - io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
44393 * For consistency with the architecture, we always consider
44394 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
44395 @@ -304,10 +298,10 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
44399 -static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
44400 +static int arm_lpae_map(struct io_pgtable *iop, unsigned long iova,
44401 phys_addr_t paddr, size_t size, int iommu_prot)
44403 - struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44404 + struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44405 arm_lpae_iopte *ptep = data->pgd;
44406 int lvl = ARM_LPAE_START_LVL(data);
44407 arm_lpae_iopte prot;
44408 @@ -447,12 +441,11 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
44409 return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
44412 -static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
44413 +static int arm_lpae_unmap(struct io_pgtable *iop, unsigned long iova,
44417 - struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44418 - struct io_pgtable *iop = &data->iop;
44419 + struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44420 arm_lpae_iopte *ptep = data->pgd;
44421 int lvl = ARM_LPAE_START_LVL(data);
44423 @@ -463,10 +456,10 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
44427 -static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
44428 +static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable *iop,
44429 unsigned long iova)
44431 - struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44432 + struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44433 arm_lpae_iopte pte, *ptep = data->pgd;
44434 int lvl = ARM_LPAE_START_LVL(data);
44436 @@ -533,6 +526,12 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
44440 +static struct io_pgtable_ops arm_lpae_io_pgtable_ops = {
44441 + .map = arm_lpae_map,
44442 + .unmap = arm_lpae_unmap,
44443 + .iova_to_phys = arm_lpae_iova_to_phys,
44446 static struct arm_lpae_io_pgtable *
44447 arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
44449 @@ -564,11 +563,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
44450 pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
44451 data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
44453 - data->iop.ops = (struct io_pgtable_ops) {
44454 - .map = arm_lpae_map,
44455 - .unmap = arm_lpae_unmap,
44456 - .iova_to_phys = arm_lpae_iova_to_phys,
44458 + data->iop.ops = &arm_lpae_io_pgtable_ops;
44462 @@ -830,9 +825,9 @@ static struct iommu_gather_ops dummy_tlb_ops __initdata = {
44463 .flush_pgtable = dummy_flush_pgtable,
44466 -static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
44467 +static void __init arm_lpae_dump_ops(struct io_pgtable *iop)
44469 - struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44470 + struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44471 struct io_pgtable_cfg *cfg = &data->iop.cfg;
44473 pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
44474 @@ -842,9 +837,9 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
44475 data->bits_per_level, data->pgd);
44478 -#define __FAIL(ops, i) ({ \
44479 +#define __FAIL(iop, i) ({ \
44480 WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
44481 - arm_lpae_dump_ops(ops); \
44482 + arm_lpae_dump_ops(iop); \
44483 selftest_running = false; \
44486 @@ -859,30 +854,32 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44488 unsigned long iova;
44490 - struct io_pgtable_ops *ops;
44491 + struct io_pgtable *iop;
44492 + const struct io_pgtable_ops *ops;
44494 selftest_running = true;
44496 for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
44498 - ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
44500 + iop = alloc_io_pgtable(fmts[i], cfg, cfg);
44502 pr_err("selftest: failed to allocate io pgtable ops\n");
44508 * Initial sanity checks.
44509 * Empty page tables shouldn't provide any translations.
44511 - if (ops->iova_to_phys(ops, 42))
44512 - return __FAIL(ops, i);
44513 + if (ops->iova_to_phys(iop, 42))
44514 + return __FAIL(iop, i);
44516 - if (ops->iova_to_phys(ops, SZ_1G + 42))
44517 - return __FAIL(ops, i);
44518 + if (ops->iova_to_phys(iop, SZ_1G + 42))
44519 + return __FAIL(iop, i);
44521 - if (ops->iova_to_phys(ops, SZ_2G + 42))
44522 - return __FAIL(ops, i);
44523 + if (ops->iova_to_phys(iop, SZ_2G + 42))
44524 + return __FAIL(iop, i);
44527 * Distinct mappings of different granule sizes.
44528 @@ -892,19 +889,19 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44529 while (j != BITS_PER_LONG) {
44532 - if (ops->map(ops, iova, iova, size, IOMMU_READ |
44533 + if (ops->map(iop, iova, iova, size, IOMMU_READ |
44537 - return __FAIL(ops, i);
44538 + return __FAIL(iop, i);
44540 /* Overlapping mappings */
44541 - if (!ops->map(ops, iova, iova + size, size,
44542 + if (!ops->map(iop, iova, iova + size, size,
44543 IOMMU_READ | IOMMU_NOEXEC))
44544 - return __FAIL(ops, i);
44545 + return __FAIL(iop, i);
44547 - if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
44548 - return __FAIL(ops, i);
44549 + if (ops->iova_to_phys(iop, iova + 42) != (iova + 42))
44550 + return __FAIL(iop, i);
44554 @@ -913,15 +910,15 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44556 /* Partial unmap */
44557 size = 1UL << __ffs(cfg->pgsize_bitmap);
44558 - if (ops->unmap(ops, SZ_1G + size, size) != size)
44559 - return __FAIL(ops, i);
44560 + if (ops->unmap(iop, SZ_1G + size, size) != size)
44561 + return __FAIL(iop, i);
44563 /* Remap of partial unmap */
44564 - if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
44565 - return __FAIL(ops, i);
44566 + if (ops->map(iop, SZ_1G + size, size, size, IOMMU_READ))
44567 + return __FAIL(iop, i);
44569 - if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
44570 - return __FAIL(ops, i);
44571 + if (ops->iova_to_phys(iop, SZ_1G + size + 42) != (size + 42))
44572 + return __FAIL(iop, i);
44576 @@ -929,25 +926,25 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44577 while (j != BITS_PER_LONG) {
44580 - if (ops->unmap(ops, iova, size) != size)
44581 - return __FAIL(ops, i);
44582 + if (ops->unmap(iop, iova, size) != size)
44583 + return __FAIL(iop, i);
44585 - if (ops->iova_to_phys(ops, iova + 42))
44586 - return __FAIL(ops, i);
44587 + if (ops->iova_to_phys(iop, iova + 42))
44588 + return __FAIL(iop, i);
44590 /* Remap full block */
44591 - if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
44592 - return __FAIL(ops, i);
44593 + if (ops->map(iop, iova, iova, size, IOMMU_WRITE))
44594 + return __FAIL(iop, i);
44596 - if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
44597 - return __FAIL(ops, i);
44598 + if (ops->iova_to_phys(iop, iova + 42) != (iova + 42))
44599 + return __FAIL(iop, i);
44603 j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
44606 - free_io_pgtable_ops(ops);
44607 + free_io_pgtable(iop);
44610 selftest_running = false;
44611 diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
44612 index 6436fe2..088c965 100644
44613 --- a/drivers/iommu/io-pgtable.c
44614 +++ b/drivers/iommu/io-pgtable.c
44615 @@ -40,7 +40,7 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
44619 -struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44620 +struct io_pgtable *alloc_io_pgtable(enum io_pgtable_fmt fmt,
44621 struct io_pgtable_cfg *cfg,
44624 @@ -62,21 +62,18 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44625 iop->cookie = cookie;
44628 - return &iop->ops;
44633 * It is the IOMMU driver's responsibility to ensure that the page table
44634 * is no longer accessible to the walker by this point.
44636 -void free_io_pgtable_ops(struct io_pgtable_ops *ops)
44637 +void free_io_pgtable(struct io_pgtable *iop)
44639 - struct io_pgtable *iop;
44645 - iop = container_of(ops, struct io_pgtable, ops);
44646 iop->cfg.tlb->tlb_flush_all(iop->cookie);
44647 io_pgtable_init_table[iop->fmt]->free(iop);
44649 diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
44650 index 10e32f6..0b276c8 100644
44651 --- a/drivers/iommu/io-pgtable.h
44652 +++ b/drivers/iommu/io-pgtable.h
44653 @@ -75,17 +75,18 @@ struct io_pgtable_cfg {
44654 * These functions map directly onto the iommu_ops member functions with
44657 +struct io_pgtable;
44658 struct io_pgtable_ops {
44659 - int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
44660 + int (*map)(struct io_pgtable *iop, unsigned long iova,
44661 phys_addr_t paddr, size_t size, int prot);
44662 - int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
44663 + int (*unmap)(struct io_pgtable *iop, unsigned long iova,
44665 - phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
44666 + phys_addr_t (*iova_to_phys)(struct io_pgtable *iop,
44667 unsigned long iova);
44671 - * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
44672 + * alloc_io_pgtable() - Allocate a page table allocator for use by an IOMMU.
44674 * @fmt: The page table format.
44675 * @cfg: The page table configuration. This will be modified to represent
44676 @@ -94,9 +95,9 @@ struct io_pgtable_ops {
44677 * @cookie: An opaque token provided by the IOMMU driver and passed back to
44678 * the callback routines in cfg->tlb.
44680 -struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44681 - struct io_pgtable_cfg *cfg,
44683 +struct io_pgtable *alloc_io_pgtable(enum io_pgtable_fmt fmt,
44684 + struct io_pgtable_cfg *cfg,
44688 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
44689 @@ -105,7 +106,7 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44691 * @ops: The ops returned from alloc_io_pgtable_ops.
44693 -void free_io_pgtable_ops(struct io_pgtable_ops *ops);
44694 +void free_io_pgtable(struct io_pgtable *iop);
44698 @@ -125,7 +126,7 @@ struct io_pgtable {
44699 enum io_pgtable_fmt fmt;
44701 struct io_pgtable_cfg cfg;
44702 - struct io_pgtable_ops ops;
44703 + const struct io_pgtable_ops *ops;
44707 diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
44708 index d4f527e..8e4a4fd 100644
44709 --- a/drivers/iommu/iommu.c
44710 +++ b/drivers/iommu/iommu.c
44711 @@ -802,7 +802,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
44712 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
44715 - struct notifier_block *nb;
44716 + notifier_block_no_const *nb;
44717 struct iommu_callback_data cb = {
44720 diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
44721 index 1a67c53..23181d8 100644
44722 --- a/drivers/iommu/ipmmu-vmsa.c
44723 +++ b/drivers/iommu/ipmmu-vmsa.c
44724 @@ -41,7 +41,7 @@ struct ipmmu_vmsa_domain {
44725 struct iommu_domain io_domain;
44727 struct io_pgtable_cfg cfg;
44728 - struct io_pgtable_ops *iop;
44729 + struct io_pgtable *iop;
44731 unsigned int context_id;
44732 spinlock_t lock; /* Protects mappings */
44733 @@ -328,8 +328,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
44734 domain->cfg.oas = 40;
44735 domain->cfg.tlb = &ipmmu_gather_ops;
44737 - domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
44739 + domain->iop = alloc_io_pgtable(ARM_32_LPAE_S1, &domain->cfg, domain);
44743 @@ -487,7 +486,7 @@ static void ipmmu_domain_free(struct iommu_domain *io_domain)
44746 ipmmu_domain_destroy_context(domain);
44747 - free_io_pgtable_ops(domain->iop);
44748 + free_io_pgtable(domain->iop);
44752 @@ -556,7 +555,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
44756 - return domain->iop->map(domain->iop, iova, paddr, size, prot);
44757 + return domain->iop->ops->map(domain->iop, iova, paddr, size, prot);
44760 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
44761 @@ -564,7 +563,7 @@ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
44763 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
44765 - return domain->iop->unmap(domain->iop, iova, size);
44766 + return domain->iop->ops->unmap(domain->iop, iova, size);
44769 static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
44770 @@ -574,7 +573,7 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
44772 /* TODO: Is locking needed ? */
44774 - return domain->iop->iova_to_phys(domain->iop, iova);
44775 + return domain->iop->ops->iova_to_phys(domain->iop, iova);
44778 static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,
44779 diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
44780 index 390079e..1da9d6c 100644
44781 --- a/drivers/iommu/irq_remapping.c
44782 +++ b/drivers/iommu/irq_remapping.c
44783 @@ -329,7 +329,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
44784 void panic_if_irq_remap(const char *msg)
44786 if (irq_remapping_enabled)
44788 + panic("%s", msg);
44791 static void ir_ack_apic_edge(struct irq_data *data)
44792 @@ -350,10 +350,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
44794 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
44796 - chip->irq_print_chip = ir_print_prefix;
44797 - chip->irq_ack = ir_ack_apic_edge;
44798 - chip->irq_eoi = ir_ack_apic_level;
44799 - chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
44800 + pax_open_kernel();
44801 + *(void **)&chip->irq_print_chip = ir_print_prefix;
44802 + *(void **)&chip->irq_ack = ir_ack_apic_edge;
44803 + *(void **)&chip->irq_eoi = ir_ack_apic_level;
44804 + *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
44805 + pax_close_kernel();
44808 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
44809 diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
44810 index 01999d7..4f14bb7 100644
44811 --- a/drivers/irqchip/irq-gic.c
44812 +++ b/drivers/irqchip/irq-gic.c
44813 @@ -313,7 +313,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
44814 chained_irq_exit(chip, desc);
44817 -static struct irq_chip gic_chip = {
44818 +static irq_chip_no_const gic_chip __read_only = {
44820 .irq_mask = gic_mask_irq,
44821 .irq_unmask = gic_unmask_irq,
44822 diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
44823 index 9a0767b..5e5f86f 100644
44824 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c
44825 +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
44826 @@ -373,7 +373,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
44827 struct intc_irqpin_iomem *i;
44828 struct resource *io[INTC_IRQPIN_REG_NR];
44829 struct resource *irq;
44830 - struct irq_chip *irq_chip;
44831 + irq_chip_no_const *irq_chip;
44832 void (*enable_fn)(struct irq_data *d);
44833 void (*disable_fn)(struct irq_data *d);
44834 const char *name = dev_name(dev);
44835 diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
44836 index cdf80b7..e5c3ade 100644
44837 --- a/drivers/irqchip/irq-renesas-irqc.c
44838 +++ b/drivers/irqchip/irq-renesas-irqc.c
44839 @@ -179,7 +179,7 @@ static int irqc_probe(struct platform_device *pdev)
44840 struct irqc_priv *p;
44841 struct resource *io;
44842 struct resource *irq;
44843 - struct irq_chip *irq_chip;
44844 + irq_chip_no_const *irq_chip;
44845 const char *name = dev_name(&pdev->dev);
44848 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
44849 index 6a2df32..dc962f1 100644
44850 --- a/drivers/isdn/capi/capi.c
44851 +++ b/drivers/isdn/capi/capi.c
44852 @@ -81,8 +81,8 @@ struct capiminor {
44854 struct capi20_appl *ap;
44856 - atomic_t datahandle;
44858 + atomic_unchecked_t datahandle;
44859 + atomic_unchecked_t msgid;
44861 struct tty_port port;
44863 @@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
44864 capimsg_setu16(s, 2, mp->ap->applid);
44865 capimsg_setu8 (s, 4, CAPI_DATA_B3);
44866 capimsg_setu8 (s, 5, CAPI_RESP);
44867 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
44868 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
44869 capimsg_setu32(s, 8, mp->ncci);
44870 capimsg_setu16(s, 12, datahandle);
44872 @@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
44873 mp->outbytes -= len;
44874 spin_unlock_bh(&mp->outlock);
44876 - datahandle = atomic_inc_return(&mp->datahandle);
44877 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
44878 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
44879 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
44880 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
44881 capimsg_setu16(skb->data, 2, mp->ap->applid);
44882 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
44883 capimsg_setu8 (skb->data, 5, CAPI_REQ);
44884 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
44885 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
44886 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
44887 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
44888 capimsg_setu16(skb->data, 16, len); /* Data length */
44889 diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
44890 index aecec6d..11e13c5 100644
44891 --- a/drivers/isdn/gigaset/bas-gigaset.c
44892 +++ b/drivers/isdn/gigaset/bas-gigaset.c
44893 @@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
44896 static const struct gigaset_ops gigops = {
44897 - gigaset_write_cmd,
44898 - gigaset_write_room,
44899 - gigaset_chars_in_buffer,
44900 - gigaset_brkchars,
44901 - gigaset_init_bchannel,
44902 - gigaset_close_bchannel,
44903 - gigaset_initbcshw,
44904 - gigaset_freebcshw,
44905 - gigaset_reinitbcshw,
44906 - gigaset_initcshw,
44907 - gigaset_freecshw,
44908 - gigaset_set_modem_ctrl,
44909 - gigaset_baud_rate,
44910 - gigaset_set_line_ctrl,
44911 - gigaset_isoc_send_skb,
44912 - gigaset_isoc_input,
44913 + .write_cmd = gigaset_write_cmd,
44914 + .write_room = gigaset_write_room,
44915 + .chars_in_buffer = gigaset_chars_in_buffer,
44916 + .brkchars = gigaset_brkchars,
44917 + .init_bchannel = gigaset_init_bchannel,
44918 + .close_bchannel = gigaset_close_bchannel,
44919 + .initbcshw = gigaset_initbcshw,
44920 + .freebcshw = gigaset_freebcshw,
44921 + .reinitbcshw = gigaset_reinitbcshw,
44922 + .initcshw = gigaset_initcshw,
44923 + .freecshw = gigaset_freecshw,
44924 + .set_modem_ctrl = gigaset_set_modem_ctrl,
44925 + .baud_rate = gigaset_baud_rate,
44926 + .set_line_ctrl = gigaset_set_line_ctrl,
44927 + .send_skb = gigaset_isoc_send_skb,
44928 + .handle_input = gigaset_isoc_input,
44931 /* bas_gigaset_init
44932 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
44933 index 600c79b..3752bab 100644
44934 --- a/drivers/isdn/gigaset/interface.c
44935 +++ b/drivers/isdn/gigaset/interface.c
44936 @@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
44938 tty->driver_data = cs;
44940 - ++cs->port.count;
44941 + atomic_inc(&cs->port.count);
44943 - if (cs->port.count == 1) {
44944 + if (atomic_read(&cs->port.count) == 1) {
44945 tty_port_tty_set(&cs->port, tty);
44946 cs->port.low_latency = 1;
44948 @@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
44950 if (!cs->connected)
44951 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
44952 - else if (!cs->port.count)
44953 + else if (!atomic_read(&cs->port.count))
44954 dev_warn(cs->dev, "%s: device not opened\n", __func__);
44955 - else if (!--cs->port.count)
44956 + else if (!atomic_dec_return(&cs->port.count))
44957 tty_port_tty_set(&cs->port, NULL);
44959 mutex_unlock(&cs->mutex);
44960 diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
44961 index 8c91fd5..14f13ce 100644
44962 --- a/drivers/isdn/gigaset/ser-gigaset.c
44963 +++ b/drivers/isdn/gigaset/ser-gigaset.c
44964 @@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
44967 static const struct gigaset_ops ops = {
44968 - gigaset_write_cmd,
44969 - gigaset_write_room,
44970 - gigaset_chars_in_buffer,
44971 - gigaset_brkchars,
44972 - gigaset_init_bchannel,
44973 - gigaset_close_bchannel,
44974 - gigaset_initbcshw,
44975 - gigaset_freebcshw,
44976 - gigaset_reinitbcshw,
44977 - gigaset_initcshw,
44978 - gigaset_freecshw,
44979 - gigaset_set_modem_ctrl,
44980 - gigaset_baud_rate,
44981 - gigaset_set_line_ctrl,
44982 - gigaset_m10x_send_skb, /* asyncdata.c */
44983 - gigaset_m10x_input, /* asyncdata.c */
44984 + .write_cmd = gigaset_write_cmd,
44985 + .write_room = gigaset_write_room,
44986 + .chars_in_buffer = gigaset_chars_in_buffer,
44987 + .brkchars = gigaset_brkchars,
44988 + .init_bchannel = gigaset_init_bchannel,
44989 + .close_bchannel = gigaset_close_bchannel,
44990 + .initbcshw = gigaset_initbcshw,
44991 + .freebcshw = gigaset_freebcshw,
44992 + .reinitbcshw = gigaset_reinitbcshw,
44993 + .initcshw = gigaset_initcshw,
44994 + .freecshw = gigaset_freecshw,
44995 + .set_modem_ctrl = gigaset_set_modem_ctrl,
44996 + .baud_rate = gigaset_baud_rate,
44997 + .set_line_ctrl = gigaset_set_line_ctrl,
44998 + .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
44999 + .handle_input = gigaset_m10x_input, /* asyncdata.c */
45003 diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
45004 index 5f306e2..5342f88 100644
45005 --- a/drivers/isdn/gigaset/usb-gigaset.c
45006 +++ b/drivers/isdn/gigaset/usb-gigaset.c
45007 @@ -543,7 +543,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
45008 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
45009 memcpy(cs->hw.usb->bchars, buf, 6);
45010 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
45011 - 0, 0, &buf, 6, 2000);
45012 + 0, 0, buf, 6, 2000);
45015 static void gigaset_freebcshw(struct bc_state *bcs)
45016 @@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
45019 static const struct gigaset_ops ops = {
45020 - gigaset_write_cmd,
45021 - gigaset_write_room,
45022 - gigaset_chars_in_buffer,
45023 - gigaset_brkchars,
45024 - gigaset_init_bchannel,
45025 - gigaset_close_bchannel,
45026 - gigaset_initbcshw,
45027 - gigaset_freebcshw,
45028 - gigaset_reinitbcshw,
45029 - gigaset_initcshw,
45030 - gigaset_freecshw,
45031 - gigaset_set_modem_ctrl,
45032 - gigaset_baud_rate,
45033 - gigaset_set_line_ctrl,
45034 - gigaset_m10x_send_skb,
45035 - gigaset_m10x_input,
45036 + .write_cmd = gigaset_write_cmd,
45037 + .write_room = gigaset_write_room,
45038 + .chars_in_buffer = gigaset_chars_in_buffer,
45039 + .brkchars = gigaset_brkchars,
45040 + .init_bchannel = gigaset_init_bchannel,
45041 + .close_bchannel = gigaset_close_bchannel,
45042 + .initbcshw = gigaset_initbcshw,
45043 + .freebcshw = gigaset_freebcshw,
45044 + .reinitbcshw = gigaset_reinitbcshw,
45045 + .initcshw = gigaset_initcshw,
45046 + .freecshw = gigaset_freecshw,
45047 + .set_modem_ctrl = gigaset_set_modem_ctrl,
45048 + .baud_rate = gigaset_baud_rate,
45049 + .set_line_ctrl = gigaset_set_line_ctrl,
45050 + .send_skb = gigaset_m10x_send_skb,
45051 + .handle_input = gigaset_m10x_input,
45055 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
45056 index 4d9b195..455075c 100644
45057 --- a/drivers/isdn/hardware/avm/b1.c
45058 +++ b/drivers/isdn/hardware/avm/b1.c
45059 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
45062 if (t4file->user) {
45063 - if (copy_from_user(buf, dp, left))
45064 + if (left > sizeof buf || copy_from_user(buf, dp, left))
45067 memcpy(buf, dp, left);
45068 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
45071 if (config->user) {
45072 - if (copy_from_user(buf, dp, left))
45073 + if (left > sizeof buf || copy_from_user(buf, dp, left))
45076 memcpy(buf, dp, left);
45077 diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
45078 index 9b856e1..fa03c92 100644
45079 --- a/drivers/isdn/i4l/isdn_common.c
45080 +++ b/drivers/isdn/i4l/isdn_common.c
45081 @@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
45085 + if (!capable(CAP_SYS_RAWIO))
45088 if (copy_to_user(argp, &dev, sizeof(ulong)))
45090 diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
45091 index 91d5730..336523e 100644
45092 --- a/drivers/isdn/i4l/isdn_concap.c
45093 +++ b/drivers/isdn/i4l/isdn_concap.c
45094 @@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
45097 struct concap_device_ops isdn_concap_reliable_dl_dops = {
45098 - &isdn_concap_dl_data_req,
45099 - &isdn_concap_dl_connect_req,
45100 - &isdn_concap_dl_disconn_req
45101 + .data_req = &isdn_concap_dl_data_req,
45102 + .connect_req = &isdn_concap_dl_connect_req,
45103 + .disconn_req = &isdn_concap_dl_disconn_req
45106 /* The following should better go into a dedicated source file such that
45107 diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
45108 index bc91261..2ef7e36 100644
45109 --- a/drivers/isdn/i4l/isdn_tty.c
45110 +++ b/drivers/isdn/i4l/isdn_tty.c
45111 @@ -1503,9 +1503,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
45113 #ifdef ISDN_DEBUG_MODEM_OPEN
45114 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
45116 + atomic_read(&port->count));
45119 + atomic_inc(&port->count);
45122 * Start up serial port
45123 @@ -1549,7 +1549,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
45127 - if ((tty->count == 1) && (port->count != 1)) {
45128 + if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
45130 * Uh, oh. tty->count is 1, which means that the tty
45131 * structure will be freed. Info->count should always
45132 @@ -1558,15 +1558,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
45133 * serial port won't be shutdown.
45135 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
45136 - "info->count is %d\n", port->count);
45138 + "info->count is %d\n", atomic_read(&port->count));
45139 + atomic_set(&port->count, 1);
45141 - if (--port->count < 0) {
45142 + if (atomic_dec_return(&port->count) < 0) {
45143 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
45144 - info->line, port->count);
45146 + info->line, atomic_read(&port->count));
45147 + atomic_set(&port->count, 0);
45149 - if (port->count) {
45150 + if (atomic_read(&port->count)) {
45151 #ifdef ISDN_DEBUG_MODEM_OPEN
45152 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
45154 @@ -1620,7 +1620,7 @@ isdn_tty_hangup(struct tty_struct *tty)
45155 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
45157 isdn_tty_shutdown(info);
45159 + atomic_set(&port->count, 0);
45160 port->flags &= ~ASYNC_NORMAL_ACTIVE;
45162 wake_up_interruptible(&port->open_wait);
45163 @@ -1965,7 +1965,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
45164 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
45165 modem_info *info = &dev->mdm.info[i];
45167 - if (info->port.count == 0)
45168 + if (atomic_read(&info->port.count) == 0)
45170 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
45171 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
45172 diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
45173 index e2d4e58..40cd045 100644
45174 --- a/drivers/isdn/i4l/isdn_x25iface.c
45175 +++ b/drivers/isdn/i4l/isdn_x25iface.c
45176 @@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
45179 static struct concap_proto_ops ix25_pops = {
45180 - &isdn_x25iface_proto_new,
45181 - &isdn_x25iface_proto_del,
45182 - &isdn_x25iface_proto_restart,
45183 - &isdn_x25iface_proto_close,
45184 - &isdn_x25iface_xmit,
45185 - &isdn_x25iface_receive,
45186 - &isdn_x25iface_connect_ind,
45187 - &isdn_x25iface_disconn_ind
45188 + .proto_new = &isdn_x25iface_proto_new,
45189 + .proto_del = &isdn_x25iface_proto_del,
45190 + .restart = &isdn_x25iface_proto_restart,
45191 + .close = &isdn_x25iface_proto_close,
45192 + .encap_and_xmit = &isdn_x25iface_xmit,
45193 + .data_ind = &isdn_x25iface_receive,
45194 + .connect_ind = &isdn_x25iface_connect_ind,
45195 + .disconn_ind = &isdn_x25iface_disconn_ind
45198 /* error message helper function */
45199 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
45200 index 358a574..b4987ea 100644
45201 --- a/drivers/isdn/icn/icn.c
45202 +++ b/drivers/isdn/icn/icn.c
45203 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
45207 - if (copy_from_user(msg, buf, count))
45208 + if (count > sizeof msg || copy_from_user(msg, buf, count))
45211 memcpy(msg, buf, count);
45212 diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
45213 index 52c4382..09e0c7c 100644
45214 --- a/drivers/isdn/mISDN/dsp_cmx.c
45215 +++ b/drivers/isdn/mISDN/dsp_cmx.c
45216 @@ -1625,7 +1625,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
45217 static u16 dsp_count; /* last sample count */
45218 static int dsp_count_valid; /* if we have last sample count */
45221 +void __intentional_overflow(-1)
45222 dsp_cmx_send(void *arg)
45224 struct dsp_conf *conf;
45225 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
45226 index 312ffd3..9263d05 100644
45227 --- a/drivers/lguest/core.c
45228 +++ b/drivers/lguest/core.c
45229 @@ -96,9 +96,17 @@ static __init int map_switcher(void)
45230 * The end address needs +1 because __get_vm_area allocates an
45231 * extra guard page, so we need space for that.
45234 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
45235 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45236 + VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
45237 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45239 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45240 VM_ALLOC, switcher_addr, switcher_addr
45241 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45244 if (!switcher_vma) {
45246 printk("lguest: could not map switcher pages high\n");
45247 @@ -121,7 +129,7 @@ static __init int map_switcher(void)
45248 * Now the Switcher is mapped at the right address, we can't fail!
45249 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
45251 - memcpy(switcher_vma->addr, start_switcher_text,
45252 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
45253 end_switcher_text - start_switcher_text);
45255 printk(KERN_INFO "lguest: mapped switcher at %p\n",
45256 diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
45257 index e3abebc9..6a35328 100644
45258 --- a/drivers/lguest/page_tables.c
45259 +++ b/drivers/lguest/page_tables.c
45260 @@ -585,7 +585,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
45263 #ifdef CONFIG_X86_PAE
45264 -static void release_pmd(pmd_t *spmd)
45265 +static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
45267 /* If the entry's not present, there's nothing to release. */
45268 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
45269 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
45270 index 30f2aef..391c748 100644
45271 --- a/drivers/lguest/x86/core.c
45272 +++ b/drivers/lguest/x86/core.c
45273 @@ -60,7 +60,7 @@ static struct {
45274 /* Offset from where switcher.S was compiled to where we've copied it */
45275 static unsigned long switcher_offset(void)
45277 - return switcher_addr - (unsigned long)start_switcher_text;
45278 + return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
45281 /* This cpu's struct lguest_pages (after the Switcher text page) */
45282 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
45283 * These copies are pretty cheap, so we do them unconditionally: */
45284 /* Save the current Host top-level page directory.
45287 +#ifdef CONFIG_PAX_PER_CPU_PGD
45288 + pages->state.host_cr3 = read_cr3();
45290 pages->state.host_cr3 = __pa(current->mm->pgd);
45294 * Set up the Guest's page tables to see this CPU's pages (and no
45295 * other CPU's pages).
45296 @@ -494,7 +500,7 @@ void __init lguest_arch_host_init(void)
45297 * compiled-in switcher code and the high-mapped copy we just made.
45299 for (i = 0; i < IDT_ENTRIES; i++)
45300 - default_idt_entries[i] += switcher_offset();
45301 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
45304 * Set up the Switcher's per-cpu areas.
45305 @@ -577,7 +583,7 @@ void __init lguest_arch_host_init(void)
45306 * it will be undisturbed when we switch. To change %cs and jump we
45307 * need this structure to feed to Intel's "lcall" instruction.
45309 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
45310 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
45311 lguest_entry.segment = LGUEST_CS;
45314 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
45315 index 40634b0..4f5855e 100644
45316 --- a/drivers/lguest/x86/switcher_32.S
45317 +++ b/drivers/lguest/x86/switcher_32.S
45319 #include <asm/page.h>
45320 #include <asm/segment.h>
45321 #include <asm/lguest.h>
45322 +#include <asm/processor-flags.h>
45324 // We mark the start of the code to copy
45325 // It's placed in .text tho it's never run here
45326 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
45327 // Changes type when we load it: damn Intel!
45328 // For after we switch over our page tables
45329 // That entry will be read-only: we'd crash.
45331 +#ifdef CONFIG_PAX_KERNEXEC
45333 + xor $X86_CR0_WP, %edx
45337 movl $(GDT_ENTRY_TSS*8), %edx
45340 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
45341 // Let's clear it again for our return.
45342 // The GDT descriptor of the Host
45343 // Points to the table after two "size" bytes
45344 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
45345 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
45346 // Clear "used" from type field (byte 5, bit 2)
45347 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
45348 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
45350 +#ifdef CONFIG_PAX_KERNEXEC
45352 + xor $X86_CR0_WP, %eax
45356 // Once our page table's switched, the Guest is live!
45357 // The Host fades as we run this final step.
45358 @@ -295,13 +309,12 @@ deliver_to_host:
45359 // I consulted gcc, and it gave
45360 // These instructions, which I gladly credit:
45361 leal (%edx,%ebx,8), %eax
45362 - movzwl (%eax),%edx
45363 - movl 4(%eax), %eax
45366 + movl 4(%eax), %edx
45368 // Now the address of the handler's in %edx
45369 // We call it now: its "iret" drops us home.
45371 + ljmp $__KERNEL_CS, $1f
45374 // Every interrupt can come to us here
45375 // But we must truly tell each apart.
45376 diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
45377 index a08e3ee..df8ade2 100644
45378 --- a/drivers/md/bcache/closure.h
45379 +++ b/drivers/md/bcache/closure.h
45380 @@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
45381 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
45382 struct workqueue_struct *wq)
45384 - BUG_ON(object_is_on_stack(cl));
45385 + BUG_ON(object_starts_on_stack(cl));
45386 closure_set_ip(cl);
45389 diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
45390 index 135a090..f7872f6 100644
45391 --- a/drivers/md/bitmap.c
45392 +++ b/drivers/md/bitmap.c
45393 @@ -1927,7 +1927,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
45394 chunk_kb ? "KB" : "B");
45395 if (bitmap->storage.file) {
45396 seq_printf(seq, ", file: ");
45397 - seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
45398 + seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
45401 seq_printf(seq, "\n");
45402 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
45403 index 720ceeb..030f1d4 100644
45404 --- a/drivers/md/dm-ioctl.c
45405 +++ b/drivers/md/dm-ioctl.c
45406 @@ -1773,7 +1773,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
45407 cmd == DM_LIST_VERSIONS_CMD)
45410 - if ((cmd == DM_DEV_CREATE_CMD)) {
45411 + if (cmd == DM_DEV_CREATE_CMD) {
45412 if (!*param->name) {
45413 DMWARN("name not supplied when creating device");
45415 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
45416 index 089d627..ef7352e 100644
45417 --- a/drivers/md/dm-raid1.c
45418 +++ b/drivers/md/dm-raid1.c
45419 @@ -40,7 +40,7 @@ enum dm_raid1_error {
45422 struct mirror_set *ms;
45423 - atomic_t error_count;
45424 + atomic_unchecked_t error_count;
45425 unsigned long error_type;
45426 struct dm_dev *dev;
45428 @@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
45431 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
45432 - if (!atomic_read(&m->error_count))
45433 + if (!atomic_read_unchecked(&m->error_count))
45437 @@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
45438 * simple way to tell if a device has encountered
45441 - atomic_inc(&m->error_count);
45442 + atomic_inc_unchecked(&m->error_count);
45444 if (test_and_set_bit(error_type, &m->error_type))
45446 @@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
45447 struct mirror *m = get_default_mirror(ms);
45450 - if (likely(!atomic_read(&m->error_count)))
45451 + if (likely(!atomic_read_unchecked(&m->error_count)))
45454 if (m-- == ms->mirror)
45455 @@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
45457 struct mirror *default_mirror = get_default_mirror(m->ms);
45459 - return !atomic_read(&default_mirror->error_count);
45460 + return !atomic_read_unchecked(&default_mirror->error_count);
45463 static int mirror_available(struct mirror_set *ms, struct bio *bio)
45464 @@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
45466 if (likely(region_in_sync(ms, region, 1)))
45467 m = choose_mirror(ms, bio->bi_iter.bi_sector);
45468 - else if (m && atomic_read(&m->error_count))
45469 + else if (m && atomic_read_unchecked(&m->error_count))
45473 @@ -936,7 +936,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
45476 ms->mirror[mirror].ms = ms;
45477 - atomic_set(&(ms->mirror[mirror].error_count), 0);
45478 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
45479 ms->mirror[mirror].error_type = 0;
45480 ms->mirror[mirror].offset = offset;
45482 @@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
45484 static char device_status_char(struct mirror *m)
45486 - if (!atomic_read(&(m->error_count)))
45487 + if (!atomic_read_unchecked(&(m->error_count)))
45490 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
45491 diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
45492 index f478a4c..4b8e5ef 100644
45493 --- a/drivers/md/dm-stats.c
45494 +++ b/drivers/md/dm-stats.c
45495 @@ -382,7 +382,7 @@ do_sync_free:
45496 synchronize_rcu_expedited();
45497 dm_stat_free(&s->rcu_head);
45499 - ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
45500 + ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
45501 call_rcu(&s->rcu_head, dm_stat_free);
45504 @@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
45505 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
45506 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
45508 - ACCESS_ONCE(last->last_sector) = end_sector;
45509 - ACCESS_ONCE(last->last_rw) = bi_rw;
45510 + ACCESS_ONCE_RW(last->last_sector) = end_sector;
45511 + ACCESS_ONCE_RW(last->last_rw) = bi_rw;
45515 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
45516 index f8b37d4..5c5cafd 100644
45517 --- a/drivers/md/dm-stripe.c
45518 +++ b/drivers/md/dm-stripe.c
45519 @@ -21,7 +21,7 @@ struct stripe {
45520 struct dm_dev *dev;
45521 sector_t physical_start;
45523 - atomic_t error_count;
45524 + atomic_unchecked_t error_count;
45528 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
45532 - atomic_set(&(sc->stripe[i].error_count), 0);
45533 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
45537 @@ -332,7 +332,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
45538 DMEMIT("%d ", sc->stripes);
45539 for (i = 0; i < sc->stripes; i++) {
45540 DMEMIT("%s ", sc->stripe[i].dev->name);
45541 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
45542 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
45546 @@ -377,8 +377,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
45548 for (i = 0; i < sc->stripes; i++)
45549 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
45550 - atomic_inc(&(sc->stripe[i].error_count));
45551 - if (atomic_read(&(sc->stripe[i].error_count)) <
45552 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
45553 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
45554 DM_IO_ERROR_THRESHOLD)
45555 schedule_work(&sc->trigger_event);
45557 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
45558 index 16ba55a..31af906 100644
45559 --- a/drivers/md/dm-table.c
45560 +++ b/drivers/md/dm-table.c
45561 @@ -305,7 +305,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
45565 - if ((start >= dev_size) || (start + len > dev_size)) {
45566 + if ((start >= dev_size) || (len > dev_size - start)) {
45567 DMWARN("%s: %s too small for target: "
45568 "start=%llu, len=%llu, dev_size=%llu",
45569 dm_device_name(ti->table->md), bdevname(bdev, b),
45570 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
45571 index 79f6941..b33b4e0 100644
45572 --- a/drivers/md/dm-thin-metadata.c
45573 +++ b/drivers/md/dm-thin-metadata.c
45574 @@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
45576 pmd->info.tm = pmd->tm;
45577 pmd->info.levels = 2;
45578 - pmd->info.value_type.context = pmd->data_sm;
45579 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
45580 pmd->info.value_type.size = sizeof(__le64);
45581 pmd->info.value_type.inc = data_block_inc;
45582 pmd->info.value_type.dec = data_block_dec;
45583 @@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
45585 pmd->bl_info.tm = pmd->tm;
45586 pmd->bl_info.levels = 1;
45587 - pmd->bl_info.value_type.context = pmd->data_sm;
45588 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
45589 pmd->bl_info.value_type.size = sizeof(__le64);
45590 pmd->bl_info.value_type.inc = data_block_inc;
45591 pmd->bl_info.value_type.dec = data_block_dec;
45592 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
45593 index 2caf492..0c0dcac 100644
45594 --- a/drivers/md/dm.c
45595 +++ b/drivers/md/dm.c
45596 @@ -191,9 +191,9 @@ struct mapped_device {
45600 - atomic_t event_nr;
45601 + atomic_unchecked_t event_nr;
45602 wait_queue_head_t eventq;
45603 - atomic_t uevent_seq;
45604 + atomic_unchecked_t uevent_seq;
45605 struct list_head uevent_list;
45606 spinlock_t uevent_lock; /* Protect access to uevent_list */
45608 @@ -2298,8 +2298,8 @@ static struct mapped_device *alloc_dev(int minor)
45609 spin_lock_init(&md->deferred_lock);
45610 atomic_set(&md->holders, 1);
45611 atomic_set(&md->open_count, 0);
45612 - atomic_set(&md->event_nr, 0);
45613 - atomic_set(&md->uevent_seq, 0);
45614 + atomic_set_unchecked(&md->event_nr, 0);
45615 + atomic_set_unchecked(&md->uevent_seq, 0);
45616 INIT_LIST_HEAD(&md->uevent_list);
45617 INIT_LIST_HEAD(&md->table_devices);
45618 spin_lock_init(&md->uevent_lock);
45619 @@ -2466,7 +2466,7 @@ static void event_callback(void *context)
45621 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
45623 - atomic_inc(&md->event_nr);
45624 + atomic_inc_unchecked(&md->event_nr);
45625 wake_up(&md->eventq);
45628 @@ -3465,18 +3465,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
45630 uint32_t dm_next_uevent_seq(struct mapped_device *md)
45632 - return atomic_add_return(1, &md->uevent_seq);
45633 + return atomic_add_return_unchecked(1, &md->uevent_seq);
45636 uint32_t dm_get_event_nr(struct mapped_device *md)
45638 - return atomic_read(&md->event_nr);
45639 + return atomic_read_unchecked(&md->event_nr);
45642 int dm_wait_event(struct mapped_device *md, int event_nr)
45644 return wait_event_interruptible(md->eventq,
45645 - (event_nr != atomic_read(&md->event_nr)));
45646 + (event_nr != atomic_read_unchecked(&md->event_nr)));
45649 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
45650 diff --git a/drivers/md/md.c b/drivers/md/md.c
45651 index 4dbed4a..bed2a6a 100644
45652 --- a/drivers/md/md.c
45653 +++ b/drivers/md/md.c
45654 @@ -197,10 +197,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
45655 * start build, activate spare
45657 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
45658 -static atomic_t md_event_count;
45659 +static atomic_unchecked_t md_event_count;
45660 void md_new_event(struct mddev *mddev)
45662 - atomic_inc(&md_event_count);
45663 + atomic_inc_unchecked(&md_event_count);
45664 wake_up(&md_event_waiters);
45666 EXPORT_SYMBOL_GPL(md_new_event);
45667 @@ -210,7 +210,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
45669 static void md_new_event_inintr(struct mddev *mddev)
45671 - atomic_inc(&md_event_count);
45672 + atomic_inc_unchecked(&md_event_count);
45673 wake_up(&md_event_waiters);
45676 @@ -1449,7 +1449,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
45677 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
45678 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
45679 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
45680 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
45681 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
45683 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
45684 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
45685 @@ -1700,7 +1700,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
45687 sb->resync_offset = cpu_to_le64(0);
45689 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
45690 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
45692 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
45693 sb->size = cpu_to_le64(mddev->dev_sectors);
45694 @@ -2624,7 +2624,7 @@ __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
45696 errors_show(struct md_rdev *rdev, char *page)
45698 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
45699 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
45703 @@ -2633,7 +2633,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
45705 unsigned long n = simple_strtoul(buf, &e, 10);
45706 if (*buf && (*e == 0 || *e == '\n')) {
45707 - atomic_set(&rdev->corrected_errors, n);
45708 + atomic_set_unchecked(&rdev->corrected_errors, n);
45712 @@ -3069,8 +3069,8 @@ int md_rdev_init(struct md_rdev *rdev)
45713 rdev->sb_loaded = 0;
45714 rdev->bb_page = NULL;
45715 atomic_set(&rdev->nr_pending, 0);
45716 - atomic_set(&rdev->read_errors, 0);
45717 - atomic_set(&rdev->corrected_errors, 0);
45718 + atomic_set_unchecked(&rdev->read_errors, 0);
45719 + atomic_set_unchecked(&rdev->corrected_errors, 0);
45721 INIT_LIST_HEAD(&rdev->same_set);
45722 init_waitqueue_head(&rdev->blocked_wait);
45723 @@ -7232,7 +7232,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
45725 spin_unlock(&pers_lock);
45726 seq_printf(seq, "\n");
45727 - seq->poll_event = atomic_read(&md_event_count);
45728 + seq->poll_event = atomic_read_unchecked(&md_event_count);
45731 if (v == (void*)2) {
45732 @@ -7335,7 +7335,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
45735 seq = file->private_data;
45736 - seq->poll_event = atomic_read(&md_event_count);
45737 + seq->poll_event = atomic_read_unchecked(&md_event_count);
45741 @@ -7352,7 +7352,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
45742 /* always allow read */
45743 mask = POLLIN | POLLRDNORM;
45745 - if (seq->poll_event != atomic_read(&md_event_count))
45746 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
45747 mask |= POLLERR | POLLPRI;
45750 @@ -7448,7 +7448,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
45751 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
45752 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
45753 (int)part_stat_read(&disk->part0, sectors[1]) -
45754 - atomic_read(&disk->sync_io);
45755 + atomic_read_unchecked(&disk->sync_io);
45756 /* sync IO will cause sync_io to increase before the disk_stats
45757 * as sync_io is counted when a request starts, and
45758 * disk_stats is counted when it completes.
45759 diff --git a/drivers/md/md.h b/drivers/md/md.h
45760 index 4046a6c..e2f2997 100644
45761 --- a/drivers/md/md.h
45762 +++ b/drivers/md/md.h
45763 @@ -95,13 +95,13 @@ struct md_rdev {
45764 * only maintained for arrays that
45765 * support hot removal
45767 - atomic_t read_errors; /* number of consecutive read errors that
45768 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
45769 * we have tried to ignore.
45771 struct timespec last_read_error; /* monotonic time since our
45774 - atomic_t corrected_errors; /* number of corrected read errors,
45775 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
45776 * for reporting to userspace and storing
45779 @@ -486,7 +486,7 @@ extern void mddev_unlock(struct mddev *mddev);
45781 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
45783 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
45784 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
45787 struct md_personality
45788 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
45789 index e8a9042..35bd145 100644
45790 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
45791 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
45792 @@ -683,7 +683,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
45793 * Flick into a mode where all blocks get allocated in the new area.
45795 smm->begin = old_len;
45796 - memcpy(sm, &bootstrap_ops, sizeof(*sm));
45797 + memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
45801 @@ -714,7 +714,7 @@ out:
45803 * Switch back to normal behaviour.
45805 - memcpy(sm, &ops, sizeof(*sm));
45806 + memcpy((void *)sm, &ops, sizeof(*sm));
45810 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
45811 index 3e6d115..ffecdeb 100644
45812 --- a/drivers/md/persistent-data/dm-space-map.h
45813 +++ b/drivers/md/persistent-data/dm-space-map.h
45814 @@ -71,6 +71,7 @@ struct dm_space_map {
45815 dm_sm_threshold_fn fn,
45818 +typedef struct dm_space_map __no_const dm_space_map_no_const;
45820 /*----------------------------------------------------------------*/
45822 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
45823 index 9157a29..0d462f0 100644
45824 --- a/drivers/md/raid1.c
45825 +++ b/drivers/md/raid1.c
45826 @@ -1934,7 +1934,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
45827 if (r1_sync_page_io(rdev, sect, s,
45828 bio->bi_io_vec[idx].bv_page,
45830 - atomic_add(s, &rdev->corrected_errors);
45831 + atomic_add_unchecked(s, &rdev->corrected_errors);
45835 @@ -2167,7 +2167,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
45836 !test_bit(Faulty, &rdev->flags)) {
45837 if (r1_sync_page_io(rdev, sect, s,
45838 conf->tmppage, READ)) {
45839 - atomic_add(s, &rdev->corrected_errors);
45840 + atomic_add_unchecked(s, &rdev->corrected_errors);
45842 "md/raid1:%s: read error corrected "
45843 "(%d sectors at %llu on %s)\n",
45844 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
45845 index f55c3f3..4cca8c8 100644
45846 --- a/drivers/md/raid10.c
45847 +++ b/drivers/md/raid10.c
45848 @@ -1934,7 +1934,7 @@ static void end_sync_read(struct bio *bio, int error)
45849 /* The write handler will notice the lack of
45850 * R10BIO_Uptodate and record any errors etc
45852 - atomic_add(r10_bio->sectors,
45853 + atomic_add_unchecked(r10_bio->sectors,
45854 &conf->mirrors[d].rdev->corrected_errors);
45856 /* for reconstruct, we always reschedule after a read.
45857 @@ -2291,7 +2291,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
45859 struct timespec cur_time_mon;
45860 unsigned long hours_since_last;
45861 - unsigned int read_errors = atomic_read(&rdev->read_errors);
45862 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
45864 ktime_get_ts(&cur_time_mon);
45866 @@ -2313,9 +2313,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
45867 * overflowing the shift of read_errors by hours_since_last.
45869 if (hours_since_last >= 8 * sizeof(read_errors))
45870 - atomic_set(&rdev->read_errors, 0);
45871 + atomic_set_unchecked(&rdev->read_errors, 0);
45873 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
45874 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
45877 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
45878 @@ -2369,8 +2369,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45881 check_decay_read_errors(mddev, rdev);
45882 - atomic_inc(&rdev->read_errors);
45883 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
45884 + atomic_inc_unchecked(&rdev->read_errors);
45885 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
45886 char b[BDEVNAME_SIZE];
45887 bdevname(rdev->bdev, b);
45889 @@ -2378,7 +2378,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45890 "md/raid10:%s: %s: Raid device exceeded "
45891 "read_error threshold [cur %d:max %d]\n",
45893 - atomic_read(&rdev->read_errors), max_read_errors);
45894 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
45896 "md/raid10:%s: %s: Failing raid device\n",
45898 @@ -2533,7 +2533,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45900 choose_data_offset(r10_bio, rdev)),
45901 bdevname(rdev->bdev, b));
45902 - atomic_add(s, &rdev->corrected_errors);
45903 + atomic_add_unchecked(s, &rdev->corrected_errors);
45906 rdev_dec_pending(rdev, mddev);
45907 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
45908 index b6793d2..92be2bc 100644
45909 --- a/drivers/md/raid5.c
45910 +++ b/drivers/md/raid5.c
45911 @@ -1108,23 +1108,23 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
45912 struct bio_vec bvl;
45913 struct bvec_iter iter;
45914 struct page *bio_page;
45917 struct async_submit_ctl submit;
45918 enum async_tx_flags flags = 0;
45920 if (bio->bi_iter.bi_sector >= sector)
45921 - page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
45922 + page_offset = (s64)(bio->bi_iter.bi_sector - sector) * 512;
45924 - page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
45925 + page_offset = (s64)(sector - bio->bi_iter.bi_sector) * -512;
45928 flags |= ASYNC_TX_FENCE;
45929 init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
45931 bio_for_each_segment(bvl, bio, iter) {
45932 - int len = bvl.bv_len;
45934 - int b_offset = 0;
45935 + s64 len = bvl.bv_len;
45937 + s64 b_offset = 0;
45939 if (page_offset < 0) {
45940 b_offset = -page_offset;
45941 @@ -2017,6 +2017,10 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
45945 +#ifdef CONFIG_GRKERNSEC_HIDESYM
45946 +static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
45949 static int grow_stripes(struct r5conf *conf, int num)
45951 struct kmem_cache *sc;
45952 @@ -2027,7 +2031,11 @@ static int grow_stripes(struct r5conf *conf, int num)
45953 "raid%d-%s", conf->level, mdname(conf->mddev));
45955 sprintf(conf->cache_name[0],
45956 +#ifdef CONFIG_GRKERNSEC_HIDESYM
45957 + "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
45959 "raid%d-%p", conf->level, conf->mddev);
45961 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
45963 conf->active_name = 0;
45964 @@ -2315,21 +2323,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
45965 mdname(conf->mddev), STRIPE_SECTORS,
45966 (unsigned long long)s,
45967 bdevname(rdev->bdev, b));
45968 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
45969 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
45970 clear_bit(R5_ReadError, &sh->dev[i].flags);
45971 clear_bit(R5_ReWrite, &sh->dev[i].flags);
45972 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
45973 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
45975 - if (atomic_read(&rdev->read_errors))
45976 - atomic_set(&rdev->read_errors, 0);
45977 + if (atomic_read_unchecked(&rdev->read_errors))
45978 + atomic_set_unchecked(&rdev->read_errors, 0);
45980 const char *bdn = bdevname(rdev->bdev, b);
45984 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
45985 - atomic_inc(&rdev->read_errors);
45986 + atomic_inc_unchecked(&rdev->read_errors);
45987 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
45988 printk_ratelimited(
45990 @@ -2357,7 +2365,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
45991 mdname(conf->mddev),
45992 (unsigned long long)s,
45994 - } else if (atomic_read(&rdev->read_errors)
45995 + } else if (atomic_read_unchecked(&rdev->read_errors)
45996 > conf->max_nr_stripes)
45997 printk(KERN_WARNING
45998 "md/raid:%s: Too many read errors, failing device %s.\n",
45999 diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
46000 index 13bb57f..0ca21b2 100644
46001 --- a/drivers/media/dvb-core/dvbdev.c
46002 +++ b/drivers/media/dvb-core/dvbdev.c
46003 @@ -272,7 +272,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
46004 const struct dvb_device *template, void *priv, int type)
46006 struct dvb_device *dvbdev;
46007 - struct file_operations *dvbdevfops;
46008 + file_operations_no_const *dvbdevfops;
46009 struct device *clsdev;
46012 diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
46013 index 6ad22b6..6e90e2a 100644
46014 --- a/drivers/media/dvb-frontends/af9033.h
46015 +++ b/drivers/media/dvb-frontends/af9033.h
46016 @@ -96,6 +96,6 @@ struct af9033_ops {
46017 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
46018 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
46023 #endif /* AF9033_H */
46024 diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
46025 index 6ae9899..07d8543 100644
46026 --- a/drivers/media/dvb-frontends/dib3000.h
46027 +++ b/drivers/media/dvb-frontends/dib3000.h
46028 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
46029 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
46030 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
46031 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
46035 #if IS_REACHABLE(CONFIG_DVB_DIB3000MB)
46036 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
46037 diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
46038 index baa2789..c8de7fe 100644
46039 --- a/drivers/media/dvb-frontends/dib7000p.h
46040 +++ b/drivers/media/dvb-frontends/dib7000p.h
46041 @@ -64,7 +64,7 @@ struct dib7000p_ops {
46042 int (*get_adc_power)(struct dvb_frontend *fe);
46043 int (*slave_reset)(struct dvb_frontend *fe);
46044 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
46048 #if IS_REACHABLE(CONFIG_DVB_DIB7000P)
46049 void *dib7000p_attach(struct dib7000p_ops *ops);
46050 diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
46051 index 780c37b..50e2620 100644
46052 --- a/drivers/media/dvb-frontends/dib8000.h
46053 +++ b/drivers/media/dvb-frontends/dib8000.h
46054 @@ -61,7 +61,7 @@ struct dib8000_ops {
46055 int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
46056 int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
46057 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
46061 #if IS_REACHABLE(CONFIG_DVB_DIB8000)
46062 void *dib8000_attach(struct dib8000_ops *ops);
46063 diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
46064 index c9decd8..7849cec 100644
46065 --- a/drivers/media/pci/cx88/cx88-video.c
46066 +++ b/drivers/media/pci/cx88/cx88-video.c
46067 @@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
46069 /* ------------------------------------------------------------------ */
46071 -static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46072 -static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46073 -static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46074 +static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46075 +static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46076 +static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46078 module_param_array(video_nr, int, NULL, 0444);
46079 module_param_array(vbi_nr, int, NULL, 0444);
46080 diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
46081 index c2e60b4..5eeccc0 100644
46082 --- a/drivers/media/pci/ivtv/ivtv-driver.c
46083 +++ b/drivers/media/pci/ivtv/ivtv-driver.c
46084 @@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
46085 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
46087 /* ivtv instance counter */
46088 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
46089 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
46091 /* Parameter declarations */
46092 static int cardtype[IVTV_MAX_CARDS];
46093 diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
46094 index 570d119..ed25830 100644
46095 --- a/drivers/media/pci/solo6x10/solo6x10-core.c
46096 +++ b/drivers/media/pci/solo6x10/solo6x10-core.c
46097 @@ -424,7 +424,7 @@ static void solo_device_release(struct device *dev)
46099 static int solo_sysfs_init(struct solo_dev *solo_dev)
46101 - struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
46102 + bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
46103 struct device *dev = &solo_dev->dev;
46104 const char *driver;
46106 diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
46107 index 7ddc767..1c24361 100644
46108 --- a/drivers/media/pci/solo6x10/solo6x10-g723.c
46109 +++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
46110 @@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
46112 int solo_g723_init(struct solo_dev *solo_dev)
46114 - static struct snd_device_ops ops = { NULL };
46115 + static struct snd_device_ops ops = { };
46116 struct snd_card *card;
46117 struct snd_kcontrol_new kctl;
46119 diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
46120 index 8c84846..27b4f83 100644
46121 --- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
46122 +++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
46123 @@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
46125 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
46126 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
46127 - p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
46128 + p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
46132 diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
46133 index 1ca54b0..7d7cb9a 100644
46134 --- a/drivers/media/pci/solo6x10/solo6x10.h
46135 +++ b/drivers/media/pci/solo6x10/solo6x10.h
46136 @@ -218,7 +218,7 @@ struct solo_dev {
46138 /* P2M DMA Engine */
46139 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
46140 - atomic_t p2m_count;
46141 + atomic_unchecked_t p2m_count;
46143 unsigned int p2m_timeouts;
46145 diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
46146 index c135165..dc69499 100644
46147 --- a/drivers/media/pci/tw68/tw68-core.c
46148 +++ b/drivers/media/pci/tw68/tw68-core.c
46149 @@ -60,7 +60,7 @@ static unsigned int card[] = {[0 ... (TW68_MAXBOARDS - 1)] = UNSET };
46150 module_param_array(card, int, NULL, 0444);
46151 MODULE_PARM_DESC(card, "card type");
46153 -static atomic_t tw68_instance = ATOMIC_INIT(0);
46154 +static atomic_unchecked_t tw68_instance = ATOMIC_INIT(0);
46156 /* ------------------------------------------------------------------ */
46158 diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
46159 index 17b189a..b78aa6b 100644
46160 --- a/drivers/media/platform/omap/omap_vout.c
46161 +++ b/drivers/media/platform/omap/omap_vout.c
46162 @@ -63,7 +63,6 @@ enum omap_vout_channels {
46166 -static struct videobuf_queue_ops video_vbq_ops;
46167 /* Variables configurable through module params*/
46168 static u32 video1_numbuffers = 3;
46169 static u32 video2_numbuffers = 3;
46170 @@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
46172 struct videobuf_queue *q;
46173 struct omap_vout_device *vout = NULL;
46174 + static struct videobuf_queue_ops video_vbq_ops = {
46175 + .buf_setup = omap_vout_buffer_setup,
46176 + .buf_prepare = omap_vout_buffer_prepare,
46177 + .buf_release = omap_vout_buffer_release,
46178 + .buf_queue = omap_vout_buffer_queue,
46181 vout = video_drvdata(file);
46182 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
46183 @@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
46184 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
46187 - video_vbq_ops.buf_setup = omap_vout_buffer_setup;
46188 - video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
46189 - video_vbq_ops.buf_release = omap_vout_buffer_release;
46190 - video_vbq_ops.buf_queue = omap_vout_buffer_queue;
46191 spin_lock_init(&vout->vbq_lock);
46193 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
46194 diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
46195 index fb2acc5..a2fcbdc4 100644
46196 --- a/drivers/media/platform/s5p-tv/mixer.h
46197 +++ b/drivers/media/platform/s5p-tv/mixer.h
46198 @@ -156,7 +156,7 @@ struct mxr_layer {
46199 /** layer index (unique identifier) */
46201 /** callbacks for layer methods */
46202 - struct mxr_layer_ops ops;
46203 + struct mxr_layer_ops *ops;
46204 /** format array */
46205 const struct mxr_format **fmt_array;
46206 /** size of format array */
46207 diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46208 index 74344c7..a39e70e 100644
46209 --- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46210 +++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46211 @@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
46213 struct mxr_layer *layer;
46215 - struct mxr_layer_ops ops = {
46216 + static struct mxr_layer_ops ops = {
46217 .release = mxr_graph_layer_release,
46218 .buffer_set = mxr_graph_buffer_set,
46219 .stream_set = mxr_graph_stream_set,
46220 diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
46221 index b713403..53cb5ad 100644
46222 --- a/drivers/media/platform/s5p-tv/mixer_reg.c
46223 +++ b/drivers/media/platform/s5p-tv/mixer_reg.c
46224 @@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
46225 layer->update_buf = next;
46228 - layer->ops.buffer_set(layer, layer->update_buf);
46229 + layer->ops->buffer_set(layer, layer->update_buf);
46231 if (done && done != layer->shadow_buf)
46232 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
46233 diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
46234 index 751f3b6..d829203 100644
46235 --- a/drivers/media/platform/s5p-tv/mixer_video.c
46236 +++ b/drivers/media/platform/s5p-tv/mixer_video.c
46237 @@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
46238 layer->geo.src.height = layer->geo.src.full_height;
46240 mxr_geometry_dump(mdev, &layer->geo);
46241 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46242 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46243 mxr_geometry_dump(mdev, &layer->geo);
46246 @@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
46247 layer->geo.dst.full_width = mbus_fmt.width;
46248 layer->geo.dst.full_height = mbus_fmt.height;
46249 layer->geo.dst.field = mbus_fmt.field;
46250 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46251 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46253 mxr_geometry_dump(mdev, &layer->geo);
46255 @@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
46256 /* set source size to highest accepted value */
46257 geo->src.full_width = max(geo->dst.full_width, pix->width);
46258 geo->src.full_height = max(geo->dst.full_height, pix->height);
46259 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46260 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46261 mxr_geometry_dump(mdev, &layer->geo);
46262 /* set cropping to total visible screen */
46263 geo->src.width = pix->width;
46264 @@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
46265 geo->src.x_offset = 0;
46266 geo->src.y_offset = 0;
46267 /* assure consistency of geometry */
46268 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46269 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46270 mxr_geometry_dump(mdev, &layer->geo);
46271 /* set full size to lowest possible value */
46272 geo->src.full_width = 0;
46273 geo->src.full_height = 0;
46274 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46275 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46276 mxr_geometry_dump(mdev, &layer->geo);
46278 /* returning results */
46279 @@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
46280 target->width = s->r.width;
46281 target->height = s->r.height;
46283 - layer->ops.fix_geometry(layer, stage, s->flags);
46284 + layer->ops->fix_geometry(layer, stage, s->flags);
46286 /* retrieve update selection rectangle */
46287 res.left = target->x_offset;
46288 @@ -938,13 +938,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
46289 mxr_output_get(mdev);
46291 mxr_layer_update_output(layer);
46292 - layer->ops.format_set(layer);
46293 + layer->ops->format_set(layer);
46294 /* enabling layer in hardware */
46295 spin_lock_irqsave(&layer->enq_slock, flags);
46296 layer->state = MXR_LAYER_STREAMING;
46297 spin_unlock_irqrestore(&layer->enq_slock, flags);
46299 - layer->ops.stream_set(layer, MXR_ENABLE);
46300 + layer->ops->stream_set(layer, MXR_ENABLE);
46301 mxr_streamer_get(mdev);
46304 @@ -1014,7 +1014,7 @@ static void stop_streaming(struct vb2_queue *vq)
46305 spin_unlock_irqrestore(&layer->enq_slock, flags);
46307 /* disabling layer in hardware */
46308 - layer->ops.stream_set(layer, MXR_DISABLE);
46309 + layer->ops->stream_set(layer, MXR_DISABLE);
46310 /* remove one streamer */
46311 mxr_streamer_put(mdev);
46312 /* allow changes in output configuration */
46313 @@ -1052,8 +1052,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
46315 void mxr_layer_release(struct mxr_layer *layer)
46317 - if (layer->ops.release)
46318 - layer->ops.release(layer);
46319 + if (layer->ops->release)
46320 + layer->ops->release(layer);
46323 void mxr_base_layer_release(struct mxr_layer *layer)
46324 @@ -1079,7 +1079,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
46326 layer->mdev = mdev;
46328 - layer->ops = *ops;
46329 + layer->ops = ops;
46331 spin_lock_init(&layer->enq_slock);
46332 INIT_LIST_HEAD(&layer->enq_list);
46333 diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46334 index c9388c4..ce71ece 100644
46335 --- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46336 +++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46337 @@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
46339 struct mxr_layer *layer;
46341 - struct mxr_layer_ops ops = {
46342 + static struct mxr_layer_ops ops = {
46343 .release = mxr_vp_layer_release,
46344 .buffer_set = mxr_vp_buffer_set,
46345 .stream_set = mxr_vp_stream_set,
46346 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
46347 index 82affae..42833ec 100644
46348 --- a/drivers/media/radio/radio-cadet.c
46349 +++ b/drivers/media/radio/radio-cadet.c
46350 @@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46351 unsigned char readbuf[RDS_BUFFER];
46354 + if (count > RDS_BUFFER)
46356 mutex_lock(&dev->lock);
46357 if (dev->rdsstat == 0)
46358 cadet_start_rds(dev);
46359 @@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46360 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
46361 mutex_unlock(&dev->lock);
46363 - if (i && copy_to_user(data, readbuf, i))
46365 + if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
46371 diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
46372 index 5236035..c622c74 100644
46373 --- a/drivers/media/radio/radio-maxiradio.c
46374 +++ b/drivers/media/radio/radio-maxiradio.c
46375 @@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
46376 /* TEA5757 pin mappings */
46377 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
46379 -static atomic_t maxiradio_instance = ATOMIC_INIT(0);
46380 +static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
46382 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
46383 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
46384 diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
46385 index 050b3bb..79f62b9 100644
46386 --- a/drivers/media/radio/radio-shark.c
46387 +++ b/drivers/media/radio/radio-shark.c
46388 @@ -79,7 +79,7 @@ struct shark_device {
46392 -static atomic_t shark_instance = ATOMIC_INIT(0);
46393 +static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46395 static void shark_write_val(struct snd_tea575x *tea, u32 val)
46397 diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
46398 index 8654e0d..0608a64 100644
46399 --- a/drivers/media/radio/radio-shark2.c
46400 +++ b/drivers/media/radio/radio-shark2.c
46401 @@ -74,7 +74,7 @@ struct shark_device {
46402 u8 *transfer_buffer;
46405 -static atomic_t shark_instance = ATOMIC_INIT(0);
46406 +static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46408 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
46410 diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
46411 index dccf586..d5db411 100644
46412 --- a/drivers/media/radio/radio-si476x.c
46413 +++ b/drivers/media/radio/radio-si476x.c
46414 @@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
46415 struct si476x_radio *radio;
46416 struct v4l2_ctrl *ctrl;
46418 - static atomic_t instance = ATOMIC_INIT(0);
46419 + static atomic_unchecked_t instance = ATOMIC_INIT(0);
46421 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
46423 diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
46424 index 704397f..4d05977 100644
46425 --- a/drivers/media/radio/wl128x/fmdrv_common.c
46426 +++ b/drivers/media/radio/wl128x/fmdrv_common.c
46427 @@ -71,7 +71,7 @@ module_param(default_rds_buf, uint, 0444);
46428 MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
46431 -static u32 radio_nr = -1;
46432 +static int radio_nr = -1;
46433 module_param(radio_nr, int, 0444);
46434 MODULE_PARM_DESC(radio_nr, "Radio Nr");
46436 diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46437 index 9fd1527..8927230 100644
46438 --- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
46439 +++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46440 @@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
46442 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
46444 - char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
46446 - return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
46447 - sizeof(result), 0);
46452 + buf = kmalloc(2, GFP_KERNEL);
46455 + result = kmalloc(64, GFP_KERNEL);
46456 + if (result == NULL) {
46461 + buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
46462 + buf[1] = enable ? 1 : 0;
46464 + retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
46471 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
46473 - char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
46475 - return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
46480 + buf = kmalloc(2, GFP_KERNEL);
46483 + state = kmalloc(3, GFP_KERNEL);
46484 + if (state == NULL) {
46489 + buf[0] = CINERGYT2_EP1_SLEEP_MODE;
46490 + buf[1] = enable ? 0 : 1;
46492 + retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
46499 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46501 - char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
46506 + query = kmalloc(1, GFP_KERNEL);
46507 + if (query == NULL)
46509 + state = kmalloc(3, GFP_KERNEL);
46510 + if (state == NULL) {
46515 + query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
46517 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
46519 - ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
46520 - sizeof(state), 0);
46521 + ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
46523 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
46525 @@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46527 /* Copy this pointer as we are gonna need it in the release phase */
46528 cinergyt2_usb_device = adap->dev;
46535 @@ -141,12 +186,23 @@ static int repeatable_keys[] = {
46536 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46538 struct cinergyt2_state *st = d->priv;
46539 - u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
46543 + cmd = kmalloc(1, GFP_KERNEL);
46546 + key = kzalloc(5, GFP_KERNEL);
46547 + if (key == NULL) {
46552 + cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
46554 *state = REMOTE_NO_KEY_PRESSED;
46556 - dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
46557 + dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
46558 if (key[4] == 0xff) {
46561 @@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46562 *event = d->last_event;
46563 deb_rc("repeat key, event %x\n",
46569 deb_rc("repeated key (non repeatable)\n");
46575 /* hack to pass checksum on the custom field */
46576 @@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46578 deb_rc("key: %*ph\n", 5, key);
46586 diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46587 index c890fe4..f9b2ae6 100644
46588 --- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46589 +++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46590 @@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
46591 fe_status_t *status)
46593 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46594 - struct dvbt_get_status_msg result;
46595 - u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46596 + struct dvbt_get_status_msg *result;
46600 - ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
46601 - sizeof(result), 0);
46602 + cmd = kmalloc(1, GFP_KERNEL);
46605 + result = kmalloc(sizeof(*result), GFP_KERNEL);
46606 + if (result == NULL) {
46611 + cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46613 + ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
46614 + sizeof(*result), 0);
46621 - if (0xffff - le16_to_cpu(result.gain) > 30)
46622 + if (0xffff - le16_to_cpu(result->gain) > 30)
46623 *status |= FE_HAS_SIGNAL;
46624 - if (result.lock_bits & (1 << 6))
46625 + if (result->lock_bits & (1 << 6))
46626 *status |= FE_HAS_LOCK;
46627 - if (result.lock_bits & (1 << 5))
46628 + if (result->lock_bits & (1 << 5))
46629 *status |= FE_HAS_SYNC;
46630 - if (result.lock_bits & (1 << 4))
46631 + if (result->lock_bits & (1 << 4))
46632 *status |= FE_HAS_CARRIER;
46633 - if (result.lock_bits & (1 << 1))
46634 + if (result->lock_bits & (1 << 1))
46635 *status |= FE_HAS_VITERBI;
46637 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
46638 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
46639 *status &= ~FE_HAS_LOCK;
46648 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
46650 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46651 - struct dvbt_get_status_msg status;
46652 - char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46653 + struct dvbt_get_status_msg *status;
46657 - ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46658 - sizeof(status), 0);
46659 + cmd = kmalloc(1, GFP_KERNEL);
46662 + status = kmalloc(sizeof(*status), GFP_KERNEL);
46663 + if (status == NULL) {
46668 + cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46670 + ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46671 + sizeof(*status), 0);
46676 - *ber = le32_to_cpu(status.viterbi_error_rate);
46677 + *ber = le32_to_cpu(status->viterbi_error_rate);
46684 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
46686 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46687 - struct dvbt_get_status_msg status;
46688 - u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46689 + struct dvbt_get_status_msg *status;
46693 - ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
46694 - sizeof(status), 0);
46695 + cmd = kmalloc(1, GFP_KERNEL);
46698 + status = kmalloc(sizeof(*status), GFP_KERNEL);
46699 + if (status == NULL) {
46704 + cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46706 + ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
46707 + sizeof(*status), 0);
46709 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
46714 - *unc = le32_to_cpu(status.uncorrected_block_count);
46716 + *unc = le32_to_cpu(status->uncorrected_block_count);
46724 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
46727 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46728 - struct dvbt_get_status_msg status;
46729 - char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46730 + struct dvbt_get_status_msg *status;
46734 - ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46735 - sizeof(status), 0);
46736 + cmd = kmalloc(1, GFP_KERNEL);
46739 + status = kmalloc(sizeof(*status), GFP_KERNEL);
46740 + if (status == NULL) {
46745 + cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46747 + ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46748 + sizeof(*status), 0);
46750 err("cinergyt2_fe_read_signal_strength() Failed!"
46751 " (Error=%d)\n", ret);
46755 - *strength = (0xffff - le16_to_cpu(status.gain));
46756 + *strength = (0xffff - le16_to_cpu(status->gain));
46764 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
46766 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46767 - struct dvbt_get_status_msg status;
46768 - char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46769 + struct dvbt_get_status_msg *status;
46773 - ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46774 - sizeof(status), 0);
46775 + cmd = kmalloc(1, GFP_KERNEL);
46778 + status = kmalloc(sizeof(*status), GFP_KERNEL);
46779 + if (status == NULL) {
46784 + cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46786 + ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46787 + sizeof(*status), 0);
46789 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
46793 - *snr = (status.snr << 8) | status.snr;
46795 + *snr = (status->snr << 8) | status->snr;
46803 static int cinergyt2_fe_init(struct dvb_frontend *fe)
46804 @@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
46806 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
46807 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46808 - struct dvbt_set_parameters_msg param;
46810 + struct dvbt_set_parameters_msg *param;
46814 - param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
46815 - param.tps = cpu_to_le16(compute_tps(fep));
46816 - param.freq = cpu_to_le32(fep->frequency / 1000);
46818 + result = kmalloc(2, GFP_KERNEL);
46819 + if (result == NULL)
46821 + param = kmalloc(sizeof(*param), GFP_KERNEL);
46822 + if (param == NULL) {
46827 + param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
46828 + param->tps = cpu_to_le16(compute_tps(fep));
46829 + param->freq = cpu_to_le32(fep->frequency / 1000);
46830 + param->flags = 0;
46832 switch (fep->bandwidth_hz) {
46835 - param.bandwidth = 8;
46836 + param->bandwidth = 8;
46839 - param.bandwidth = 7;
46840 + param->bandwidth = 7;
46843 - param.bandwidth = 6;
46844 + param->bandwidth = 6;
46848 err = dvb_usb_generic_rw(state->d,
46849 - (char *)¶m, sizeof(param),
46850 - result, sizeof(result), 0);
46851 + (char *)param, sizeof(*param),
46854 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
46856 - return (err < 0) ? err : 0;
46862 static void cinergyt2_fe_release(struct dvb_frontend *fe)
46863 diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46864 index 733a7ff..f8b52e3 100644
46865 --- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46866 +++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46867 @@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
46869 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
46871 - struct hexline hx;
46873 + struct hexline *hx;
46877 + reset = kmalloc(1, GFP_KERNEL);
46878 + if (reset == NULL)
46881 + hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
46882 + if (hx == NULL) {
46889 - if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
46891 + if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
46892 err("could not stop the USB controller CPU.");
46894 - while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
46895 - deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
46896 - ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
46897 + while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
46898 + deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
46899 + ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
46901 - if (ret != hx.len) {
46902 + if (ret != hx->len) {
46903 err("error while transferring firmware "
46904 "(transferred size: %d, block size: %d)",
46912 err("firmware download failed at %d with %d",pos,ret);
46919 /* restart the CPU */
46921 - if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
46923 + if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
46924 err("could not restart the USB controller CPU.");
46935 EXPORT_SYMBOL(usb_cypress_load_firmware);
46936 diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
46937 index 5801ae7..83f71fa 100644
46938 --- a/drivers/media/usb/dvb-usb/technisat-usb2.c
46939 +++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
46940 @@ -87,8 +87,11 @@ struct technisat_usb2_state {
46941 static int technisat_usb2_i2c_access(struct usb_device *udev,
46942 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
46945 - int ret, actual_length;
46946 + u8 *b = kmalloc(64, GFP_KERNEL);
46947 + int ret, actual_length, error = 0;
46952 deb_i2c("i2c-access: %02x, tx: ", device_addr);
46953 debug_dump(tx, txlen, deb_i2c);
46954 @@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46957 err("i2c-error: out failed %02x = %d", device_addr, ret);
46963 ret = usb_bulk_msg(udev,
46964 @@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46965 b, 64, &actual_length, 1000);
46967 err("i2c-error: in failed %02x = %d", device_addr, ret);
46973 if (b[0] != I2C_STATUS_OK) {
46974 @@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46975 /* handle tuner-i2c-nak */
46976 if (!(b[0] == I2C_STATUS_NAK &&
46977 device_addr == 0x60
46978 - /* && device_is_technisat_usb2 */))
46980 + /* && device_is_technisat_usb2 */)) {
46986 deb_i2c("status: %d, ", b[0]);
46987 @@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46997 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
46998 @@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
47003 - red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
47006 + u8 *led = kzalloc(8, GFP_KERNEL);
47011 if (disable_led_control && state != TECH_LED_OFF)
47014 + led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
47019 @@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
47020 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
47021 USB_TYPE_VENDOR | USB_DIR_OUT,
47023 - led, sizeof(led), 500);
47026 mutex_unlock(&d->i2c_mutex);
47033 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
47037 + u8 *b = kzalloc(1, GFP_KERNEL);
47042 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
47044 @@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
47045 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
47046 USB_TYPE_VENDOR | USB_DIR_OUT,
47047 (red << 8) | green, 0,
47051 mutex_unlock(&d->i2c_mutex);
47058 @@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47059 struct dvb_usb_device_description **desc, int *cold)
47063 + u8 *version = kmalloc(3, GFP_KERNEL);
47065 /* first select the interface */
47066 if (usb_set_interface(udev, 0, 1) != 0)
47067 @@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47069 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
47071 + if (version == NULL)
47074 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
47075 GET_VERSION_INFO_VENDOR_REQUEST,
47076 USB_TYPE_VENDOR | USB_DIR_IN,
47078 - version, sizeof(version), 500);
47079 + version, 3, 500);
47083 @@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47092 @@ -594,10 +618,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
47094 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
47099 struct ir_raw_event ev;
47101 + buf = kmalloc(62, GFP_KERNEL);
47106 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
47109 @@ -620,16 +649,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
47110 GET_IR_DATA_VENDOR_REQUEST,
47111 USB_TYPE_VENDOR | USB_DIR_IN,
47113 - buf, sizeof(buf), 500);
47117 mutex_unlock(&d->i2c_mutex);
47128 return 0; /* no key pressed */
47133 @@ -656,6 +689,8 @@ unlock:
47135 ir_raw_event_handle(d->rc_dev);
47142 diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47143 index af63543..0436f20 100644
47144 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47145 +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47146 @@ -429,7 +429,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
47147 * by passing a very big num_planes value */
47148 uplane = compat_alloc_user_space(num_planes *
47149 sizeof(struct v4l2_plane));
47150 - kp->m.planes = (__force struct v4l2_plane *)uplane;
47151 + kp->m.planes = (__force_kernel struct v4l2_plane *)uplane;
47153 while (--num_planes >= 0) {
47154 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
47155 @@ -500,7 +500,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
47156 if (num_planes == 0)
47159 - uplane = (__force struct v4l2_plane __user *)kp->m.planes;
47160 + uplane = (struct v4l2_plane __force_user *)kp->m.planes;
47161 if (get_user(p, &up->m.planes))
47163 uplane32 = compat_ptr(p);
47164 @@ -564,7 +564,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
47165 get_user(kp->flags, &up->flags) ||
47166 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
47168 - kp->base = (__force void *)compat_ptr(tmp);
47169 + kp->base = (__force_kernel void *)compat_ptr(tmp);
47173 @@ -669,7 +669,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47174 n * sizeof(struct v4l2_ext_control32)))
47176 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
47177 - kp->controls = (__force struct v4l2_ext_control *)kcontrols;
47178 + kp->controls = (__force_kernel struct v4l2_ext_control *)kcontrols;
47182 @@ -696,7 +696,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47184 struct v4l2_ext_control32 __user *ucontrols;
47185 struct v4l2_ext_control __user *kcontrols =
47186 - (__force struct v4l2_ext_control __user *)kp->controls;
47187 + (struct v4l2_ext_control __force_user *)kp->controls;
47191 @@ -780,7 +780,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
47192 get_user(tmp, &up->edid) ||
47193 copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
47195 - kp->edid = (__force u8 *)compat_ptr(tmp);
47196 + kp->edid = (__force_kernel u8 *)compat_ptr(tmp);
47200 diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
47201 index 5b0a30b..1974b38 100644
47202 --- a/drivers/media/v4l2-core/v4l2-device.c
47203 +++ b/drivers/media/v4l2-core/v4l2-device.c
47204 @@ -74,9 +74,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
47205 EXPORT_SYMBOL_GPL(v4l2_device_put);
47207 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
47208 - atomic_t *instance)
47209 + atomic_unchecked_t *instance)
47211 - int num = atomic_inc_return(instance) - 1;
47212 + int num = atomic_inc_return_unchecked(instance) - 1;
47213 int len = strlen(basename);
47215 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
47216 diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
47217 index aa407cb..ee847d4 100644
47218 --- a/drivers/media/v4l2-core/v4l2-ioctl.c
47219 +++ b/drivers/media/v4l2-core/v4l2-ioctl.c
47220 @@ -2151,7 +2151,8 @@ struct v4l2_ioctl_info {
47221 struct file *file, void *fh, void *p);
47223 void (*debug)(const void *arg, bool write_only);
47226 +typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
47228 /* This control needs a priority check */
47229 #define INFO_FL_PRIO (1 << 0)
47230 @@ -2335,7 +2336,7 @@ static long __video_do_ioctl(struct file *file,
47231 struct video_device *vfd = video_devdata(file);
47232 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
47233 bool write_only = false;
47234 - struct v4l2_ioctl_info default_info;
47235 + v4l2_ioctl_info_no_const default_info;
47236 const struct v4l2_ioctl_info *info;
47237 void *fh = file->private_data;
47238 struct v4l2_fh *vfh = NULL;
47239 @@ -2426,7 +2427,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47243 - *user_ptr = (void __user *)buf->m.planes;
47244 + *user_ptr = (void __force_user *)buf->m.planes;
47245 *kernel_ptr = (void **)&buf->m.planes;
47246 *array_size = sizeof(struct v4l2_plane) * buf->length;
47248 @@ -2443,7 +2444,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47252 - *user_ptr = (void __user *)edid->edid;
47253 + *user_ptr = (void __force_user *)edid->edid;
47254 *kernel_ptr = (void **)&edid->edid;
47255 *array_size = edid->blocks * 128;
47257 @@ -2461,7 +2462,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47261 - *user_ptr = (void __user *)ctrls->controls;
47262 + *user_ptr = (void __force_user *)ctrls->controls;
47263 *kernel_ptr = (void **)&ctrls->controls;
47264 *array_size = sizeof(struct v4l2_ext_control)
47266 @@ -2562,7 +2563,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
47269 if (has_array_args) {
47270 - *kernel_ptr = (void __force *)user_ptr;
47271 + *kernel_ptr = (void __force_kernel *)user_ptr;
47272 if (copy_to_user(user_ptr, mbuf, array_size))
47274 goto out_array_args;
47275 diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
47276 index c94ea0d..b8a9f88 100644
47277 --- a/drivers/memory/omap-gpmc.c
47278 +++ b/drivers/memory/omap-gpmc.c
47279 @@ -232,7 +232,6 @@ struct omap3_gpmc_regs {
47282 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
47283 -static struct irq_chip gpmc_irq_chip;
47284 static int gpmc_irq_start;
47286 static struct resource gpmc_mem_root;
47287 @@ -1146,6 +1145,17 @@ static void gpmc_irq_noop(struct irq_data *data) { }
47289 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
47291 +static struct irq_chip gpmc_irq_chip = {
47293 + .irq_startup = gpmc_irq_noop_ret,
47294 + .irq_enable = gpmc_irq_enable,
47295 + .irq_disable = gpmc_irq_disable,
47296 + .irq_shutdown = gpmc_irq_noop,
47297 + .irq_ack = gpmc_irq_noop,
47298 + .irq_mask = gpmc_irq_noop,
47299 + .irq_unmask = gpmc_irq_noop,
47302 static int gpmc_setup_irq(void)
47305 @@ -1160,15 +1170,6 @@ static int gpmc_setup_irq(void)
47306 return gpmc_irq_start;
47309 - gpmc_irq_chip.name = "gpmc";
47310 - gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
47311 - gpmc_irq_chip.irq_enable = gpmc_irq_enable;
47312 - gpmc_irq_chip.irq_disable = gpmc_irq_disable;
47313 - gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
47314 - gpmc_irq_chip.irq_ack = gpmc_irq_noop;
47315 - gpmc_irq_chip.irq_mask = gpmc_irq_noop;
47316 - gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
47318 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
47319 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
47321 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
47322 index 187f836..679544b 100644
47323 --- a/drivers/message/fusion/mptbase.c
47324 +++ b/drivers/message/fusion/mptbase.c
47325 @@ -6746,8 +6746,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47326 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
47327 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
47329 +#ifdef CONFIG_GRKERNSEC_HIDESYM
47330 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
47332 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
47333 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
47337 * Rounding UP to nearest 4-kB boundary here...
47339 @@ -6760,7 +6765,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47340 ioc->facts.GlobalCredits);
47342 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
47343 +#ifdef CONFIG_GRKERNSEC_HIDESYM
47346 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
47348 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
47349 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
47350 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
47351 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
47352 index 5bdaae1..eced16f 100644
47353 --- a/drivers/message/fusion/mptsas.c
47354 +++ b/drivers/message/fusion/mptsas.c
47355 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
47359 +static inline void
47360 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47362 + if (phy_info->port_details) {
47363 + phy_info->port_details->rphy = rphy;
47364 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47365 + ioc->name, rphy));
47369 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47370 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47371 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47372 + ioc->name, rphy, rphy->dev.release));
47378 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
47379 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
47383 -static inline void
47384 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47386 - if (phy_info->port_details) {
47387 - phy_info->port_details->rphy = rphy;
47388 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47389 - ioc->name, rphy));
47393 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47394 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47395 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47396 - ioc->name, rphy, rphy->dev.release));
47400 static inline struct sas_port *
47401 mptsas_get_port(struct mptsas_phyinfo *phy_info)
47403 diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
47404 index cdd6f3d..1907a98 100644
47405 --- a/drivers/mfd/ab8500-debugfs.c
47406 +++ b/drivers/mfd/ab8500-debugfs.c
47407 @@ -100,7 +100,7 @@ static int irq_last;
47408 static u32 *irq_count;
47409 static int num_irqs;
47411 -static struct device_attribute **dev_attr;
47412 +static device_attribute_no_const **dev_attr;
47413 static char **event_name;
47415 static u8 avg_sample = SAMPLE_16;
47416 diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
47417 index 8057849..0550fdf 100644
47418 --- a/drivers/mfd/kempld-core.c
47419 +++ b/drivers/mfd/kempld-core.c
47420 @@ -499,7 +499,7 @@ static struct platform_driver kempld_driver = {
47421 .remove = kempld_remove,
47424 -static struct dmi_system_id kempld_dmi_table[] __initdata = {
47425 +static const struct dmi_system_id kempld_dmi_table[] __initconst = {
47429 diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
47430 index c880c89..45a7c68 100644
47431 --- a/drivers/mfd/max8925-i2c.c
47432 +++ b/drivers/mfd/max8925-i2c.c
47433 @@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
47434 const struct i2c_device_id *id)
47436 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
47437 - static struct max8925_chip *chip;
47438 + struct max8925_chip *chip;
47439 struct device_node *node = client->dev.of_node;
47441 if (node && !pdata) {
47442 diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
47443 index 7612d89..70549c2 100644
47444 --- a/drivers/mfd/tps65910.c
47445 +++ b/drivers/mfd/tps65910.c
47446 @@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
47447 struct tps65910_platform_data *pdata)
47450 - static struct regmap_irq_chip *tps6591x_irqs_chip;
47451 + struct regmap_irq_chip *tps6591x_irqs_chip;
47454 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
47455 diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
47456 index 1b772ef..01e77d33 100644
47457 --- a/drivers/mfd/twl4030-irq.c
47458 +++ b/drivers/mfd/twl4030-irq.c
47460 #include <linux/of.h>
47461 #include <linux/irqdomain.h>
47462 #include <linux/i2c/twl.h>
47463 +#include <asm/pgtable.h>
47465 #include "twl-core.h"
47467 @@ -729,10 +730,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
47468 * Install an irq handler for each of the SIH modules;
47469 * clone dummy irq_chip since PIH can't *do* anything
47471 - twl4030_irq_chip = dummy_irq_chip;
47472 - twl4030_irq_chip.name = "twl4030";
47473 + pax_open_kernel();
47474 + memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
47475 + *(const char **)&twl4030_irq_chip.name = "twl4030";
47477 - twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
47478 + *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
47479 + pax_close_kernel();
47481 for (i = irq_base; i < irq_end; i++) {
47482 irq_set_chip_and_handler(i, &twl4030_irq_chip,
47483 diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
47484 index 464419b..64bae8d 100644
47485 --- a/drivers/misc/c2port/core.c
47486 +++ b/drivers/misc/c2port/core.c
47487 @@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
47488 goto error_idr_alloc;
47491 - bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
47492 + pax_open_kernel();
47493 + *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
47494 + pax_close_kernel();
47496 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
47497 "c2port%d", c2dev->id);
47498 diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
47499 index 8385177..2f54635 100644
47500 --- a/drivers/misc/eeprom/sunxi_sid.c
47501 +++ b/drivers/misc/eeprom/sunxi_sid.c
47502 @@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
47504 platform_set_drvdata(pdev, sid_data);
47506 - sid_bin_attr.size = sid_data->keysize;
47507 + pax_open_kernel();
47508 + *(size_t *)&sid_bin_attr.size = sid_data->keysize;
47509 + pax_close_kernel();
47510 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
47513 diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
47514 index 36f5d52..32311c3 100644
47515 --- a/drivers/misc/kgdbts.c
47516 +++ b/drivers/misc/kgdbts.c
47517 @@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
47518 char before[BREAK_INSTR_SIZE];
47519 char after[BREAK_INSTR_SIZE];
47521 - probe_kernel_read(before, (char *)kgdbts_break_test,
47522 + probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
47524 init_simple_test();
47525 ts.tst = plant_and_detach_test;
47526 @@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
47527 /* Activate test with initial breakpoint */
47530 - probe_kernel_read(after, (char *)kgdbts_break_test,
47531 + probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
47533 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
47534 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
47535 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
47536 index 4739689..8a52950 100644
47537 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
47538 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
47539 @@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
47540 * the lid is closed. This leads to interrupts as soon as a little move
47543 - atomic_inc(&lis3->count);
47544 + atomic_inc_unchecked(&lis3->count);
47546 wake_up_interruptible(&lis3->misc_wait);
47547 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
47548 @@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
47550 pm_runtime_get_sync(lis3->pm_dev);
47552 - atomic_set(&lis3->count, 0);
47553 + atomic_set_unchecked(&lis3->count, 0);
47557 @@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
47558 add_wait_queue(&lis3->misc_wait, &wait);
47560 set_current_state(TASK_INTERRUPTIBLE);
47561 - data = atomic_xchg(&lis3->count, 0);
47562 + data = atomic_xchg_unchecked(&lis3->count, 0);
47566 @@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
47567 struct lis3lv02d, miscdev);
47569 poll_wait(file, &lis3->misc_wait, wait);
47570 - if (atomic_read(&lis3->count))
47571 + if (atomic_read_unchecked(&lis3->count))
47572 return POLLIN | POLLRDNORM;
47575 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
47576 index c439c82..1f20f57 100644
47577 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
47578 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
47579 @@ -297,7 +297,7 @@ struct lis3lv02d {
47580 struct input_polled_dev *idev; /* input device */
47581 struct platform_device *pdev; /* platform device */
47582 struct regulator_bulk_data regulators[2];
47583 - atomic_t count; /* interrupt count after last read */
47584 + atomic_unchecked_t count; /* interrupt count after last read */
47585 union axis_conversion ac; /* hw -> logical axis */
47586 int mapped_btns[3];
47588 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
47589 index 2f30bad..c4c13d0 100644
47590 --- a/drivers/misc/sgi-gru/gruhandles.c
47591 +++ b/drivers/misc/sgi-gru/gruhandles.c
47592 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
47593 unsigned long nsec;
47595 nsec = CLKS2NSEC(clks);
47596 - atomic_long_inc(&mcs_op_statistics[op].count);
47597 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
47598 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
47599 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
47600 if (mcs_op_statistics[op].max < nsec)
47601 mcs_op_statistics[op].max = nsec;
47603 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
47604 index 4f76359..cdfcb2e 100644
47605 --- a/drivers/misc/sgi-gru/gruprocfs.c
47606 +++ b/drivers/misc/sgi-gru/gruprocfs.c
47609 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
47611 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
47612 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
47614 - unsigned long val = atomic_long_read(v);
47615 + unsigned long val = atomic_long_read_unchecked(v);
47617 seq_printf(s, "%16lu %s\n", val, id);
47619 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
47621 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
47622 for (op = 0; op < mcsop_last; op++) {
47623 - count = atomic_long_read(&mcs_op_statistics[op].count);
47624 - total = atomic_long_read(&mcs_op_statistics[op].total);
47625 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
47626 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
47627 max = mcs_op_statistics[op].max;
47628 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
47629 count ? total / count : 0, max);
47630 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
47631 index 5c3ce24..4915ccb 100644
47632 --- a/drivers/misc/sgi-gru/grutables.h
47633 +++ b/drivers/misc/sgi-gru/grutables.h
47634 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
47637 struct gru_stats_s {
47638 - atomic_long_t vdata_alloc;
47639 - atomic_long_t vdata_free;
47640 - atomic_long_t gts_alloc;
47641 - atomic_long_t gts_free;
47642 - atomic_long_t gms_alloc;
47643 - atomic_long_t gms_free;
47644 - atomic_long_t gts_double_allocate;
47645 - atomic_long_t assign_context;
47646 - atomic_long_t assign_context_failed;
47647 - atomic_long_t free_context;
47648 - atomic_long_t load_user_context;
47649 - atomic_long_t load_kernel_context;
47650 - atomic_long_t lock_kernel_context;
47651 - atomic_long_t unlock_kernel_context;
47652 - atomic_long_t steal_user_context;
47653 - atomic_long_t steal_kernel_context;
47654 - atomic_long_t steal_context_failed;
47655 - atomic_long_t nopfn;
47656 - atomic_long_t asid_new;
47657 - atomic_long_t asid_next;
47658 - atomic_long_t asid_wrap;
47659 - atomic_long_t asid_reuse;
47660 - atomic_long_t intr;
47661 - atomic_long_t intr_cbr;
47662 - atomic_long_t intr_tfh;
47663 - atomic_long_t intr_spurious;
47664 - atomic_long_t intr_mm_lock_failed;
47665 - atomic_long_t call_os;
47666 - atomic_long_t call_os_wait_queue;
47667 - atomic_long_t user_flush_tlb;
47668 - atomic_long_t user_unload_context;
47669 - atomic_long_t user_exception;
47670 - atomic_long_t set_context_option;
47671 - atomic_long_t check_context_retarget_intr;
47672 - atomic_long_t check_context_unload;
47673 - atomic_long_t tlb_dropin;
47674 - atomic_long_t tlb_preload_page;
47675 - atomic_long_t tlb_dropin_fail_no_asid;
47676 - atomic_long_t tlb_dropin_fail_upm;
47677 - atomic_long_t tlb_dropin_fail_invalid;
47678 - atomic_long_t tlb_dropin_fail_range_active;
47679 - atomic_long_t tlb_dropin_fail_idle;
47680 - atomic_long_t tlb_dropin_fail_fmm;
47681 - atomic_long_t tlb_dropin_fail_no_exception;
47682 - atomic_long_t tfh_stale_on_fault;
47683 - atomic_long_t mmu_invalidate_range;
47684 - atomic_long_t mmu_invalidate_page;
47685 - atomic_long_t flush_tlb;
47686 - atomic_long_t flush_tlb_gru;
47687 - atomic_long_t flush_tlb_gru_tgh;
47688 - atomic_long_t flush_tlb_gru_zero_asid;
47689 + atomic_long_unchecked_t vdata_alloc;
47690 + atomic_long_unchecked_t vdata_free;
47691 + atomic_long_unchecked_t gts_alloc;
47692 + atomic_long_unchecked_t gts_free;
47693 + atomic_long_unchecked_t gms_alloc;
47694 + atomic_long_unchecked_t gms_free;
47695 + atomic_long_unchecked_t gts_double_allocate;
47696 + atomic_long_unchecked_t assign_context;
47697 + atomic_long_unchecked_t assign_context_failed;
47698 + atomic_long_unchecked_t free_context;
47699 + atomic_long_unchecked_t load_user_context;
47700 + atomic_long_unchecked_t load_kernel_context;
47701 + atomic_long_unchecked_t lock_kernel_context;
47702 + atomic_long_unchecked_t unlock_kernel_context;
47703 + atomic_long_unchecked_t steal_user_context;
47704 + atomic_long_unchecked_t steal_kernel_context;
47705 + atomic_long_unchecked_t steal_context_failed;
47706 + atomic_long_unchecked_t nopfn;
47707 + atomic_long_unchecked_t asid_new;
47708 + atomic_long_unchecked_t asid_next;
47709 + atomic_long_unchecked_t asid_wrap;
47710 + atomic_long_unchecked_t asid_reuse;
47711 + atomic_long_unchecked_t intr;
47712 + atomic_long_unchecked_t intr_cbr;
47713 + atomic_long_unchecked_t intr_tfh;
47714 + atomic_long_unchecked_t intr_spurious;
47715 + atomic_long_unchecked_t intr_mm_lock_failed;
47716 + atomic_long_unchecked_t call_os;
47717 + atomic_long_unchecked_t call_os_wait_queue;
47718 + atomic_long_unchecked_t user_flush_tlb;
47719 + atomic_long_unchecked_t user_unload_context;
47720 + atomic_long_unchecked_t user_exception;
47721 + atomic_long_unchecked_t set_context_option;
47722 + atomic_long_unchecked_t check_context_retarget_intr;
47723 + atomic_long_unchecked_t check_context_unload;
47724 + atomic_long_unchecked_t tlb_dropin;
47725 + atomic_long_unchecked_t tlb_preload_page;
47726 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
47727 + atomic_long_unchecked_t tlb_dropin_fail_upm;
47728 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
47729 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
47730 + atomic_long_unchecked_t tlb_dropin_fail_idle;
47731 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
47732 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
47733 + atomic_long_unchecked_t tfh_stale_on_fault;
47734 + atomic_long_unchecked_t mmu_invalidate_range;
47735 + atomic_long_unchecked_t mmu_invalidate_page;
47736 + atomic_long_unchecked_t flush_tlb;
47737 + atomic_long_unchecked_t flush_tlb_gru;
47738 + atomic_long_unchecked_t flush_tlb_gru_tgh;
47739 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
47741 - atomic_long_t copy_gpa;
47742 - atomic_long_t read_gpa;
47743 + atomic_long_unchecked_t copy_gpa;
47744 + atomic_long_unchecked_t read_gpa;
47746 - atomic_long_t mesq_receive;
47747 - atomic_long_t mesq_receive_none;
47748 - atomic_long_t mesq_send;
47749 - atomic_long_t mesq_send_failed;
47750 - atomic_long_t mesq_noop;
47751 - atomic_long_t mesq_send_unexpected_error;
47752 - atomic_long_t mesq_send_lb_overflow;
47753 - atomic_long_t mesq_send_qlimit_reached;
47754 - atomic_long_t mesq_send_amo_nacked;
47755 - atomic_long_t mesq_send_put_nacked;
47756 - atomic_long_t mesq_page_overflow;
47757 - atomic_long_t mesq_qf_locked;
47758 - atomic_long_t mesq_qf_noop_not_full;
47759 - atomic_long_t mesq_qf_switch_head_failed;
47760 - atomic_long_t mesq_qf_unexpected_error;
47761 - atomic_long_t mesq_noop_unexpected_error;
47762 - atomic_long_t mesq_noop_lb_overflow;
47763 - atomic_long_t mesq_noop_qlimit_reached;
47764 - atomic_long_t mesq_noop_amo_nacked;
47765 - atomic_long_t mesq_noop_put_nacked;
47766 - atomic_long_t mesq_noop_page_overflow;
47767 + atomic_long_unchecked_t mesq_receive;
47768 + atomic_long_unchecked_t mesq_receive_none;
47769 + atomic_long_unchecked_t mesq_send;
47770 + atomic_long_unchecked_t mesq_send_failed;
47771 + atomic_long_unchecked_t mesq_noop;
47772 + atomic_long_unchecked_t mesq_send_unexpected_error;
47773 + atomic_long_unchecked_t mesq_send_lb_overflow;
47774 + atomic_long_unchecked_t mesq_send_qlimit_reached;
47775 + atomic_long_unchecked_t mesq_send_amo_nacked;
47776 + atomic_long_unchecked_t mesq_send_put_nacked;
47777 + atomic_long_unchecked_t mesq_page_overflow;
47778 + atomic_long_unchecked_t mesq_qf_locked;
47779 + atomic_long_unchecked_t mesq_qf_noop_not_full;
47780 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
47781 + atomic_long_unchecked_t mesq_qf_unexpected_error;
47782 + atomic_long_unchecked_t mesq_noop_unexpected_error;
47783 + atomic_long_unchecked_t mesq_noop_lb_overflow;
47784 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
47785 + atomic_long_unchecked_t mesq_noop_amo_nacked;
47786 + atomic_long_unchecked_t mesq_noop_put_nacked;
47787 + atomic_long_unchecked_t mesq_noop_page_overflow;
47791 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
47792 tghop_invalidate, mcsop_last};
47794 struct mcs_op_statistic {
47795 - atomic_long_t count;
47796 - atomic_long_t total;
47797 + atomic_long_unchecked_t count;
47798 + atomic_long_unchecked_t total;
47802 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
47804 #define STAT(id) do { \
47805 if (gru_options & OPT_STATS) \
47806 - atomic_long_inc(&gru_stats.id); \
47807 + atomic_long_inc_unchecked(&gru_stats.id); \
47810 #ifdef CONFIG_SGI_GRU_DEBUG
47811 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
47812 index c862cd4..0d176fe 100644
47813 --- a/drivers/misc/sgi-xp/xp.h
47814 +++ b/drivers/misc/sgi-xp/xp.h
47815 @@ -288,7 +288,7 @@ struct xpc_interface {
47816 xpc_notify_func, void *);
47817 void (*received) (short, int, void *);
47818 enum xp_retval (*partid_to_nasids) (short, void *);
47822 extern struct xpc_interface xpc_interface;
47824 diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
47825 index 01be66d..e3a0c7e 100644
47826 --- a/drivers/misc/sgi-xp/xp_main.c
47827 +++ b/drivers/misc/sgi-xp/xp_main.c
47828 @@ -78,13 +78,13 @@ xpc_notloaded(void)
47831 struct xpc_interface xpc_interface = {
47832 - (void (*)(int))xpc_notloaded,
47833 - (void (*)(int))xpc_notloaded,
47834 - (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
47835 - (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
47836 + .connect = (void (*)(int))xpc_notloaded,
47837 + .disconnect = (void (*)(int))xpc_notloaded,
47838 + .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
47839 + .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
47840 void *))xpc_notloaded,
47841 - (void (*)(short, int, void *))xpc_notloaded,
47842 - (enum xp_retval(*)(short, void *))xpc_notloaded
47843 + .received = (void (*)(short, int, void *))xpc_notloaded,
47844 + .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
47846 EXPORT_SYMBOL_GPL(xpc_interface);
47848 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
47849 index b94d5f7..7f494c5 100644
47850 --- a/drivers/misc/sgi-xp/xpc.h
47851 +++ b/drivers/misc/sgi-xp/xpc.h
47852 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
47853 void (*received_payload) (struct xpc_channel *, void *);
47854 void (*notify_senders_of_disconnect) (struct xpc_channel *);
47856 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
47858 /* struct xpc_partition act_state values (for XPC HB) */
47860 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
47861 /* found in xpc_main.c */
47862 extern struct device *xpc_part;
47863 extern struct device *xpc_chan;
47864 -extern struct xpc_arch_operations xpc_arch_ops;
47865 +extern xpc_arch_operations_no_const xpc_arch_ops;
47866 extern int xpc_disengage_timelimit;
47867 extern int xpc_disengage_timedout;
47868 extern int xpc_activate_IRQ_rcvd;
47869 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
47870 index 7f32712..8539ab2 100644
47871 --- a/drivers/misc/sgi-xp/xpc_main.c
47872 +++ b/drivers/misc/sgi-xp/xpc_main.c
47873 @@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
47874 .notifier_call = xpc_system_die,
47877 -struct xpc_arch_operations xpc_arch_ops;
47878 +xpc_arch_operations_no_const xpc_arch_ops;
47881 * Timer function to enforce the timelimit on the partition disengage.
47882 diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
47883 index 60f7141..ba97c1a 100644
47884 --- a/drivers/mmc/card/block.c
47885 +++ b/drivers/mmc/card/block.c
47886 @@ -577,7 +577,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
47887 if (idata->ic.postsleep_min_us)
47888 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
47890 - if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
47891 + if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
47895 diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
47896 index f45ab91..9f50d8f 100644
47897 --- a/drivers/mmc/host/dw_mmc.h
47898 +++ b/drivers/mmc/host/dw_mmc.h
47899 @@ -287,5 +287,5 @@ struct dw_mci_drv_data {
47900 int (*execute_tuning)(struct dw_mci_slot *slot);
47901 int (*prepare_hs400_tuning)(struct dw_mci *host,
47902 struct mmc_ios *ios);
47905 #endif /* _DW_MMC_H_ */
47906 diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
47907 index fb26674..3172c2b 100644
47908 --- a/drivers/mmc/host/mmci.c
47909 +++ b/drivers/mmc/host/mmci.c
47910 @@ -1633,7 +1633,9 @@ static int mmci_probe(struct amba_device *dev,
47911 mmc->caps |= MMC_CAP_CMD23;
47913 if (variant->busy_detect) {
47914 - mmci_ops.card_busy = mmci_card_busy;
47915 + pax_open_kernel();
47916 + *(void **)&mmci_ops.card_busy = mmci_card_busy;
47917 + pax_close_kernel();
47918 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
47919 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
47920 mmc->max_busy_timeout = 0;
47921 diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
47922 index 9df2b68..6d5ed1a 100644
47923 --- a/drivers/mmc/host/omap_hsmmc.c
47924 +++ b/drivers/mmc/host/omap_hsmmc.c
47925 @@ -2004,7 +2004,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
47927 if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
47928 dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
47929 - omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
47930 + pax_open_kernel();
47931 + *(void **)&omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
47932 + pax_close_kernel();
47935 pm_runtime_enable(host->dev);
47936 diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
47937 index 82f512d..5a228bb 100644
47938 --- a/drivers/mmc/host/sdhci-esdhc-imx.c
47939 +++ b/drivers/mmc/host/sdhci-esdhc-imx.c
47940 @@ -993,9 +993,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
47941 host->mmc->caps |= MMC_CAP_1_8V_DDR;
47944 - if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
47945 - sdhci_esdhc_ops.platform_execute_tuning =
47946 + if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
47947 + pax_open_kernel();
47948 + *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
47949 esdhc_executing_tuning;
47950 + pax_close_kernel();
47953 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
47954 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
47955 diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
47956 index c6d2dd7..81b1ca3 100644
47957 --- a/drivers/mmc/host/sdhci-s3c.c
47958 +++ b/drivers/mmc/host/sdhci-s3c.c
47959 @@ -598,9 +598,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
47960 * we can use overriding functions instead of default.
47962 if (sc->no_divider) {
47963 - sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
47964 - sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
47965 - sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
47966 + pax_open_kernel();
47967 + *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
47968 + *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
47969 + *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
47970 + pax_close_kernel();
47973 /* It supports additional host capabilities if needed */
47974 diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
47975 index 9a1a6ff..b8f1a57 100644
47976 --- a/drivers/mtd/chips/cfi_cmdset_0020.c
47977 +++ b/drivers/mtd/chips/cfi_cmdset_0020.c
47978 @@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
47979 size_t totlen = 0, thislen;
47982 - static char *buffer;
47985 if (!ECCBUF_SIZE) {
47986 /* We should fall back to a general writev implementation.
47987 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
47988 index 870c7fc..c7d6440 100644
47989 --- a/drivers/mtd/nand/denali.c
47990 +++ b/drivers/mtd/nand/denali.c
47992 #include <linux/slab.h>
47993 #include <linux/mtd/mtd.h>
47994 #include <linux/module.h>
47995 +#include <linux/slab.h>
47997 #include "denali.h"
47999 diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48000 index 1b8f350..990f2e9 100644
48001 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48002 +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48003 @@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
48005 /* first try to map the upper buffer directly */
48006 if (virt_addr_valid(this->upper_buf) &&
48007 - !object_is_on_stack(this->upper_buf)) {
48008 + !object_starts_on_stack(this->upper_buf)) {
48009 sg_init_one(sgl, this->upper_buf, this->upper_len);
48010 ret = dma_map_sg(this->dev, sgl, 1, dr);
48012 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
48013 index a5dfbfb..8042ab4 100644
48014 --- a/drivers/mtd/nftlmount.c
48015 +++ b/drivers/mtd/nftlmount.c
48017 #include <asm/errno.h>
48018 #include <linux/delay.h>
48019 #include <linux/slab.h>
48020 +#include <linux/sched.h>
48021 #include <linux/mtd/mtd.h>
48022 #include <linux/mtd/nand.h>
48023 #include <linux/mtd/nftl.h>
48024 diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
48025 index c23184a..4115c41 100644
48026 --- a/drivers/mtd/sm_ftl.c
48027 +++ b/drivers/mtd/sm_ftl.c
48028 @@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
48029 #define SM_CIS_VENDOR_OFFSET 0x59
48030 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
48032 - struct attribute_group *attr_group;
48033 + attribute_group_no_const *attr_group;
48034 struct attribute **attributes;
48035 struct sm_sysfs_attribute *vendor_attribute;
48037 diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
48038 index 7b11243..b3278a3 100644
48039 --- a/drivers/net/bonding/bond_netlink.c
48040 +++ b/drivers/net/bonding/bond_netlink.c
48041 @@ -585,7 +585,7 @@ nla_put_failure:
48045 -struct rtnl_link_ops bond_link_ops __read_mostly = {
48046 +struct rtnl_link_ops bond_link_ops = {
48048 .priv_size = sizeof(struct bonding),
48049 .setup = bond_setup,
48050 diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
48051 index b3b922a..80bba38 100644
48052 --- a/drivers/net/caif/caif_hsi.c
48053 +++ b/drivers/net/caif/caif_hsi.c
48054 @@ -1444,7 +1444,7 @@ err:
48058 -static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
48059 +static struct rtnl_link_ops caif_hsi_link_ops = {
48061 .priv_size = sizeof(struct cfhsi),
48062 .setup = cfhsi_setup,
48063 diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
48064 index e8c96b8..516a96c 100644
48065 --- a/drivers/net/can/Kconfig
48066 +++ b/drivers/net/can/Kconfig
48067 @@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
48070 tristate "Support for Freescale FLEXCAN based chips"
48071 - depends on ARM || PPC
48072 + depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
48074 Say Y here if you want to support for Freescale FlexCAN.
48076 diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
48077 index e9b1810..5c2f3f9 100644
48078 --- a/drivers/net/can/dev.c
48079 +++ b/drivers/net/can/dev.c
48080 @@ -964,7 +964,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
48081 return -EOPNOTSUPP;
48084 -static struct rtnl_link_ops can_link_ops __read_mostly = {
48085 +static struct rtnl_link_ops can_link_ops = {
48087 .maxtype = IFLA_CAN_MAX,
48088 .policy = can_policy,
48089 diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
48090 index 0ce868d..e5dc8bd 100644
48091 --- a/drivers/net/can/vcan.c
48092 +++ b/drivers/net/can/vcan.c
48093 @@ -166,7 +166,7 @@ static void vcan_setup(struct net_device *dev)
48094 dev->destructor = free_netdev;
48097 -static struct rtnl_link_ops vcan_link_ops __read_mostly = {
48098 +static struct rtnl_link_ops vcan_link_ops = {
48100 .setup = vcan_setup,
48102 diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
48103 index 49adbf1..fff7ff8 100644
48104 --- a/drivers/net/dummy.c
48105 +++ b/drivers/net/dummy.c
48106 @@ -164,7 +164,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
48110 -static struct rtnl_link_ops dummy_link_ops __read_mostly = {
48111 +static struct rtnl_link_ops dummy_link_ops = {
48113 .setup = dummy_setup,
48114 .validate = dummy_validate,
48115 diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
48116 index 0443654..4f0aa18 100644
48117 --- a/drivers/net/ethernet/8390/ax88796.c
48118 +++ b/drivers/net/ethernet/8390/ax88796.c
48119 @@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
48120 if (ax->plat->reg_offsets)
48121 ei_local->reg_offset = ax->plat->reg_offsets;
48123 + resource_size_t _mem_size = mem_size;
48124 + do_div(_mem_size, 0x18);
48125 ei_local->reg_offset = ax->reg_offsets;
48126 for (ret = 0; ret < 0x18; ret++)
48127 - ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
48128 + ax->reg_offsets[ret] = _mem_size * ret;
48131 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
48132 diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
48133 index da48e66..2dbec80 100644
48134 --- a/drivers/net/ethernet/altera/altera_tse_main.c
48135 +++ b/drivers/net/ethernet/altera/altera_tse_main.c
48136 @@ -1256,7 +1256,7 @@ static int tse_shutdown(struct net_device *dev)
48140 -static struct net_device_ops altera_tse_netdev_ops = {
48141 +static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
48142 .ndo_open = tse_open,
48143 .ndo_stop = tse_shutdown,
48144 .ndo_start_xmit = tse_start_xmit,
48145 @@ -1493,11 +1493,13 @@ static int altera_tse_probe(struct platform_device *pdev)
48146 ndev->netdev_ops = &altera_tse_netdev_ops;
48147 altera_tse_set_ethtool_ops(ndev);
48149 + pax_open_kernel();
48150 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
48152 if (priv->hash_filter)
48153 altera_tse_netdev_ops.ndo_set_rx_mode =
48154 tse_set_rx_mode_hashfilter;
48155 + pax_close_kernel();
48157 /* Scatter/gather IO is not supported,
48158 * so it is turned off
48159 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48160 index 34c28aa..5e06567 100644
48161 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48162 +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48163 @@ -1124,14 +1124,14 @@ do { \
48164 * operations, everything works on mask values.
48166 #define XMDIO_READ(_pdata, _mmd, _reg) \
48167 - ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
48168 + ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
48169 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
48171 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
48172 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
48174 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
48175 - ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
48176 + ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
48177 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
48179 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
48180 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48181 index 8a50b01..39c1ad0 100644
48182 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48183 +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48184 @@ -187,7 +187,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
48186 memcpy(pdata->ets, ets, sizeof(*pdata->ets));
48188 - pdata->hw_if.config_dcb_tc(pdata);
48189 + pdata->hw_if->config_dcb_tc(pdata);
48193 @@ -226,7 +226,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
48195 memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
48197 - pdata->hw_if.config_dcb_pfc(pdata);
48198 + pdata->hw_if->config_dcb_pfc(pdata);
48202 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48203 index 5c92fb7..08be735 100644
48204 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48205 +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48206 @@ -347,7 +347,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
48208 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48210 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48211 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48212 struct xgbe_channel *channel;
48213 struct xgbe_ring *ring;
48214 struct xgbe_ring_data *rdata;
48215 @@ -388,7 +388,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48217 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
48219 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48220 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48221 struct xgbe_channel *channel;
48222 struct xgbe_ring *ring;
48223 struct xgbe_ring_desc *rdesc;
48224 @@ -620,17 +620,12 @@ err_out:
48228 -void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
48230 - DBGPR("-->xgbe_init_function_ptrs_desc\n");
48232 - desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
48233 - desc_if->free_ring_resources = xgbe_free_ring_resources;
48234 - desc_if->map_tx_skb = xgbe_map_tx_skb;
48235 - desc_if->map_rx_buffer = xgbe_map_rx_buffer;
48236 - desc_if->unmap_rdata = xgbe_unmap_rdata;
48237 - desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
48238 - desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
48240 - DBGPR("<--xgbe_init_function_ptrs_desc\n");
48242 +struct xgbe_desc_if default_xgbe_desc_if = {
48243 + .alloc_ring_resources = xgbe_alloc_ring_resources,
48244 + .free_ring_resources = xgbe_free_ring_resources,
48245 + .map_tx_skb = xgbe_map_tx_skb,
48246 + .map_rx_buffer = xgbe_map_rx_buffer,
48247 + .unmap_rdata = xgbe_unmap_rdata,
48248 + .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
48249 + .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
48251 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48252 index 21d9497..c74b40f 100644
48253 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48254 +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48255 @@ -2772,7 +2772,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
48257 static int xgbe_init(struct xgbe_prv_data *pdata)
48259 - struct xgbe_desc_if *desc_if = &pdata->desc_if;
48260 + struct xgbe_desc_if *desc_if = pdata->desc_if;
48263 DBGPR("-->xgbe_init\n");
48264 @@ -2838,106 +2838,101 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
48268 -void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
48270 - DBGPR("-->xgbe_init_function_ptrs\n");
48272 - hw_if->tx_complete = xgbe_tx_complete;
48274 - hw_if->set_mac_address = xgbe_set_mac_address;
48275 - hw_if->config_rx_mode = xgbe_config_rx_mode;
48277 - hw_if->enable_rx_csum = xgbe_enable_rx_csum;
48278 - hw_if->disable_rx_csum = xgbe_disable_rx_csum;
48280 - hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
48281 - hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
48282 - hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
48283 - hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
48284 - hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
48286 - hw_if->read_mmd_regs = xgbe_read_mmd_regs;
48287 - hw_if->write_mmd_regs = xgbe_write_mmd_regs;
48289 - hw_if->set_gmii_speed = xgbe_set_gmii_speed;
48290 - hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
48291 - hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
48293 - hw_if->enable_tx = xgbe_enable_tx;
48294 - hw_if->disable_tx = xgbe_disable_tx;
48295 - hw_if->enable_rx = xgbe_enable_rx;
48296 - hw_if->disable_rx = xgbe_disable_rx;
48298 - hw_if->powerup_tx = xgbe_powerup_tx;
48299 - hw_if->powerdown_tx = xgbe_powerdown_tx;
48300 - hw_if->powerup_rx = xgbe_powerup_rx;
48301 - hw_if->powerdown_rx = xgbe_powerdown_rx;
48303 - hw_if->dev_xmit = xgbe_dev_xmit;
48304 - hw_if->dev_read = xgbe_dev_read;
48305 - hw_if->enable_int = xgbe_enable_int;
48306 - hw_if->disable_int = xgbe_disable_int;
48307 - hw_if->init = xgbe_init;
48308 - hw_if->exit = xgbe_exit;
48309 +struct xgbe_hw_if default_xgbe_hw_if = {
48310 + .tx_complete = xgbe_tx_complete,
48312 + .set_mac_address = xgbe_set_mac_address,
48313 + .config_rx_mode = xgbe_config_rx_mode,
48315 + .enable_rx_csum = xgbe_enable_rx_csum,
48316 + .disable_rx_csum = xgbe_disable_rx_csum,
48318 + .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
48319 + .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
48320 + .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
48321 + .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
48322 + .update_vlan_hash_table = xgbe_update_vlan_hash_table,
48324 + .read_mmd_regs = xgbe_read_mmd_regs,
48325 + .write_mmd_regs = xgbe_write_mmd_regs,
48327 + .set_gmii_speed = xgbe_set_gmii_speed,
48328 + .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
48329 + .set_xgmii_speed = xgbe_set_xgmii_speed,
48331 + .enable_tx = xgbe_enable_tx,
48332 + .disable_tx = xgbe_disable_tx,
48333 + .enable_rx = xgbe_enable_rx,
48334 + .disable_rx = xgbe_disable_rx,
48336 + .powerup_tx = xgbe_powerup_tx,
48337 + .powerdown_tx = xgbe_powerdown_tx,
48338 + .powerup_rx = xgbe_powerup_rx,
48339 + .powerdown_rx = xgbe_powerdown_rx,
48341 + .dev_xmit = xgbe_dev_xmit,
48342 + .dev_read = xgbe_dev_read,
48343 + .enable_int = xgbe_enable_int,
48344 + .disable_int = xgbe_disable_int,
48345 + .init = xgbe_init,
48346 + .exit = xgbe_exit,
48348 /* Descriptor related Sequences have to be initialized here */
48349 - hw_if->tx_desc_init = xgbe_tx_desc_init;
48350 - hw_if->rx_desc_init = xgbe_rx_desc_init;
48351 - hw_if->tx_desc_reset = xgbe_tx_desc_reset;
48352 - hw_if->rx_desc_reset = xgbe_rx_desc_reset;
48353 - hw_if->is_last_desc = xgbe_is_last_desc;
48354 - hw_if->is_context_desc = xgbe_is_context_desc;
48355 - hw_if->tx_start_xmit = xgbe_tx_start_xmit;
48356 + .tx_desc_init = xgbe_tx_desc_init,
48357 + .rx_desc_init = xgbe_rx_desc_init,
48358 + .tx_desc_reset = xgbe_tx_desc_reset,
48359 + .rx_desc_reset = xgbe_rx_desc_reset,
48360 + .is_last_desc = xgbe_is_last_desc,
48361 + .is_context_desc = xgbe_is_context_desc,
48362 + .tx_start_xmit = xgbe_tx_start_xmit,
48364 /* For FLOW ctrl */
48365 - hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
48366 - hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
48367 + .config_tx_flow_control = xgbe_config_tx_flow_control,
48368 + .config_rx_flow_control = xgbe_config_rx_flow_control,
48370 /* For RX coalescing */
48371 - hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
48372 - hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
48373 - hw_if->usec_to_riwt = xgbe_usec_to_riwt;
48374 - hw_if->riwt_to_usec = xgbe_riwt_to_usec;
48375 + .config_rx_coalesce = xgbe_config_rx_coalesce,
48376 + .config_tx_coalesce = xgbe_config_tx_coalesce,
48377 + .usec_to_riwt = xgbe_usec_to_riwt,
48378 + .riwt_to_usec = xgbe_riwt_to_usec,
48380 /* For RX and TX threshold config */
48381 - hw_if->config_rx_threshold = xgbe_config_rx_threshold;
48382 - hw_if->config_tx_threshold = xgbe_config_tx_threshold;
48383 + .config_rx_threshold = xgbe_config_rx_threshold,
48384 + .config_tx_threshold = xgbe_config_tx_threshold,
48386 /* For RX and TX Store and Forward Mode config */
48387 - hw_if->config_rsf_mode = xgbe_config_rsf_mode;
48388 - hw_if->config_tsf_mode = xgbe_config_tsf_mode;
48389 + .config_rsf_mode = xgbe_config_rsf_mode,
48390 + .config_tsf_mode = xgbe_config_tsf_mode,
48392 /* For TX DMA Operating on Second Frame config */
48393 - hw_if->config_osp_mode = xgbe_config_osp_mode;
48394 + .config_osp_mode = xgbe_config_osp_mode,
48396 /* For RX and TX PBL config */
48397 - hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
48398 - hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
48399 - hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
48400 - hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
48401 - hw_if->config_pblx8 = xgbe_config_pblx8;
48402 + .config_rx_pbl_val = xgbe_config_rx_pbl_val,
48403 + .get_rx_pbl_val = xgbe_get_rx_pbl_val,
48404 + .config_tx_pbl_val = xgbe_config_tx_pbl_val,
48405 + .get_tx_pbl_val = xgbe_get_tx_pbl_val,
48406 + .config_pblx8 = xgbe_config_pblx8,
48408 /* For MMC statistics support */
48409 - hw_if->tx_mmc_int = xgbe_tx_mmc_int;
48410 - hw_if->rx_mmc_int = xgbe_rx_mmc_int;
48411 - hw_if->read_mmc_stats = xgbe_read_mmc_stats;
48412 + .tx_mmc_int = xgbe_tx_mmc_int,
48413 + .rx_mmc_int = xgbe_rx_mmc_int,
48414 + .read_mmc_stats = xgbe_read_mmc_stats,
48416 /* For PTP config */
48417 - hw_if->config_tstamp = xgbe_config_tstamp;
48418 - hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
48419 - hw_if->set_tstamp_time = xgbe_set_tstamp_time;
48420 - hw_if->get_tstamp_time = xgbe_get_tstamp_time;
48421 - hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
48422 + .config_tstamp = xgbe_config_tstamp,
48423 + .update_tstamp_addend = xgbe_update_tstamp_addend,
48424 + .set_tstamp_time = xgbe_set_tstamp_time,
48425 + .get_tstamp_time = xgbe_get_tstamp_time,
48426 + .get_tx_tstamp = xgbe_get_tx_tstamp,
48428 /* For Data Center Bridging config */
48429 - hw_if->config_dcb_tc = xgbe_config_dcb_tc;
48430 - hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
48431 + .config_dcb_tc = xgbe_config_dcb_tc,
48432 + .config_dcb_pfc = xgbe_config_dcb_pfc,
48434 /* For Receive Side Scaling */
48435 - hw_if->enable_rss = xgbe_enable_rss;
48436 - hw_if->disable_rss = xgbe_disable_rss;
48437 - hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
48438 - hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
48440 - DBGPR("<--xgbe_init_function_ptrs\n");
48442 + .enable_rss = xgbe_enable_rss,
48443 + .disable_rss = xgbe_disable_rss,
48444 + .set_rss_hash_key = xgbe_set_rss_hash_key,
48445 + .set_rss_lookup_table = xgbe_set_rss_lookup_table,
48447 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48448 index 9fd6c69..588ff02 100644
48449 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48450 +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48451 @@ -243,7 +243,7 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
48452 * support, tell it now
48454 if (ring->tx.xmit_more)
48455 - pdata->hw_if.tx_start_xmit(channel, ring);
48456 + pdata->hw_if->tx_start_xmit(channel, ring);
48458 return NETDEV_TX_BUSY;
48460 @@ -271,7 +271,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
48462 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48464 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48465 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48466 struct xgbe_channel *channel;
48467 enum xgbe_int int_id;
48469 @@ -293,7 +293,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48471 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48473 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48474 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48475 struct xgbe_channel *channel;
48476 enum xgbe_int int_id;
48478 @@ -316,7 +316,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48479 static irqreturn_t xgbe_isr(int irq, void *data)
48481 struct xgbe_prv_data *pdata = data;
48482 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48483 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48484 struct xgbe_channel *channel;
48485 unsigned int dma_isr, dma_ch_isr;
48486 unsigned int mac_isr, mac_tssr;
48487 @@ -682,7 +682,7 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
48489 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48491 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48492 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48494 DBGPR("-->xgbe_init_tx_coalesce\n");
48496 @@ -696,7 +696,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48498 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48500 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48501 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48503 DBGPR("-->xgbe_init_rx_coalesce\n");
48505 @@ -711,7 +711,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48507 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
48509 - struct xgbe_desc_if *desc_if = &pdata->desc_if;
48510 + struct xgbe_desc_if *desc_if = pdata->desc_if;
48511 struct xgbe_channel *channel;
48512 struct xgbe_ring *ring;
48513 struct xgbe_ring_data *rdata;
48514 @@ -736,7 +736,7 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
48516 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
48518 - struct xgbe_desc_if *desc_if = &pdata->desc_if;
48519 + struct xgbe_desc_if *desc_if = pdata->desc_if;
48520 struct xgbe_channel *channel;
48521 struct xgbe_ring *ring;
48522 struct xgbe_ring_data *rdata;
48523 @@ -762,7 +762,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
48524 static void xgbe_adjust_link(struct net_device *netdev)
48526 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48527 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48528 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48529 struct phy_device *phydev = pdata->phydev;
48532 @@ -870,7 +870,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
48533 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48535 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48536 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48537 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48538 unsigned long flags;
48540 DBGPR("-->xgbe_powerdown\n");
48541 @@ -908,7 +908,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48542 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48544 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48545 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48546 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48547 unsigned long flags;
48549 DBGPR("-->xgbe_powerup\n");
48550 @@ -945,7 +945,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48552 static int xgbe_start(struct xgbe_prv_data *pdata)
48554 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48555 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48556 struct net_device *netdev = pdata->netdev;
48559 @@ -984,7 +984,7 @@ err_napi:
48561 static void xgbe_stop(struct xgbe_prv_data *pdata)
48563 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48564 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48565 struct xgbe_channel *channel;
48566 struct net_device *netdev = pdata->netdev;
48567 struct netdev_queue *txq;
48568 @@ -1211,7 +1211,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
48572 - pdata->hw_if.config_tstamp(pdata, mac_tscr);
48573 + pdata->hw_if->config_tstamp(pdata, mac_tscr);
48575 memcpy(&pdata->tstamp_config, &config, sizeof(config));
48577 @@ -1360,7 +1360,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
48578 static int xgbe_open(struct net_device *netdev)
48580 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48581 - struct xgbe_desc_if *desc_if = &pdata->desc_if;
48582 + struct xgbe_desc_if *desc_if = pdata->desc_if;
48585 DBGPR("-->xgbe_open\n");
48586 @@ -1432,7 +1432,7 @@ err_phy_init:
48587 static int xgbe_close(struct net_device *netdev)
48589 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48590 - struct xgbe_desc_if *desc_if = &pdata->desc_if;
48591 + struct xgbe_desc_if *desc_if = pdata->desc_if;
48593 DBGPR("-->xgbe_close\n");
48595 @@ -1460,8 +1460,8 @@ static int xgbe_close(struct net_device *netdev)
48596 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
48598 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48599 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48600 - struct xgbe_desc_if *desc_if = &pdata->desc_if;
48601 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48602 + struct xgbe_desc_if *desc_if = pdata->desc_if;
48603 struct xgbe_channel *channel;
48604 struct xgbe_ring *ring;
48605 struct xgbe_packet_data *packet;
48606 @@ -1529,7 +1529,7 @@ tx_netdev_return:
48607 static void xgbe_set_rx_mode(struct net_device *netdev)
48609 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48610 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48611 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48613 DBGPR("-->xgbe_set_rx_mode\n");
48615 @@ -1541,7 +1541,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
48616 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
48618 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48619 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48620 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48621 struct sockaddr *saddr = addr;
48623 DBGPR("-->xgbe_set_mac_address\n");
48624 @@ -1616,7 +1616,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
48626 DBGPR("-->%s\n", __func__);
48628 - pdata->hw_if.read_mmc_stats(pdata);
48629 + pdata->hw_if->read_mmc_stats(pdata);
48631 s->rx_packets = pstats->rxframecount_gb;
48632 s->rx_bytes = pstats->rxoctetcount_gb;
48633 @@ -1643,7 +1643,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
48636 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48637 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48638 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48640 DBGPR("-->%s\n", __func__);
48642 @@ -1659,7 +1659,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
48645 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48646 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48647 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48649 DBGPR("-->%s\n", __func__);
48651 @@ -1725,7 +1725,7 @@ static int xgbe_set_features(struct net_device *netdev,
48652 netdev_features_t features)
48654 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48655 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48656 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48657 netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
48660 @@ -1791,8 +1791,8 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
48661 static void xgbe_rx_refresh(struct xgbe_channel *channel)
48663 struct xgbe_prv_data *pdata = channel->pdata;
48664 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48665 - struct xgbe_desc_if *desc_if = &pdata->desc_if;
48666 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48667 + struct xgbe_desc_if *desc_if = pdata->desc_if;
48668 struct xgbe_ring *ring = channel->rx_ring;
48669 struct xgbe_ring_data *rdata;
48671 @@ -1847,8 +1847,8 @@ static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
48672 static int xgbe_tx_poll(struct xgbe_channel *channel)
48674 struct xgbe_prv_data *pdata = channel->pdata;
48675 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48676 - struct xgbe_desc_if *desc_if = &pdata->desc_if;
48677 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48678 + struct xgbe_desc_if *desc_if = pdata->desc_if;
48679 struct xgbe_ring *ring = channel->tx_ring;
48680 struct xgbe_ring_data *rdata;
48681 struct xgbe_ring_desc *rdesc;
48682 @@ -1913,7 +1913,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
48683 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
48685 struct xgbe_prv_data *pdata = channel->pdata;
48686 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48687 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48688 struct xgbe_ring *ring = channel->rx_ring;
48689 struct xgbe_ring_data *rdata;
48690 struct xgbe_packet_data *packet;
48691 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48692 index 5f149e8..6736bf4 100644
48693 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48694 +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48695 @@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
48697 DBGPR("-->%s\n", __func__);
48699 - pdata->hw_if.read_mmc_stats(pdata);
48700 + pdata->hw_if->read_mmc_stats(pdata);
48701 for (i = 0; i < XGBE_STATS_COUNT; i++) {
48702 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
48703 *data++ = *(u64 *)stat;
48704 @@ -396,7 +396,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
48705 struct ethtool_coalesce *ec)
48707 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48708 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48709 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48710 unsigned int rx_frames, rx_riwt, rx_usecs;
48711 unsigned int tx_frames;
48713 @@ -521,7 +521,7 @@ static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
48714 const u8 *key, const u8 hfunc)
48716 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48717 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48718 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48721 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
48722 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48723 index 7149053..889c5492 100644
48724 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48725 +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48726 @@ -159,12 +159,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
48727 DBGPR("<--xgbe_default_config\n");
48730 -static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
48732 - xgbe_init_function_ptrs_dev(&pdata->hw_if);
48733 - xgbe_init_function_ptrs_desc(&pdata->desc_if);
48737 static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
48739 @@ -387,9 +381,8 @@ static int xgbe_probe(struct platform_device *pdev)
48740 memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
48742 /* Set all the function pointers */
48743 - xgbe_init_all_fptrs(pdata);
48744 - hw_if = &pdata->hw_if;
48745 - desc_if = &pdata->desc_if;
48746 + hw_if = pdata->hw_if = &default_xgbe_hw_if;
48747 + desc_if = pdata->desc_if = &default_xgbe_desc_if;
48749 /* Issue software reset to device */
48750 hw_if->exit(pdata);
48751 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48752 index 59e267f..0842a88 100644
48753 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48754 +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48755 @@ -126,7 +126,7 @@
48756 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
48758 struct xgbe_prv_data *pdata = mii->priv;
48759 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48760 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48763 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
48764 @@ -143,7 +143,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
48767 struct xgbe_prv_data *pdata = mii->priv;
48768 - struct xgbe_hw_if *hw_if = &pdata->hw_if;
48769 + struct xgbe_hw_if *hw_if = pdata->hw_if;
48770 int mmd_data = mmd_val;
48772 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
48773 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48774 index b03e4f5..78e4cc4 100644
48775 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48776 +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48777 @@ -129,7 +129,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
48781 - nsec = pdata->hw_if.get_tstamp_time(pdata);
48782 + nsec = pdata->hw_if->get_tstamp_time(pdata);
48786 @@ -158,7 +158,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
48788 spin_lock_irqsave(&pdata->tstamp_lock, flags);
48790 - pdata->hw_if.update_tstamp_addend(pdata, addend);
48791 + pdata->hw_if->update_tstamp_addend(pdata, addend);
48793 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
48795 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
48796 index e62dfa2..7df28d5 100644
48797 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h
48798 +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
48799 @@ -673,8 +673,8 @@ struct xgbe_prv_data {
48801 unsigned int per_channel_irq;
48803 - struct xgbe_hw_if hw_if;
48804 - struct xgbe_desc_if desc_if;
48805 + struct xgbe_hw_if *hw_if;
48806 + struct xgbe_desc_if *desc_if;
48808 /* AXI DMA settings */
48809 unsigned int coherent;
48810 @@ -797,6 +797,9 @@ struct xgbe_prv_data {
48814 +extern struct xgbe_hw_if default_xgbe_hw_if;
48815 +extern struct xgbe_desc_if default_xgbe_desc_if;
48817 /* Function prototypes*/
48819 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
48820 diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
48821 index 783543a..a472348 100644
48822 --- a/drivers/net/ethernet/broadcom/bcmsysport.c
48823 +++ b/drivers/net/ethernet/broadcom/bcmsysport.c
48824 @@ -1721,7 +1721,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
48825 macaddr = of_get_mac_address(dn);
48826 if (!macaddr || !is_valid_ether_addr(macaddr)) {
48827 dev_warn(&pdev->dev, "using random Ethernet MAC\n");
48828 - random_ether_addr(dev->dev_addr);
48829 + eth_hw_addr_random(dev);
48831 ether_addr_copy(dev->dev_addr, macaddr);
48833 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48834 index d7a7175..7011194 100644
48835 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48836 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48837 @@ -1065,7 +1065,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
48838 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
48840 /* RX_MODE controlling object */
48841 - bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
48842 + bnx2x_init_rx_mode_obj(bp);
48844 /* multicast configuration controlling object */
48845 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
48846 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48847 index 07cdf9b..b08ecc7 100644
48848 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48849 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48850 @@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
48854 -void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
48855 - struct bnx2x_rx_mode_obj *o)
48856 +void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
48858 if (CHIP_IS_E1x(bp)) {
48859 - o->wait_comp = bnx2x_empty_rx_mode_wait;
48860 - o->config_rx_mode = bnx2x_set_rx_mode_e1x;
48861 + bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
48862 + bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
48864 - o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
48865 - o->config_rx_mode = bnx2x_set_rx_mode_e2;
48866 + bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
48867 + bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
48871 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48872 index 86baecb..ff3bb46 100644
48873 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48874 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48875 @@ -1411,8 +1411,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
48877 /********************* RX MODE ****************/
48879 -void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
48880 - struct bnx2x_rx_mode_obj *o);
48881 +void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
48884 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
48885 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
48886 index 31c9f82..e65e986 100644
48887 --- a/drivers/net/ethernet/broadcom/tg3.h
48888 +++ b/drivers/net/ethernet/broadcom/tg3.h
48889 @@ -150,6 +150,7 @@
48890 #define CHIPREV_ID_5750_A0 0x4000
48891 #define CHIPREV_ID_5750_A1 0x4001
48892 #define CHIPREV_ID_5750_A3 0x4003
48893 +#define CHIPREV_ID_5750_C1 0x4201
48894 #define CHIPREV_ID_5750_C2 0x4202
48895 #define CHIPREV_ID_5752_A0_HW 0x5000
48896 #define CHIPREV_ID_5752_A0 0x6000
48897 diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
48898 index deb8da6..45d473b 100644
48899 --- a/drivers/net/ethernet/brocade/bna/bna_enet.c
48900 +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
48901 @@ -1694,10 +1694,10 @@ bna_cb_ioceth_reset(void *arg)
48904 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
48905 - bna_cb_ioceth_enable,
48906 - bna_cb_ioceth_disable,
48907 - bna_cb_ioceth_hbfail,
48908 - bna_cb_ioceth_reset
48909 + .enable_cbfn = bna_cb_ioceth_enable,
48910 + .disable_cbfn = bna_cb_ioceth_disable,
48911 + .hbfail_cbfn = bna_cb_ioceth_hbfail,
48912 + .reset_cbfn = bna_cb_ioceth_reset
48915 static void bna_attr_init(struct bna_ioceth *ioceth)
48916 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48917 index 8cffcdf..aadf043 100644
48918 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48919 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48920 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
48922 struct l2t_skb_cb {
48923 arp_failure_handler_func arp_failure_handler;
48927 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
48929 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
48930 index badff18..e15c4ec 100644
48931 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
48932 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
48933 @@ -5373,7 +5373,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
48934 for (i=0; i<ETH_ALEN; i++) {
48935 tmp.addr[i] = dev->dev_addr[i];
48937 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
48938 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
48941 case DE4X5_SET_HWADDR: /* Set the hardware address */
48942 @@ -5413,7 +5413,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
48943 spin_lock_irqsave(&lp->lock, flags);
48944 memcpy(&statbuf, &lp->pktStats, ioc->len);
48945 spin_unlock_irqrestore(&lp->lock, flags);
48946 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
48947 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
48951 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
48952 index e43cc8a..f1cf67c 100644
48953 --- a/drivers/net/ethernet/emulex/benet/be_main.c
48954 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
48955 @@ -539,7 +539,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
48959 - ACCESS_ONCE(*acc) = newacc;
48960 + ACCESS_ONCE_RW(*acc) = newacc;
48963 static void populate_erx_stats(struct be_adapter *adapter,
48964 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
48965 index 6d0c5d5..55be363 100644
48966 --- a/drivers/net/ethernet/faraday/ftgmac100.c
48967 +++ b/drivers/net/ethernet/faraday/ftgmac100.c
48969 #include <linux/netdevice.h>
48970 #include <linux/phy.h>
48971 #include <linux/platform_device.h>
48972 +#include <linux/interrupt.h>
48973 +#include <linux/irqreturn.h>
48974 #include <net/ip.h>
48976 #include "ftgmac100.h"
48977 diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
48978 index dce5f7b..2433466 100644
48979 --- a/drivers/net/ethernet/faraday/ftmac100.c
48980 +++ b/drivers/net/ethernet/faraday/ftmac100.c
48982 #include <linux/module.h>
48983 #include <linux/netdevice.h>
48984 #include <linux/platform_device.h>
48985 +#include <linux/interrupt.h>
48986 +#include <linux/irqreturn.h>
48988 #include "ftmac100.h"
48990 diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48991 index a92b772..250fe69 100644
48992 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48993 +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48994 @@ -419,7 +419,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
48995 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
48997 /* Update the base adjustement value. */
48998 - ACCESS_ONCE(pf->ptp_base_adj) = incval;
48999 + ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
49000 smp_mb(); /* Force the above update. */
49003 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49004 index e5ba040..d47531c 100644
49005 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49006 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49007 @@ -782,7 +782,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
49010 /* update the base incval used to calculate frequency adjustment */
49011 - ACCESS_ONCE(adapter->base_incval) = incval;
49012 + ACCESS_ONCE_RW(adapter->base_incval) = incval;
49015 /* need lock to prevent incorrect read while modifying cyclecounter */
49016 diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
49017 index 74d0389..086ac03 100644
49018 --- a/drivers/net/ethernet/marvell/mvneta.c
49019 +++ b/drivers/net/ethernet/marvell/mvneta.c
49020 @@ -1462,7 +1462,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
49021 struct mvneta_rx_queue *rxq)
49023 struct net_device *dev = pp->dev;
49024 - int rx_done, rx_filled;
49027 u32 rcvd_bytes = 0;
49029 @@ -1473,7 +1473,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
49035 /* Fairness NAPI loop */
49036 while (rx_done < rx_todo) {
49037 @@ -1484,7 +1483,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
49042 rx_status = rx_desc->status;
49043 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
49044 data = (unsigned char *)rx_desc->buf_cookie;
49045 @@ -1524,6 +1522,14 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
49049 + /* Refill processing */
49050 + err = mvneta_rx_refill(pp, rx_desc);
49052 + netdev_err(dev, "Linux processing - Can't refill\n");
49054 + goto err_drop_frame;
49057 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
49059 goto err_drop_frame;
49060 @@ -1543,14 +1549,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
49061 mvneta_rx_csum(pp, rx_status, skb);
49063 napi_gro_receive(&pp->napi, skb);
49065 - /* Refill processing */
49066 - err = mvneta_rx_refill(pp, rx_desc);
49068 - netdev_err(dev, "Linux processing - Can't refill\n");
49075 @@ -1563,7 +1561,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
49078 /* Update rxq management counters */
49079 - mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
49080 + mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
49084 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
49085 index c10d98f..72914c6 100644
49086 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
49087 +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
49088 @@ -475,8 +475,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
49091 /* we want to dirty this cache line once */
49092 - ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
49093 - ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
49094 + ACCESS_ONCE_RW(ring->last_nr_txbb) = last_nr_txbb;
49095 + ACCESS_ONCE_RW(ring->cons) = ring_cons + txbbs_skipped;
49097 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
49099 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
49100 index 6223930..975033d 100644
49101 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
49102 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
49103 @@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
49104 struct __vxge_hw_fifo *fifo;
49105 struct vxge_hw_fifo_config *config;
49106 u32 txdl_size, txdl_per_memblock;
49107 - struct vxge_hw_mempool_cbs fifo_mp_callback;
49108 + static struct vxge_hw_mempool_cbs fifo_mp_callback = {
49109 + .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
49112 struct __vxge_hw_virtualpath *vpath;
49114 if ((vp == NULL) || (attr == NULL)) {
49115 @@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
49119 - fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
49122 __vxge_hw_mempool_create(vpath->hldev,
49123 fifo->config->memblock_size,
49124 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49125 index 33669c2..a29c75e 100644
49126 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49127 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49128 @@ -2324,7 +2324,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
49129 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
49130 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
49131 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
49132 - adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
49133 + pax_open_kernel();
49134 + *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
49135 + pax_close_kernel();
49136 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49137 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
49138 max_tx_rings = QLCNIC_MAX_TX_RINGS;
49139 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49140 index be7d7a6..a8983f8 100644
49141 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49142 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49143 @@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
49144 case QLCNIC_NON_PRIV_FUNC:
49145 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
49146 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49147 - nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
49148 + pax_open_kernel();
49149 + *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
49150 + pax_close_kernel();
49152 case QLCNIC_PRIV_FUNC:
49153 ahw->op_mode = QLCNIC_PRIV_FUNC;
49154 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
49155 - nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
49156 + pax_open_kernel();
49157 + *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
49158 + pax_close_kernel();
49160 case QLCNIC_MGMT_FUNC:
49161 ahw->op_mode = QLCNIC_MGMT_FUNC;
49162 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49163 - nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
49164 + pax_open_kernel();
49165 + *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
49166 + pax_close_kernel();
49169 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
49170 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49171 index 332bb8a..e6adcd1 100644
49172 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49173 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49174 @@ -1285,7 +1285,7 @@ flash_temp:
49175 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
49177 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
49178 - static const struct qlcnic_dump_operations *fw_dump_ops;
49179 + const struct qlcnic_dump_operations *fw_dump_ops;
49180 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
49181 u32 entry_offset, dump, no_entries, buf_offset = 0;
49182 int i, k, ops_cnt, ops_index, dump_size = 0;
49183 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
49184 index 3df51fa..e9b517f 100644
49185 --- a/drivers/net/ethernet/realtek/r8169.c
49186 +++ b/drivers/net/ethernet/realtek/r8169.c
49187 @@ -788,22 +788,22 @@ struct rtl8169_private {
49189 void (*write)(struct rtl8169_private *, int, int);
49190 int (*read)(struct rtl8169_private *, int);
49192 + } __no_const mdio_ops;
49194 struct pll_power_ops {
49195 void (*down)(struct rtl8169_private *);
49196 void (*up)(struct rtl8169_private *);
49198 + } __no_const pll_power_ops;
49201 void (*enable)(struct rtl8169_private *);
49202 void (*disable)(struct rtl8169_private *);
49204 + } __no_const jumbo_ops;
49207 void (*write)(struct rtl8169_private *, int, int);
49208 u32 (*read)(struct rtl8169_private *, int);
49210 + } __no_const csi_ops;
49212 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
49213 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
49214 diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
49215 index a2e9aee..af41a0e 100644
49216 --- a/drivers/net/ethernet/sfc/ptp.c
49217 +++ b/drivers/net/ethernet/sfc/ptp.c
49218 @@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
49219 ptp->start.dma_addr);
49221 /* Clear flag that signals MC ready */
49222 - ACCESS_ONCE(*start) = 0;
49223 + ACCESS_ONCE_RW(*start) = 0;
49224 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
49225 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
49226 EFX_BUG_ON_PARANOID(rc);
49227 diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49228 index 08c483b..2c4a553 100644
49229 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49230 +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49231 @@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
49233 writel(value, ioaddr + MMC_CNTRL);
49235 - pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49236 - MMC_CNTRL, value);
49237 +// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49238 +// MMC_CNTRL, value);
49241 /* To mask all all interrupts.*/
49242 diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
49243 index de28504..7f1c1cd 100644
49244 --- a/drivers/net/ethernet/via/via-rhine.c
49245 +++ b/drivers/net/ethernet/via/via-rhine.c
49246 @@ -2525,7 +2525,7 @@ static struct platform_driver rhine_driver_platform = {
49250 -static struct dmi_system_id rhine_dmi_table[] __initdata = {
49251 +static const struct dmi_system_id rhine_dmi_table[] __initconst = {
49255 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
49256 index 41071d3..6e362e1 100644
49257 --- a/drivers/net/hyperv/hyperv_net.h
49258 +++ b/drivers/net/hyperv/hyperv_net.h
49259 @@ -176,7 +176,7 @@ struct rndis_device {
49260 enum rndis_device_state state;
49263 - atomic_t new_req_id;
49264 + atomic_unchecked_t new_req_id;
49266 spinlock_t request_lock;
49267 struct list_head req_list;
49268 diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
49269 index 9118cea..1a8e06a 100644
49270 --- a/drivers/net/hyperv/rndis_filter.c
49271 +++ b/drivers/net/hyperv/rndis_filter.c
49272 @@ -100,7 +100,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
49275 set = &rndis_msg->msg.set_req;
49276 - set->req_id = atomic_inc_return(&dev->new_req_id);
49277 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49279 /* Add to the request list */
49280 spin_lock_irqsave(&dev->request_lock, flags);
49281 @@ -923,7 +923,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
49283 /* Setup the rndis set */
49284 halt = &request->request_msg.msg.halt_req;
49285 - halt->req_id = atomic_inc_return(&dev->new_req_id);
49286 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49288 /* Ignore return since this msg is optional. */
49289 rndis_filter_send_request(dev, request);
49290 diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
49291 index 94570aa..1a798e1 100644
49292 --- a/drivers/net/ifb.c
49293 +++ b/drivers/net/ifb.c
49294 @@ -253,7 +253,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
49298 -static struct rtnl_link_ops ifb_link_ops __read_mostly = {
49299 +static struct rtnl_link_ops ifb_link_ops = {
49301 .priv_size = sizeof(struct ifb_private),
49302 .setup = ifb_setup,
49303 diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
49304 index 54549a6..0799442 100644
49305 --- a/drivers/net/ipvlan/ipvlan.h
49306 +++ b/drivers/net/ipvlan/ipvlan.h
49307 @@ -102,6 +102,11 @@ static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
49308 return rcu_dereference(d->rx_handler_data);
49311 +static inline struct ipvl_port *ipvlan_port_get_rcu_bh(const struct net_device *d)
49313 + return rcu_dereference_bh(d->rx_handler_data);
49316 static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d)
49318 return rtnl_dereference(d->rx_handler_data);
49319 diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
49320 index c30b5c3..b349dad 100644
49321 --- a/drivers/net/ipvlan/ipvlan_core.c
49322 +++ b/drivers/net/ipvlan/ipvlan_core.c
49323 @@ -507,7 +507,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
49324 int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
49326 struct ipvl_dev *ipvlan = netdev_priv(dev);
49327 - struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev);
49328 + struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);
49332 diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
49333 index 9f59f17..52cb38f 100644
49334 --- a/drivers/net/macvlan.c
49335 +++ b/drivers/net/macvlan.c
49336 @@ -335,7 +335,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
49340 - atomic_long_inc(&skb->dev->rx_dropped);
49341 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
49344 static void macvlan_flush_sources(struct macvlan_port *port,
49345 @@ -1480,13 +1480,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
49346 int macvlan_link_register(struct rtnl_link_ops *ops)
49348 /* common fields */
49349 - ops->priv_size = sizeof(struct macvlan_dev);
49350 - ops->validate = macvlan_validate;
49351 - ops->maxtype = IFLA_MACVLAN_MAX;
49352 - ops->policy = macvlan_policy;
49353 - ops->changelink = macvlan_changelink;
49354 - ops->get_size = macvlan_get_size;
49355 - ops->fill_info = macvlan_fill_info;
49356 + pax_open_kernel();
49357 + *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
49358 + *(void **)&ops->validate = macvlan_validate;
49359 + *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
49360 + *(const void **)&ops->policy = macvlan_policy;
49361 + *(void **)&ops->changelink = macvlan_changelink;
49362 + *(void **)&ops->get_size = macvlan_get_size;
49363 + *(void **)&ops->fill_info = macvlan_fill_info;
49364 + pax_close_kernel();
49366 return rtnl_link_register(ops);
49368 @@ -1572,7 +1574,7 @@ static int macvlan_device_event(struct notifier_block *unused,
49369 return NOTIFY_DONE;
49372 -static struct notifier_block macvlan_notifier_block __read_mostly = {
49373 +static struct notifier_block macvlan_notifier_block = {
49374 .notifier_call = macvlan_device_event,
49377 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
49378 index 8c350c5..30fdc98 100644
49379 --- a/drivers/net/macvtap.c
49380 +++ b/drivers/net/macvtap.c
49381 @@ -436,7 +436,7 @@ static void macvtap_setup(struct net_device *dev)
49382 dev->tx_queue_len = TUN_READQ_SIZE;
49385 -static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
49386 +static struct rtnl_link_ops macvtap_link_ops = {
49388 .setup = macvtap_setup,
49389 .newlink = macvtap_newlink,
49390 @@ -1033,7 +1033,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
49394 - if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49395 + if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49396 put_user(u, &ifr->ifr_flags))
49398 macvtap_put_vlan(vlan);
49399 @@ -1214,7 +1214,7 @@ static int macvtap_device_event(struct notifier_block *unused,
49400 return NOTIFY_DONE;
49403 -static struct notifier_block macvtap_notifier_block __read_mostly = {
49404 +static struct notifier_block macvtap_notifier_block = {
49405 .notifier_call = macvtap_device_event,
49408 @@ -1268,6 +1268,7 @@ static void macvtap_exit(void)
49409 class_unregister(macvtap_class);
49410 cdev_del(&macvtap_cdev);
49411 unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
49412 + idr_destroy(&minor_idr);
49414 module_exit(macvtap_exit);
49416 diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
49417 index 34924df..a747360 100644
49418 --- a/drivers/net/nlmon.c
49419 +++ b/drivers/net/nlmon.c
49420 @@ -154,7 +154,7 @@ static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
49424 -static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
49425 +static struct rtnl_link_ops nlmon_link_ops = {
49427 .priv_size = sizeof(struct nlmon),
49428 .setup = nlmon_setup,
49429 diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
49430 index d551df6..fa4c2df 100644
49431 --- a/drivers/net/phy/phy_device.c
49432 +++ b/drivers/net/phy/phy_device.c
49433 @@ -218,7 +218,7 @@ EXPORT_SYMBOL(phy_device_create);
49437 -static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
49438 +static int get_phy_c45_ids(struct mii_bus *bus, int addr, int *phy_id,
49439 struct phy_c45_device_ids *c45_ids) {
49442 @@ -288,7 +288,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
49443 * its return value is in turn returned.
49446 -static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
49447 +static int get_phy_id(struct mii_bus *bus, int addr, int *phy_id,
49448 bool is_c45, struct phy_c45_device_ids *c45_ids)
49451 @@ -326,7 +326,7 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
49452 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
49454 struct phy_c45_device_ids c45_ids = {0};
49459 r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
49460 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
49461 index 9d15566..5ad4ef6 100644
49462 --- a/drivers/net/ppp/ppp_generic.c
49463 +++ b/drivers/net/ppp/ppp_generic.c
49464 @@ -1022,7 +1022,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49465 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
49466 struct ppp_stats stats;
49467 struct ppp_comp_stats cstats;
49471 case SIOCGPPPSTATS:
49472 @@ -1044,8 +1043,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49476 - vers = PPP_VERSION;
49477 - if (copy_to_user(addr, vers, strlen(vers) + 1))
49478 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
49482 diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
49483 index 079f7ad..b2a2bfa7 100644
49484 --- a/drivers/net/slip/slhc.c
49485 +++ b/drivers/net/slip/slhc.c
49486 @@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
49487 register struct tcphdr *thp;
49488 register struct iphdr *ip;
49489 register struct cstate *cs;
49491 + long len, hdrlen;
49492 unsigned char *cp = icp;
49494 /* We've got a compressed packet; read the change byte */
49495 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
49496 index 6928448..e30c57f 100644
49497 --- a/drivers/net/team/team.c
49498 +++ b/drivers/net/team/team.c
49499 @@ -2103,7 +2103,7 @@ static unsigned int team_get_num_rx_queues(void)
49500 return TEAM_DEFAULT_NUM_RX_QUEUES;
49503 -static struct rtnl_link_ops team_link_ops __read_mostly = {
49504 +static struct rtnl_link_ops team_link_ops = {
49506 .priv_size = sizeof(struct team),
49507 .setup = team_setup,
49508 @@ -2893,7 +2893,7 @@ static int team_device_event(struct notifier_block *unused,
49509 return NOTIFY_DONE;
49512 -static struct notifier_block team_notifier_block __read_mostly = {
49513 +static struct notifier_block team_notifier_block = {
49514 .notifier_call = team_device_event,
49517 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
49518 index e470ae5..e812f5e 100644
49519 --- a/drivers/net/tun.c
49520 +++ b/drivers/net/tun.c
49521 @@ -1421,7 +1421,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
49525 -static struct rtnl_link_ops tun_link_ops __read_mostly = {
49526 +static struct rtnl_link_ops tun_link_ops = {
49528 .priv_size = sizeof(struct tun_struct),
49529 .setup = tun_setup,
49530 @@ -1828,7 +1828,7 @@ unlock:
49533 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
49534 - unsigned long arg, int ifreq_len)
49535 + unsigned long arg, size_t ifreq_len)
49537 struct tun_file *tfile = file->private_data;
49538 struct tun_struct *tun;
49539 @@ -1842,6 +1842,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
49543 + if (ifreq_len > sizeof ifr)
49546 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
49547 if (copy_from_user(&ifr, argp, ifreq_len))
49549 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
49550 index 111d907..1ee643e 100644
49551 --- a/drivers/net/usb/hso.c
49552 +++ b/drivers/net/usb/hso.c
49554 #include <asm/byteorder.h>
49555 #include <linux/serial_core.h>
49556 #include <linux/serial.h>
49558 +#include <asm/local.h>
49560 #define MOD_AUTHOR "Option Wireless"
49561 #define MOD_DESCRIPTION "USB High Speed Option driver"
49562 @@ -1183,7 +1183,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
49565 urb = serial->rx_urb[0];
49566 - if (serial->port.count > 0) {
49567 + if (atomic_read(&serial->port.count) > 0) {
49568 count = put_rxbuf_data(urb, serial);
49571 @@ -1221,7 +1221,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
49572 DUMP1(urb->transfer_buffer, urb->actual_length);
49574 /* Anyone listening? */
49575 - if (serial->port.count == 0)
49576 + if (atomic_read(&serial->port.count) == 0)
49579 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
49580 @@ -1282,8 +1282,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
49581 tty_port_tty_set(&serial->port, tty);
49583 /* check for port already opened, if not set the termios */
49584 - serial->port.count++;
49585 - if (serial->port.count == 1) {
49586 + if (atomic_inc_return(&serial->port.count) == 1) {
49587 serial->rx_state = RX_IDLE;
49588 /* Force default termio settings */
49589 _hso_serial_set_termios(tty, NULL);
49590 @@ -1293,7 +1292,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
49591 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
49593 hso_stop_serial_device(serial->parent);
49594 - serial->port.count--;
49595 + atomic_dec(&serial->port.count);
49597 kref_get(&serial->parent->ref);
49599 @@ -1331,10 +1330,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
49601 /* reset the rts and dtr */
49602 /* do the actual close */
49603 - serial->port.count--;
49604 + atomic_dec(&serial->port.count);
49606 - if (serial->port.count <= 0) {
49607 - serial->port.count = 0;
49608 + if (atomic_read(&serial->port.count) <= 0) {
49609 + atomic_set(&serial->port.count, 0);
49610 tty_port_tty_set(&serial->port, NULL);
49612 hso_stop_serial_device(serial->parent);
49613 @@ -1417,7 +1416,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
49615 /* the actual setup */
49616 spin_lock_irqsave(&serial->serial_lock, flags);
49617 - if (serial->port.count)
49618 + if (atomic_read(&serial->port.count))
49619 _hso_serial_set_termios(tty, old);
49621 tty->termios = *old;
49622 @@ -1891,7 +1890,7 @@ static void intr_callback(struct urb *urb)
49623 D1("Pending read interrupt on port %d\n", i);
49624 spin_lock(&serial->serial_lock);
49625 if (serial->rx_state == RX_IDLE &&
49626 - serial->port.count > 0) {
49627 + atomic_read(&serial->port.count) > 0) {
49628 /* Setup and send a ctrl req read on
49630 if (!serial->rx_urb_filled[0]) {
49631 @@ -3058,7 +3057,7 @@ static int hso_resume(struct usb_interface *iface)
49632 /* Start all serial ports */
49633 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
49634 if (serial_table[i] && (serial_table[i]->interface == iface)) {
49635 - if (dev2ser(serial_table[i])->port.count) {
49636 + if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
49638 hso_start_serial_device(serial_table[i], GFP_NOIO);
49639 hso_kick_transmit(dev2ser(serial_table[i]));
49640 diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
49641 index aafa1a1..f59c651 100644
49642 --- a/drivers/net/usb/r8152.c
49643 +++ b/drivers/net/usb/r8152.c
49644 @@ -602,7 +602,7 @@ struct r8152 {
49645 void (*unload)(struct r8152 *);
49646 int (*eee_get)(struct r8152 *, struct ethtool_eee *);
49647 int (*eee_set)(struct r8152 *, struct ethtool_eee *);
49649 + } __no_const rtl_ops;
49653 diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
49654 index a2515887..6d13233 100644
49655 --- a/drivers/net/usb/sierra_net.c
49656 +++ b/drivers/net/usb/sierra_net.c
49657 @@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
49658 /* atomic counter partially included in MAC address to make sure 2 devices
49659 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
49661 -static atomic_t iface_counter = ATOMIC_INIT(0);
49662 +static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
49665 * SYNC Timer Delay definition used to set the expiry time
49666 @@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
49667 dev->net->netdev_ops = &sierra_net_device_ops;
49669 /* change MAC addr to include, ifacenum, and to be unique */
49670 - dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
49671 + dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
49672 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
49674 /* we will have to manufacture ethernet headers, prepare template */
49675 diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
49676 index 63c7810..4ad33aa 100644
49677 --- a/drivers/net/virtio_net.c
49678 +++ b/drivers/net/virtio_net.c
49679 @@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
49680 #define RECEIVE_AVG_WEIGHT 64
49682 /* Minimum alignment for mergeable packet buffers. */
49683 -#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
49684 +#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
49686 #define VIRTNET_DRIVER_VERSION "1.0.0"
49688 diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
49689 index 61c0840..92e7f7e 100644
49690 --- a/drivers/net/vmxnet3/vmxnet3_drv.c
49691 +++ b/drivers/net/vmxnet3/vmxnet3_drv.c
49692 @@ -1167,7 +1167,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
49693 static const u32 rxprod_reg[2] = {
49694 VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
49697 + u32 num_pkts = 0;
49698 bool skip_page_frags = false;
49699 struct Vmxnet3_RxCompDesc *rcd;
49700 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
49701 @@ -1185,13 +1185,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
49702 struct Vmxnet3_RxDesc *rxd;
49704 struct vmxnet3_cmd_ring *ring = NULL;
49705 - if (num_rxd >= quota) {
49706 + if (num_pkts >= quota) {
49707 /* we may stop even before we see the EOP desc of
49713 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
49715 ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
49716 @@ -1323,6 +1322,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
49717 napi_gro_receive(&rq->napi, skb);
49724 @@ -1353,7 +1353,7 @@ rcd_done:
49725 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
49733 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
49734 index 21a0fbf..055b54f 100644
49735 --- a/drivers/net/vxlan.c
49736 +++ b/drivers/net/vxlan.c
49737 @@ -2878,7 +2878,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev)
49741 -static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
49742 +static struct rtnl_link_ops vxlan_link_ops = {
49744 .maxtype = IFLA_VXLAN_MAX,
49745 .policy = vxlan_policy,
49746 @@ -2926,7 +2926,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
49747 return NOTIFY_DONE;
49750 -static struct notifier_block vxlan_notifier_block __read_mostly = {
49751 +static struct notifier_block vxlan_notifier_block = {
49752 .notifier_call = vxlan_lowerdev_event,
49755 diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
49756 index 5920c99..ff2e4a5 100644
49757 --- a/drivers/net/wan/lmc/lmc_media.c
49758 +++ b/drivers/net/wan/lmc/lmc_media.c
49759 @@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
49760 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
49762 lmc_media_t lmc_ds3_media = {
49763 - lmc_ds3_init, /* special media init stuff */
49764 - lmc_ds3_default, /* reset to default state */
49765 - lmc_ds3_set_status, /* reset status to state provided */
49766 - lmc_dummy_set_1, /* set clock source */
49767 - lmc_dummy_set2_1, /* set line speed */
49768 - lmc_ds3_set_100ft, /* set cable length */
49769 - lmc_ds3_set_scram, /* set scrambler */
49770 - lmc_ds3_get_link_status, /* get link status */
49771 - lmc_dummy_set_1, /* set link status */
49772 - lmc_ds3_set_crc_length, /* set CRC length */
49773 - lmc_dummy_set_1, /* set T1 or E1 circuit type */
49775 + .init = lmc_ds3_init, /* special media init stuff */
49776 + .defaults = lmc_ds3_default, /* reset to default state */
49777 + .set_status = lmc_ds3_set_status, /* reset status to state provided */
49778 + .set_clock_source = lmc_dummy_set_1, /* set clock source */
49779 + .set_speed = lmc_dummy_set2_1, /* set line speed */
49780 + .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
49781 + .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
49782 + .get_link_status = lmc_ds3_get_link_status, /* get link status */
49783 + .set_link_status = lmc_dummy_set_1, /* set link status */
49784 + .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
49785 + .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49786 + .watchdog = lmc_ds3_watchdog
49789 lmc_media_t lmc_hssi_media = {
49790 - lmc_hssi_init, /* special media init stuff */
49791 - lmc_hssi_default, /* reset to default state */
49792 - lmc_hssi_set_status, /* reset status to state provided */
49793 - lmc_hssi_set_clock, /* set clock source */
49794 - lmc_dummy_set2_1, /* set line speed */
49795 - lmc_dummy_set_1, /* set cable length */
49796 - lmc_dummy_set_1, /* set scrambler */
49797 - lmc_hssi_get_link_status, /* get link status */
49798 - lmc_hssi_set_link_status, /* set link status */
49799 - lmc_hssi_set_crc_length, /* set CRC length */
49800 - lmc_dummy_set_1, /* set T1 or E1 circuit type */
49801 - lmc_hssi_watchdog
49802 + .init = lmc_hssi_init, /* special media init stuff */
49803 + .defaults = lmc_hssi_default, /* reset to default state */
49804 + .set_status = lmc_hssi_set_status, /* reset status to state provided */
49805 + .set_clock_source = lmc_hssi_set_clock, /* set clock source */
49806 + .set_speed = lmc_dummy_set2_1, /* set line speed */
49807 + .set_cable_length = lmc_dummy_set_1, /* set cable length */
49808 + .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49809 + .get_link_status = lmc_hssi_get_link_status, /* get link status */
49810 + .set_link_status = lmc_hssi_set_link_status, /* set link status */
49811 + .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
49812 + .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49813 + .watchdog = lmc_hssi_watchdog
49816 -lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
49817 - lmc_ssi_default, /* reset to default state */
49818 - lmc_ssi_set_status, /* reset status to state provided */
49819 - lmc_ssi_set_clock, /* set clock source */
49820 - lmc_ssi_set_speed, /* set line speed */
49821 - lmc_dummy_set_1, /* set cable length */
49822 - lmc_dummy_set_1, /* set scrambler */
49823 - lmc_ssi_get_link_status, /* get link status */
49824 - lmc_ssi_set_link_status, /* set link status */
49825 - lmc_ssi_set_crc_length, /* set CRC length */
49826 - lmc_dummy_set_1, /* set T1 or E1 circuit type */
49828 +lmc_media_t lmc_ssi_media = {
49829 + .init = lmc_ssi_init, /* special media init stuff */
49830 + .defaults = lmc_ssi_default, /* reset to default state */
49831 + .set_status = lmc_ssi_set_status, /* reset status to state provided */
49832 + .set_clock_source = lmc_ssi_set_clock, /* set clock source */
49833 + .set_speed = lmc_ssi_set_speed, /* set line speed */
49834 + .set_cable_length = lmc_dummy_set_1, /* set cable length */
49835 + .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49836 + .get_link_status = lmc_ssi_get_link_status, /* get link status */
49837 + .set_link_status = lmc_ssi_set_link_status, /* set link status */
49838 + .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
49839 + .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49840 + .watchdog = lmc_ssi_watchdog
49843 lmc_media_t lmc_t1_media = {
49844 - lmc_t1_init, /* special media init stuff */
49845 - lmc_t1_default, /* reset to default state */
49846 - lmc_t1_set_status, /* reset status to state provided */
49847 - lmc_t1_set_clock, /* set clock source */
49848 - lmc_dummy_set2_1, /* set line speed */
49849 - lmc_dummy_set_1, /* set cable length */
49850 - lmc_dummy_set_1, /* set scrambler */
49851 - lmc_t1_get_link_status, /* get link status */
49852 - lmc_dummy_set_1, /* set link status */
49853 - lmc_t1_set_crc_length, /* set CRC length */
49854 - lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49856 + .init = lmc_t1_init, /* special media init stuff */
49857 + .defaults = lmc_t1_default, /* reset to default state */
49858 + .set_status = lmc_t1_set_status, /* reset status to state provided */
49859 + .set_clock_source = lmc_t1_set_clock, /* set clock source */
49860 + .set_speed = lmc_dummy_set2_1, /* set line speed */
49861 + .set_cable_length = lmc_dummy_set_1, /* set cable length */
49862 + .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49863 + .get_link_status = lmc_t1_get_link_status, /* get link status */
49864 + .set_link_status = lmc_dummy_set_1, /* set link status */
49865 + .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
49866 + .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49867 + .watchdog = lmc_t1_watchdog
49871 diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
49872 index feacc3b..5bac0de 100644
49873 --- a/drivers/net/wan/z85230.c
49874 +++ b/drivers/net/wan/z85230.c
49875 @@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
49877 struct z8530_irqhandler z8530_sync =
49884 + .status = z8530_status
49887 EXPORT_SYMBOL(z8530_sync);
49888 @@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
49891 static struct z8530_irqhandler z8530_dma_sync = {
49895 + .rx = z8530_dma_rx,
49896 + .tx = z8530_dma_tx,
49897 + .status = z8530_dma_status
49900 static struct z8530_irqhandler z8530_txdma_sync = {
49905 + .tx = z8530_dma_tx,
49906 + .status = z8530_dma_status
49910 @@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
49912 struct z8530_irqhandler z8530_nop=
49916 - z8530_status_clear
49917 + .rx = z8530_rx_clear,
49918 + .tx = z8530_tx_clear,
49919 + .status = z8530_status_clear
49923 diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
49924 index 0b60295..b8bfa5b 100644
49925 --- a/drivers/net/wimax/i2400m/rx.c
49926 +++ b/drivers/net/wimax/i2400m/rx.c
49927 @@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
49928 if (i2400m->rx_roq == NULL)
49929 goto error_roq_alloc;
49931 - rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
49932 + rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
49936 diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
49937 index d0c97c2..108f59b 100644
49938 --- a/drivers/net/wireless/airo.c
49939 +++ b/drivers/net/wireless/airo.c
49940 @@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
49941 struct airo_info *ai = dev->ml_priv;
49944 - static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
49945 + int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
49946 unsigned char *iobuf;
49948 /* Only super-user can write RIDs */
49949 diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
49950 index 49219c5..3625441 100644
49951 --- a/drivers/net/wireless/at76c50x-usb.c
49952 +++ b/drivers/net/wireless/at76c50x-usb.c
49953 @@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
49956 /* Convert timeout from the DFU status to jiffies */
49957 -static inline unsigned long at76_get_timeout(struct dfu_status *s)
49958 +static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
49960 return msecs_to_jiffies((s->poll_timeout[2] << 16)
49961 | (s->poll_timeout[1] << 8)
49962 diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
49963 index 2fd9e18..3f55bdd 100644
49964 --- a/drivers/net/wireless/ath/ath10k/htc.c
49965 +++ b/drivers/net/wireless/ath/ath10k/htc.c
49966 @@ -849,7 +849,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
49967 /* registered target arrival callback from the HIF layer */
49968 int ath10k_htc_init(struct ath10k *ar)
49970 - struct ath10k_hif_cb htc_callbacks;
49971 + static struct ath10k_hif_cb htc_callbacks = {
49972 + .rx_completion = ath10k_htc_rx_completion_handler,
49973 + .tx_completion = ath10k_htc_tx_completion_handler,
49975 struct ath10k_htc_ep *ep = NULL;
49976 struct ath10k_htc *htc = &ar->htc;
49978 @@ -858,8 +861,6 @@ int ath10k_htc_init(struct ath10k *ar)
49979 ath10k_htc_reset_endpoint_states(htc);
49981 /* setup HIF layer callbacks */
49982 - htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
49983 - htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
49986 /* Get HIF default pipe for HTC message exchange */
49987 diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
49988 index 527179c..a890150 100644
49989 --- a/drivers/net/wireless/ath/ath10k/htc.h
49990 +++ b/drivers/net/wireless/ath/ath10k/htc.h
49991 @@ -270,13 +270,13 @@ enum ath10k_htc_ep_id {
49993 struct ath10k_htc_ops {
49994 void (*target_send_suspend_complete)(struct ath10k *ar);
49998 struct ath10k_htc_ep_ops {
49999 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
50000 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
50001 void (*ep_tx_credits)(struct ath10k *);
50005 /* service connection information */
50006 struct ath10k_htc_svc_conn_req {
50007 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50008 index f816909..e56cd8b 100644
50009 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50010 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50011 @@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50012 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
50013 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
50015 - ACCESS_ONCE(ads->ds_link) = i->link;
50016 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
50017 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
50018 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
50020 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
50021 ctl6 = SM(i->keytype, AR_EncrType);
50022 @@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50024 if ((i->is_first || i->is_last) &&
50025 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
50026 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
50027 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
50028 | set11nTries(i->rates, 1)
50029 | set11nTries(i->rates, 2)
50030 | set11nTries(i->rates, 3)
50031 | (i->dur_update ? AR_DurUpdateEna : 0)
50032 | SM(0, AR_BurstDur);
50034 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
50035 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
50036 | set11nRate(i->rates, 1)
50037 | set11nRate(i->rates, 2)
50038 | set11nRate(i->rates, 3);
50040 - ACCESS_ONCE(ads->ds_ctl2) = 0;
50041 - ACCESS_ONCE(ads->ds_ctl3) = 0;
50042 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
50043 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
50046 if (!i->is_first) {
50047 - ACCESS_ONCE(ads->ds_ctl0) = 0;
50048 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
50049 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
50050 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
50051 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
50052 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
50056 @@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50060 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
50061 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
50062 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
50063 | SM(i->txpower[0], AR_XmitPower0)
50064 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
50065 @@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50066 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
50067 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
50069 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
50070 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
50071 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
50072 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
50074 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
50077 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
50078 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
50079 | set11nPktDurRTSCTS(i->rates, 1);
50081 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
50082 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
50083 | set11nPktDurRTSCTS(i->rates, 3);
50085 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
50086 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
50087 | set11nRateFlags(i->rates, 1)
50088 | set11nRateFlags(i->rates, 2)
50089 | set11nRateFlags(i->rates, 3)
50090 | SM(i->rtscts_rate, AR_RTSCTSRate);
50092 - ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
50093 - ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
50094 - ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
50095 + ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
50096 + ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
50097 + ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
50100 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
50101 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50102 index da84b70..83e4978 100644
50103 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50104 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50105 @@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50106 (i->qcu << AR_TxQcuNum_S) | desc_len;
50109 - ACCESS_ONCE(ads->info) = val;
50110 + ACCESS_ONCE_RW(ads->info) = val;
50112 checksum += i->link;
50113 - ACCESS_ONCE(ads->link) = i->link;
50114 + ACCESS_ONCE_RW(ads->link) = i->link;
50116 checksum += i->buf_addr[0];
50117 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
50118 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
50119 checksum += i->buf_addr[1];
50120 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
50121 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
50122 checksum += i->buf_addr[2];
50123 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
50124 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
50125 checksum += i->buf_addr[3];
50126 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
50127 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
50129 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
50130 - ACCESS_ONCE(ads->ctl3) = val;
50131 + ACCESS_ONCE_RW(ads->ctl3) = val;
50132 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
50133 - ACCESS_ONCE(ads->ctl5) = val;
50134 + ACCESS_ONCE_RW(ads->ctl5) = val;
50135 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
50136 - ACCESS_ONCE(ads->ctl7) = val;
50137 + ACCESS_ONCE_RW(ads->ctl7) = val;
50138 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
50139 - ACCESS_ONCE(ads->ctl9) = val;
50140 + ACCESS_ONCE_RW(ads->ctl9) = val;
50142 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
50143 - ACCESS_ONCE(ads->ctl10) = checksum;
50144 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
50146 if (i->is_first || i->is_last) {
50147 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
50148 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
50149 | set11nTries(i->rates, 1)
50150 | set11nTries(i->rates, 2)
50151 | set11nTries(i->rates, 3)
50152 | (i->dur_update ? AR_DurUpdateEna : 0)
50153 | SM(0, AR_BurstDur);
50155 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
50156 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
50157 | set11nRate(i->rates, 1)
50158 | set11nRate(i->rates, 2)
50159 | set11nRate(i->rates, 3);
50161 - ACCESS_ONCE(ads->ctl13) = 0;
50162 - ACCESS_ONCE(ads->ctl14) = 0;
50163 + ACCESS_ONCE_RW(ads->ctl13) = 0;
50164 + ACCESS_ONCE_RW(ads->ctl14) = 0;
50168 @@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50170 ctl17 = SM(i->keytype, AR_EncrType);
50171 if (!i->is_first) {
50172 - ACCESS_ONCE(ads->ctl11) = 0;
50173 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
50174 - ACCESS_ONCE(ads->ctl15) = 0;
50175 - ACCESS_ONCE(ads->ctl16) = 0;
50176 - ACCESS_ONCE(ads->ctl17) = ctl17;
50177 - ACCESS_ONCE(ads->ctl18) = 0;
50178 - ACCESS_ONCE(ads->ctl19) = 0;
50179 + ACCESS_ONCE_RW(ads->ctl11) = 0;
50180 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
50181 + ACCESS_ONCE_RW(ads->ctl15) = 0;
50182 + ACCESS_ONCE_RW(ads->ctl16) = 0;
50183 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
50184 + ACCESS_ONCE_RW(ads->ctl18) = 0;
50185 + ACCESS_ONCE_RW(ads->ctl19) = 0;
50189 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
50190 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
50191 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
50192 | SM(i->txpower[0], AR_XmitPower0)
50193 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
50194 @@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50195 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
50196 ctl12 |= SM(val, AR_PAPRDChainMask);
50198 - ACCESS_ONCE(ads->ctl12) = ctl12;
50199 - ACCESS_ONCE(ads->ctl17) = ctl17;
50200 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
50201 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
50203 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
50204 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
50205 | set11nPktDurRTSCTS(i->rates, 1);
50207 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
50208 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
50209 | set11nPktDurRTSCTS(i->rates, 3);
50211 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
50212 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
50213 | set11nRateFlags(i->rates, 1)
50214 | set11nRateFlags(i->rates, 2)
50215 | set11nRateFlags(i->rates, 3)
50216 | SM(i->rtscts_rate, AR_RTSCTSRate);
50218 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
50219 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
50221 - ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
50222 - ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
50223 - ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
50224 + ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
50225 + ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
50226 + ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
50229 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
50230 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
50231 index c1d2d03..08352db 100644
50232 --- a/drivers/net/wireless/ath/ath9k/hw.h
50233 +++ b/drivers/net/wireless/ath/ath9k/hw.h
50234 @@ -671,7 +671,7 @@ struct ath_hw_private_ops {
50235 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
50236 bool (*is_aic_enabled)(struct ath_hw *ah);
50237 #endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
50242 * struct ath_spec_scan - parameters for Atheros spectral scan
50243 @@ -747,7 +747,7 @@ struct ath_hw_ops {
50244 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
50245 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
50250 struct ath_nf_limits {
50252 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
50253 index b0badef..3e3464c 100644
50254 --- a/drivers/net/wireless/ath/ath9k/main.c
50255 +++ b/drivers/net/wireless/ath/ath9k/main.c
50256 @@ -2573,16 +2573,18 @@ void ath9k_fill_chanctx_ops(void)
50257 if (!ath9k_is_chanctx_enabled())
50260 - ath9k_ops.hw_scan = ath9k_hw_scan;
50261 - ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
50262 - ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
50263 - ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
50264 - ath9k_ops.add_chanctx = ath9k_add_chanctx;
50265 - ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
50266 - ath9k_ops.change_chanctx = ath9k_change_chanctx;
50267 - ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
50268 - ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
50269 - ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
50270 + pax_open_kernel();
50271 + *(void **)&ath9k_ops.hw_scan = ath9k_hw_scan;
50272 + *(void **)&ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
50273 + *(void **)&ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
50274 + *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
50275 + *(void **)&ath9k_ops.add_chanctx = ath9k_add_chanctx;
50276 + *(void **)&ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
50277 + *(void **)&ath9k_ops.change_chanctx = ath9k_change_chanctx;
50278 + *(void **)&ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
50279 + *(void **)&ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
50280 + *(void **)&ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
50281 + pax_close_kernel();
50285 diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
50286 index 058a9f2..d5cb1ba 100644
50287 --- a/drivers/net/wireless/b43/phy_lp.c
50288 +++ b/drivers/net/wireless/b43/phy_lp.c
50289 @@ -2502,7 +2502,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
50291 struct ssb_bus *bus = dev->dev->sdev->bus;
50293 - static const struct b206x_channel *chandata = NULL;
50294 + const struct b206x_channel *chandata = NULL;
50295 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
50296 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
50297 u16 old_comm15, scale;
50298 diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
50299 index e566580..2c218ca 100644
50300 --- a/drivers/net/wireless/iwlegacy/3945-mac.c
50301 +++ b/drivers/net/wireless/iwlegacy/3945-mac.c
50302 @@ -3631,7 +3631,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
50304 if (il3945_mod_params.disable_hw_scan) {
50305 D_INFO("Disabling hw_scan\n");
50306 - il3945_mac_ops.hw_scan = NULL;
50307 + pax_open_kernel();
50308 + *(void **)&il3945_mac_ops.hw_scan = NULL;
50309 + pax_close_kernel();
50312 D_INFO("*** LOAD DRIVER ***\n");
50313 diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50314 index 0ffb6ff..c0b7f0e 100644
50315 --- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50316 +++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50317 @@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
50319 struct iwl_priv *priv = file->private_data;
50325 memset(buf, 0, sizeof(buf));
50326 @@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
50327 struct iwl_priv *priv = file->private_data;
50334 memset(buf, 0, sizeof(buf));
50335 @@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
50337 struct iwl_priv *priv = file->private_data;
50343 memset(buf, 0, sizeof(buf));
50344 @@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
50346 struct iwl_priv *priv = file->private_data;
50352 memset(buf, 0, sizeof(buf));
50353 @@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
50354 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
50355 DEBUGFS_READ_FILE_OPS(current_sleep_command);
50357 -static const char *fmt_value = " %-30s %10u\n";
50358 -static const char *fmt_hex = " %-30s 0x%02X\n";
50359 -static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
50360 -static const char *fmt_header =
50361 +static const char fmt_value[] = " %-30s %10u\n";
50362 +static const char fmt_hex[] = " %-30s 0x%02X\n";
50363 +static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
50364 +static const char fmt_header[] =
50365 "%-32s current cumulative delta max\n";
50367 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
50368 @@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
50370 struct iwl_priv *priv = file->private_data;
50376 memset(buf, 0, sizeof(buf));
50377 @@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
50379 struct iwl_priv *priv = file->private_data;
50385 memset(buf, 0, sizeof(buf));
50386 @@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
50388 struct iwl_priv *priv = file->private_data;
50394 memset(buf, 0, sizeof(buf));
50395 @@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
50397 struct iwl_priv *priv = file->private_data;
50403 memset(buf, 0, sizeof(buf));
50404 @@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
50406 struct iwl_priv *priv = file->private_data;
50412 memset(buf, 0, sizeof(buf));
50413 @@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
50415 struct iwl_priv *priv = file->private_data;
50421 if (!priv->cfg->ht_params)
50422 @@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
50424 struct iwl_priv *priv = file->private_data;
50429 memset(buf, 0, sizeof(buf));
50430 buf_size = min(count, sizeof(buf) - 1);
50431 @@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
50432 struct iwl_priv *priv = file->private_data;
50433 u32 event_log_flag;
50438 /* check that the interface is up */
50439 if (!iwl_is_ready(priv))
50440 @@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
50441 struct iwl_priv *priv = file->private_data;
50443 u32 calib_disabled;
50447 memset(buf, 0, sizeof(buf));
50448 buf_size = min(count, sizeof(buf) - 1);
50449 diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
50450 index dc17909..989c9fb 100644
50451 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c
50452 +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
50453 @@ -1919,7 +1919,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
50454 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
50461 memset(buf, 0, sizeof(buf));
50462 @@ -1940,7 +1940,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
50464 struct iwl_trans *trans = file->private_data;
50470 memset(buf, 0, sizeof(buf));
50471 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
50472 index d5c0a1a..d056b20 100644
50473 --- a/drivers/net/wireless/mac80211_hwsim.c
50474 +++ b/drivers/net/wireless/mac80211_hwsim.c
50475 @@ -3149,20 +3149,20 @@ static int __init init_mac80211_hwsim(void)
50479 - mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
50480 - mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50481 - mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50482 - mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50483 - mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50484 - mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50485 - mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50486 - mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50487 - mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50488 - mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50489 - mac80211_hwsim_mchan_ops.assign_vif_chanctx =
50490 - mac80211_hwsim_assign_vif_chanctx;
50491 - mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
50492 - mac80211_hwsim_unassign_vif_chanctx;
50493 + pax_open_kernel();
50494 + memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
50495 + *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50496 + *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50497 + *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50498 + *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50499 + *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50500 + *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50501 + *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50502 + *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50503 + *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50504 + *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
50505 + *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
50506 + pax_close_kernel();
50508 spin_lock_init(&hwsim_radio_lock);
50509 INIT_LIST_HEAD(&hwsim_radios);
50510 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
50511 index d72ff8e..c209a45 100644
50512 --- a/drivers/net/wireless/rndis_wlan.c
50513 +++ b/drivers/net/wireless/rndis_wlan.c
50514 @@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
50516 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
50518 - if (rts_threshold < 0 || rts_threshold > 2347)
50519 + if (rts_threshold > 2347)
50520 rts_threshold = 2347;
50522 tmp = cpu_to_le32(rts_threshold);
50523 diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
50524 index 9bb398b..b0cc047 100644
50525 --- a/drivers/net/wireless/rt2x00/rt2x00.h
50526 +++ b/drivers/net/wireless/rt2x00/rt2x00.h
50527 @@ -375,7 +375,7 @@ struct rt2x00_intf {
50528 * for hardware which doesn't support hardware
50529 * sequence counting.
50532 + atomic_unchecked_t seqno;
50535 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
50536 diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
50537 index 68b620b..92ecd9e 100644
50538 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c
50539 +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
50540 @@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
50541 * sequence counter given by mac80211.
50543 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
50544 - seqno = atomic_add_return(0x10, &intf->seqno);
50545 + seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
50547 - seqno = atomic_read(&intf->seqno);
50548 + seqno = atomic_read_unchecked(&intf->seqno);
50550 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
50551 hdr->seq_ctrl |= cpu_to_le16(seqno);
50552 diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
50553 index b661f896..ddf7d2b 100644
50554 --- a/drivers/net/wireless/ti/wl1251/sdio.c
50555 +++ b/drivers/net/wireless/ti/wl1251/sdio.c
50556 @@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
50558 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
50560 - wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50561 - wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50562 + pax_open_kernel();
50563 + *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50564 + *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50565 + pax_close_kernel();
50567 wl1251_info("using dedicated interrupt line");
50569 - wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
50570 - wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
50571 + pax_open_kernel();
50572 + *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
50573 + *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
50574 + pax_close_kernel();
50576 wl1251_info("using SDIO interrupt");
50578 diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
50579 index af0fe2e..d04986b 100644
50580 --- a/drivers/net/wireless/ti/wl12xx/main.c
50581 +++ b/drivers/net/wireless/ti/wl12xx/main.c
50582 @@ -655,7 +655,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
50583 sizeof(wl->conf.mem));
50585 /* read data preparation is only needed by wl127x */
50586 - wl->ops->prepare_read = wl127x_prepare_read;
50587 + pax_open_kernel();
50588 + *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
50589 + pax_close_kernel();
50591 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
50592 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
50593 @@ -680,7 +682,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
50594 sizeof(wl->conf.mem));
50596 /* read data preparation is only needed by wl127x */
50597 - wl->ops->prepare_read = wl127x_prepare_read;
50598 + pax_open_kernel();
50599 + *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
50600 + pax_close_kernel();
50602 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
50603 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
50604 diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
50605 index 717c4f5..a813aeb 100644
50606 --- a/drivers/net/wireless/ti/wl18xx/main.c
50607 +++ b/drivers/net/wireless/ti/wl18xx/main.c
50608 @@ -1923,8 +1923,10 @@ static int wl18xx_setup(struct wl1271 *wl)
50611 if (!checksum_param) {
50612 - wl18xx_ops.set_rx_csum = NULL;
50613 - wl18xx_ops.init_vif = NULL;
50614 + pax_open_kernel();
50615 + *(void **)&wl18xx_ops.set_rx_csum = NULL;
50616 + *(void **)&wl18xx_ops.init_vif = NULL;
50617 + pax_close_kernel();
50620 /* Enable 11a Band only if we have 5G antennas */
50621 diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
50622 index a912dc0..a8225ba 100644
50623 --- a/drivers/net/wireless/zd1211rw/zd_usb.c
50624 +++ b/drivers/net/wireless/zd1211rw/zd_usb.c
50625 @@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
50627 struct zd_usb *usb = urb->context;
50628 struct zd_usb_interrupt *intr = &usb->intr;
50630 + unsigned int len;
50633 ZD_ASSERT(in_interrupt());
50634 diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
50635 index 0d25943..0866c5d 100644
50636 --- a/drivers/net/xen-netback/netback.c
50637 +++ b/drivers/net/xen-netback/netback.c
50638 @@ -1571,13 +1571,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
50642 - BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
50643 + BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
50645 queue->dealloc_ring[pending_index(dc++)];
50647 - pending_idx_release[gop-queue->tx_unmap_ops] =
50648 + pending_idx_release[gop - queue->tx_unmap_ops] =
50650 - queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
50651 + queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
50652 queue->mmap_pages[pending_idx];
50653 gnttab_set_unmap_op(gop,
50654 idx_to_kaddr(queue, pending_idx),
50655 diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
50656 index ce2e2cf..f81e500 100644
50657 --- a/drivers/nfc/nfcwilink.c
50658 +++ b/drivers/nfc/nfcwilink.c
50659 @@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
50661 static int nfcwilink_probe(struct platform_device *pdev)
50663 - static struct nfcwilink *drv;
50664 + struct nfcwilink *drv;
50668 diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
50669 index d251f72..0512865 100644
50670 --- a/drivers/nfc/st21nfca/st21nfca.c
50671 +++ b/drivers/nfc/st21nfca/st21nfca.c
50672 @@ -148,14 +148,14 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
50673 ST21NFCA_DEVICE_MGNT_GATE,
50674 ST21NFCA_DEVICE_MGNT_PIPE);
50679 /* Get pipe list */
50680 r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
50681 ST21NFCA_DM_GETINFO, pipe_list, sizeof(pipe_list),
50687 /* Complete the existing gate_pipe table */
50688 for (i = 0; i < skb_pipe_list->len; i++) {
50689 @@ -181,6 +181,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
50690 info->src_host_id != ST21NFCA_ESE_HOST_ID) {
50691 pr_err("Unexpected apdu_reader pipe on host %x\n",
50692 info->src_host_id);
50693 + kfree_skb(skb_pipe_info);
50697 @@ -200,6 +201,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
50698 hdev->pipes[st21nfca_gates[j].pipe].dest_host =
50701 + kfree_skb(skb_pipe_info);
50705 @@ -214,13 +216,12 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
50706 st21nfca_gates[i].gate,
50707 st21nfca_gates[i].pipe);
50714 memcpy(hdev->init_data.gates, st21nfca_gates, sizeof(st21nfca_gates));
50716 - kfree_skb(skb_pipe_info);
50718 kfree_skb(skb_pipe_list);
50721 diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
50722 index cde35c5d01..2dbfdbbf 100644
50723 --- a/drivers/of/fdt.c
50724 +++ b/drivers/of/fdt.c
50725 @@ -1136,7 +1136,9 @@ static int __init of_fdt_raw_init(void)
50726 pr_warn("fdt: not creating '/sys/firmware/fdt': CRC check failed\n");
50729 - of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
50730 + pax_open_kernel();
50731 + *(size_t *)&of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
50732 + pax_close_kernel();
50733 return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
50735 late_initcall(of_fdt_raw_init);
50736 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
50737 index 82f7000..d6d0447 100644
50738 --- a/drivers/oprofile/buffer_sync.c
50739 +++ b/drivers/oprofile/buffer_sync.c
50740 @@ -345,7 +345,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
50741 if (cookie == NO_COOKIE)
50743 if (cookie == INVALID_COOKIE) {
50744 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
50745 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
50748 if (cookie != last_cookie) {
50749 @@ -389,14 +389,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
50750 /* add userspace sample */
50753 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
50754 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
50758 cookie = lookup_dcookie(mm, s->eip, &offset);
50760 if (cookie == INVALID_COOKIE) {
50761 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
50762 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
50766 @@ -554,7 +554,7 @@ void sync_buffer(int cpu)
50767 /* ignore backtraces if failed to add a sample */
50768 if (state == sb_bt_start) {
50769 state = sb_bt_ignore;
50770 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
50771 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
50775 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
50776 index c0cc4e7..44d4e54 100644
50777 --- a/drivers/oprofile/event_buffer.c
50778 +++ b/drivers/oprofile/event_buffer.c
50779 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
50782 if (buffer_pos == buffer_size) {
50783 - atomic_inc(&oprofile_stats.event_lost_overflow);
50784 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
50788 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
50789 index ed2c3ec..deda85a 100644
50790 --- a/drivers/oprofile/oprof.c
50791 +++ b/drivers/oprofile/oprof.c
50792 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
50793 if (oprofile_ops.switch_events())
50796 - atomic_inc(&oprofile_stats.multiplex_counter);
50797 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
50798 start_switch_worker();
50801 diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
50802 index ee2cfce..7f8f699 100644
50803 --- a/drivers/oprofile/oprofile_files.c
50804 +++ b/drivers/oprofile/oprofile_files.c
50805 @@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
50807 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
50809 -static ssize_t timeout_read(struct file *file, char __user *buf,
50810 +static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
50811 size_t count, loff_t *offset)
50813 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
50814 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
50815 index 59659ce..6c860a0 100644
50816 --- a/drivers/oprofile/oprofile_stats.c
50817 +++ b/drivers/oprofile/oprofile_stats.c
50818 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
50819 cpu_buf->sample_invalid_eip = 0;
50822 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
50823 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
50824 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
50825 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
50826 - atomic_set(&oprofile_stats.multiplex_counter, 0);
50827 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
50828 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
50829 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
50830 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
50831 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
50835 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
50836 index 1fc622b..8c48fc3 100644
50837 --- a/drivers/oprofile/oprofile_stats.h
50838 +++ b/drivers/oprofile/oprofile_stats.h
50839 @@ -13,11 +13,11 @@
50840 #include <linux/atomic.h>
50842 struct oprofile_stat_struct {
50843 - atomic_t sample_lost_no_mm;
50844 - atomic_t sample_lost_no_mapping;
50845 - atomic_t bt_lost_no_mapping;
50846 - atomic_t event_lost_overflow;
50847 - atomic_t multiplex_counter;
50848 + atomic_unchecked_t sample_lost_no_mm;
50849 + atomic_unchecked_t sample_lost_no_mapping;
50850 + atomic_unchecked_t bt_lost_no_mapping;
50851 + atomic_unchecked_t event_lost_overflow;
50852 + atomic_unchecked_t multiplex_counter;
50855 extern struct oprofile_stat_struct oprofile_stats;
50856 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
50857 index dd92c5e..dfc04b5 100644
50858 --- a/drivers/oprofile/oprofilefs.c
50859 +++ b/drivers/oprofile/oprofilefs.c
50860 @@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
50862 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
50864 - atomic_t *val = file->private_data;
50865 - return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
50866 + atomic_unchecked_t *val = file->private_data;
50867 + return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
50871 @@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
50874 int oprofilefs_create_ro_atomic(struct dentry *root,
50875 - char const *name, atomic_t *val)
50876 + char const *name, atomic_unchecked_t *val)
50878 return __oprofilefs_create_file(root, name,
50879 &atomic_ro_fops, 0444, val);
50880 diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
50881 index bdef916..88c7dee 100644
50882 --- a/drivers/oprofile/timer_int.c
50883 +++ b/drivers/oprofile/timer_int.c
50884 @@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
50888 -static struct notifier_block __refdata oprofile_cpu_notifier = {
50889 +static struct notifier_block oprofile_cpu_notifier = {
50890 .notifier_call = oprofile_cpu_notify,
50893 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
50894 index 3b47080..6cd05dd 100644
50895 --- a/drivers/parport/procfs.c
50896 +++ b/drivers/parport/procfs.c
50897 @@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
50901 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
50902 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
50905 #ifdef CONFIG_PARPORT_1284
50906 @@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
50910 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
50911 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
50913 #endif /* IEEE1284.3 support. */
50915 diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
50916 index ba46e58..90cfc24 100644
50917 --- a/drivers/pci/host/pci-host-generic.c
50918 +++ b/drivers/pci/host/pci-host-generic.c
50920 #include <linux/platform_device.h>
50922 struct gen_pci_cfg_bus_ops {
50923 + struct pci_ops ops;
50925 - void __iomem *(*map_bus)(struct pci_bus *, unsigned int, int);
50929 struct gen_pci_cfg_windows {
50930 struct resource res;
50931 @@ -56,8 +56,12 @@ static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
50934 static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = {
50936 + .map_bus = gen_pci_map_cfg_bus_cam,
50937 + .read = pci_generic_config_read,
50938 + .write = pci_generic_config_write,
50941 - .map_bus = gen_pci_map_cfg_bus_cam,
50944 static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
50945 @@ -72,13 +76,12 @@ static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
50948 static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = {
50950 + .map_bus = gen_pci_map_cfg_bus_ecam,
50951 + .read = pci_generic_config_read,
50952 + .write = pci_generic_config_write,
50955 - .map_bus = gen_pci_map_cfg_bus_ecam,
50958 -static struct pci_ops gen_pci_ops = {
50959 - .read = pci_generic_config_read,
50960 - .write = pci_generic_config_write,
50963 static const struct of_device_id gen_pci_of_match[] = {
50964 @@ -219,7 +222,6 @@ static int gen_pci_probe(struct platform_device *pdev)
50965 .private_data = (void **)&pci,
50966 .setup = gen_pci_setup,
50967 .map_irq = of_irq_parse_and_map_pci,
50968 - .ops = &gen_pci_ops,
50972 @@ -241,7 +243,7 @@ static int gen_pci_probe(struct platform_device *pdev)
50974 of_id = of_match_node(gen_pci_of_match, np);
50975 pci->cfg.ops = of_id->data;
50976 - gen_pci_ops.map_bus = pci->cfg.ops->map_bus;
50977 + hw.ops = &pci->cfg.ops->ops;
50978 pci->host.dev.parent = dev;
50979 INIT_LIST_HEAD(&pci->host.windows);
50980 INIT_LIST_HEAD(&pci->resources);
50981 diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
50982 index 6ca2399..68d866b 100644
50983 --- a/drivers/pci/hotplug/acpiphp_ibm.c
50984 +++ b/drivers/pci/hotplug/acpiphp_ibm.c
50985 @@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
50989 - ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50990 + pax_open_kernel();
50991 + *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50992 + pax_close_kernel();
50993 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
50996 diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
50997 index 66b7bbe..26bee78 100644
50998 --- a/drivers/pci/hotplug/cpcihp_generic.c
50999 +++ b/drivers/pci/hotplug/cpcihp_generic.c
51000 @@ -73,7 +73,6 @@ static u16 port;
51001 static unsigned int enum_bit;
51002 static u8 enum_mask;
51004 -static struct cpci_hp_controller_ops generic_hpc_ops;
51005 static struct cpci_hp_controller generic_hpc;
51007 static int __init validate_parameters(void)
51008 @@ -139,6 +138,10 @@ static int query_enum(void)
51009 return ((value & enum_mask) == enum_mask);
51012 +static struct cpci_hp_controller_ops generic_hpc_ops = {
51013 + .query_enum = query_enum,
51016 static int __init cpcihp_generic_init(void)
51019 @@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
51022 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
51023 - generic_hpc_ops.query_enum = query_enum;
51024 generic_hpc.ops = &generic_hpc_ops;
51026 status = cpci_hp_register_controller(&generic_hpc);
51027 diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
51028 index 7ecf34e..effed62 100644
51029 --- a/drivers/pci/hotplug/cpcihp_zt5550.c
51030 +++ b/drivers/pci/hotplug/cpcihp_zt5550.c
51032 /* local variables */
51035 -static struct cpci_hp_controller_ops zt5550_hpc_ops;
51036 static struct cpci_hp_controller zt5550_hpc;
51038 /* Primary cPCI bus bridge device */
51039 @@ -204,6 +203,10 @@ static int zt5550_hc_disable_irq(void)
51043 +static struct cpci_hp_controller_ops zt5550_hpc_ops = {
51044 + .query_enum = zt5550_hc_query_enum,
51047 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
51050 @@ -215,16 +218,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
51051 dbg("returned from zt5550_hc_config");
51053 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
51054 - zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
51055 zt5550_hpc.ops = &zt5550_hpc_ops;
51057 zt5550_hpc.irq = hc_dev->irq;
51058 zt5550_hpc.irq_flags = IRQF_SHARED;
51059 zt5550_hpc.dev_id = hc_dev;
51061 - zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
51062 - zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
51063 - zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
51064 + pax_open_kernel();
51065 + *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
51066 + *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
51067 + *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
51068 + pax_close_kernel();
51070 info("using ENUM# polling mode");
51072 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
51073 index 1e08ff8c..3cd145f 100644
51074 --- a/drivers/pci/hotplug/cpqphp_nvram.c
51075 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
51076 @@ -425,8 +425,10 @@ static u32 store_HRT (void __iomem *rom_start)
51078 void compaq_nvram_init (void __iomem *rom_start)
51080 +#ifndef CONFIG_PAX_KERNEXEC
51082 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
51085 dbg("int15 entry = %p\n", compaq_int15_entry_point);
51087 diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
51088 index 56d8486..f26113f 100644
51089 --- a/drivers/pci/hotplug/pci_hotplug_core.c
51090 +++ b/drivers/pci/hotplug/pci_hotplug_core.c
51091 @@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
51095 - slot->ops->owner = owner;
51096 - slot->ops->mod_name = mod_name;
51097 + pax_open_kernel();
51098 + *(struct module **)&slot->ops->owner = owner;
51099 + *(const char **)&slot->ops->mod_name = mod_name;
51100 + pax_close_kernel();
51102 mutex_lock(&pci_hp_mutex);
51104 diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
51105 index 07aa722..84514b4 100644
51106 --- a/drivers/pci/hotplug/pciehp_core.c
51107 +++ b/drivers/pci/hotplug/pciehp_core.c
51108 @@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
51109 struct slot *slot = ctrl->slot;
51110 struct hotplug_slot *hotplug = NULL;
51111 struct hotplug_slot_info *info = NULL;
51112 - struct hotplug_slot_ops *ops = NULL;
51113 + hotplug_slot_ops_no_const *ops = NULL;
51114 char name[SLOT_NAME_SIZE];
51115 int retval = -ENOMEM;
51117 diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
51118 index c3e7dfc..cbd9625 100644
51119 --- a/drivers/pci/msi.c
51120 +++ b/drivers/pci/msi.c
51121 @@ -513,8 +513,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
51123 struct attribute **msi_attrs;
51124 struct attribute *msi_attr;
51125 - struct device_attribute *msi_dev_attr;
51126 - struct attribute_group *msi_irq_group;
51127 + device_attribute_no_const *msi_dev_attr;
51128 + attribute_group_no_const *msi_irq_group;
51129 const struct attribute_group **msi_irq_groups;
51130 struct msi_desc *entry;
51132 @@ -573,7 +573,7 @@ error_attrs:
51134 msi_attr = msi_attrs[count];
51136 - msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
51137 + msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
51138 kfree(msi_attr->name);
51139 kfree(msi_dev_attr);
51141 diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
51142 index 312f23a..d21181c 100644
51143 --- a/drivers/pci/pci-sysfs.c
51144 +++ b/drivers/pci/pci-sysfs.c
51145 @@ -1140,7 +1140,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
51147 /* allocate attribute structure, piggyback attribute name */
51148 int name_len = write_combine ? 13 : 10;
51149 - struct bin_attribute *res_attr;
51150 + bin_attribute_no_const *res_attr;
51153 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
51154 @@ -1317,7 +1317,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
51155 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
51158 - struct bin_attribute *attr;
51159 + bin_attribute_no_const *attr;
51161 /* If the device has VPD, try to expose it in sysfs. */
51163 @@ -1364,7 +1364,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
51167 - struct bin_attribute *attr;
51168 + bin_attribute_no_const *attr;
51170 if (!sysfs_initialized)
51172 diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
51173 index 9bd762c2..6fb9504 100644
51174 --- a/drivers/pci/pci.h
51175 +++ b/drivers/pci/pci.h
51176 @@ -99,7 +99,7 @@ struct pci_vpd_ops {
51179 const struct pci_vpd_ops *ops;
51180 - struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
51181 + bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
51184 int pci_vpd_pci22_init(struct pci_dev *dev);
51185 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
51186 index 7d4fcdc..2f6d8f8 100644
51187 --- a/drivers/pci/pcie/aspm.c
51188 +++ b/drivers/pci/pcie/aspm.c
51190 #define MODULE_PARAM_PREFIX "pcie_aspm."
51192 /* Note: those are not register definitions */
51193 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
51194 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
51195 -#define ASPM_STATE_L1 (4) /* L1 state */
51196 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
51197 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
51198 +#define ASPM_STATE_L1 (4U) /* L1 state */
51199 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
51200 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
51202 diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
51203 index be35da2..ec16cdb 100644
51204 --- a/drivers/pci/pcie/portdrv_pci.c
51205 +++ b/drivers/pci/pcie/portdrv_pci.c
51206 @@ -324,7 +324,7 @@ static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
51210 -static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = {
51211 +static const struct dmi_system_id __initconst pcie_portdrv_dmi_table[] = {
51213 * Boxes that should not use MSI for PCIe PME signaling.
51215 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
51216 index c911857..56f3f9d 100644
51217 --- a/drivers/pci/probe.c
51218 +++ b/drivers/pci/probe.c
51219 @@ -176,7 +176,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
51221 struct pci_bus_region region, inverted_region;
51223 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
51224 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
51226 /* No printks while decoding is disabled! */
51227 if (!dev->mmio_always_on) {
51228 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
51229 index 3f155e7..0f4b1f0 100644
51230 --- a/drivers/pci/proc.c
51231 +++ b/drivers/pci/proc.c
51232 @@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
51233 static int __init pci_proc_init(void)
51235 struct pci_dev *dev = NULL;
51237 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
51238 +#ifdef CONFIG_GRKERNSEC_PROC_USER
51239 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
51240 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51241 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
51244 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
51246 proc_create("devices", 0, proc_bus_pci_dir,
51247 &proc_bus_pci_dev_operations);
51248 proc_initialized = 1;
51249 diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
51250 index 3474920..acc9581 100644
51251 --- a/drivers/platform/chrome/chromeos_pstore.c
51252 +++ b/drivers/platform/chrome/chromeos_pstore.c
51254 #include <linux/platform_device.h>
51255 #include <linux/pstore_ram.h>
51257 -static struct dmi_system_id chromeos_pstore_dmi_table[] __initdata = {
51258 +static const struct dmi_system_id chromeos_pstore_dmi_table[] __initconst = {
51261 * Today all Chromebooks/boxes ship with Google_* as version and
51262 diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
51263 index 1e1e594..8fe59c5 100644
51264 --- a/drivers/platform/x86/alienware-wmi.c
51265 +++ b/drivers/platform/x86/alienware-wmi.c
51266 @@ -150,7 +150,7 @@ struct wmax_led_args {
51269 static struct platform_device *platform_device;
51270 -static struct device_attribute *zone_dev_attrs;
51271 +static device_attribute_no_const *zone_dev_attrs;
51272 static struct attribute **zone_attrs;
51273 static struct platform_zone *zone_data;
51275 @@ -160,7 +160,7 @@ static struct platform_driver platform_driver = {
51279 -static struct attribute_group zone_attribute_group = {
51280 +static attribute_group_no_const zone_attribute_group = {
51281 .name = "rgb_zones",
51284 diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
51285 index 7543a56..367ca8ed 100644
51286 --- a/drivers/platform/x86/asus-wmi.c
51287 +++ b/drivers/platform/x86/asus-wmi.c
51288 @@ -1589,6 +1589,10 @@ static int show_dsts(struct seq_file *m, void *data)
51292 +#ifdef CONFIG_GRKERNSEC_KMEM
51296 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
51299 @@ -1605,6 +1609,10 @@ static int show_devs(struct seq_file *m, void *data)
51303 +#ifdef CONFIG_GRKERNSEC_KMEM
51307 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
51310 @@ -1629,6 +1637,10 @@ static int show_call(struct seq_file *m, void *data)
51311 union acpi_object *obj;
51312 acpi_status status;
51314 +#ifdef CONFIG_GRKERNSEC_KMEM
51318 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
51319 1, asus->debug.method_id,
51321 diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
51322 index b4e9447..9dc6ec34 100644
51323 --- a/drivers/platform/x86/compal-laptop.c
51324 +++ b/drivers/platform/x86/compal-laptop.c
51325 @@ -765,7 +765,7 @@ static int dmi_check_cb_extra(const struct dmi_system_id *id)
51329 -static struct dmi_system_id __initdata compal_dmi_table[] = {
51330 +static const struct dmi_system_id __initconst compal_dmi_table[] = {
51332 .ident = "FL90/IFL90",
51334 diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
51335 index 458e6c9..089aee7 100644
51336 --- a/drivers/platform/x86/hdaps.c
51337 +++ b/drivers/platform/x86/hdaps.c
51338 @@ -514,7 +514,7 @@ static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id)
51339 "ThinkPad T42p", so the order of the entries matters.
51340 If your ThinkPad is not recognized, please update to latest
51341 BIOS. This is especially the case for some R52 ThinkPads. */
51342 -static struct dmi_system_id __initdata hdaps_whitelist[] = {
51343 +static const struct dmi_system_id __initconst hdaps_whitelist[] = {
51344 HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad R50p", HDAPS_BOTH_AXES),
51345 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R50"),
51346 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R51"),
51347 diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
51348 index 97c2be1..2ee50ce 100644
51349 --- a/drivers/platform/x86/ibm_rtl.c
51350 +++ b/drivers/platform/x86/ibm_rtl.c
51351 @@ -227,7 +227,7 @@ static void rtl_teardown_sysfs(void) {
51355 -static struct dmi_system_id __initdata ibm_rtl_dmi_table[] = {
51356 +static const struct dmi_system_id __initconst ibm_rtl_dmi_table[] = {
51359 DMI_MATCH(DMI_SYS_VENDOR, "IBM"), \
51360 diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
51361 index 8037c8b..f88445c 100644
51362 --- a/drivers/platform/x86/intel_oaktrail.c
51363 +++ b/drivers/platform/x86/intel_oaktrail.c
51364 @@ -298,7 +298,7 @@ static int dmi_check_cb(const struct dmi_system_id *id)
51368 -static struct dmi_system_id __initdata oaktrail_dmi_table[] = {
51369 +static const struct dmi_system_id __initconst oaktrail_dmi_table[] = {
51371 .ident = "OakTrail platform",
51373 diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
51374 index 0859877..59d596d 100644
51375 --- a/drivers/platform/x86/msi-laptop.c
51376 +++ b/drivers/platform/x86/msi-laptop.c
51377 @@ -604,7 +604,7 @@ static int dmi_check_cb(const struct dmi_system_id *dmi)
51381 -static struct dmi_system_id __initdata msi_dmi_table[] = {
51382 +static const struct dmi_system_id __initconst msi_dmi_table[] = {
51384 .ident = "MSI S270",
51386 @@ -999,12 +999,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
51388 if (!quirks->ec_read_only) {
51389 /* allow userland write sysfs file */
51390 - dev_attr_bluetooth.store = store_bluetooth;
51391 - dev_attr_wlan.store = store_wlan;
51392 - dev_attr_threeg.store = store_threeg;
51393 - dev_attr_bluetooth.attr.mode |= S_IWUSR;
51394 - dev_attr_wlan.attr.mode |= S_IWUSR;
51395 - dev_attr_threeg.attr.mode |= S_IWUSR;
51396 + pax_open_kernel();
51397 + *(void **)&dev_attr_bluetooth.store = store_bluetooth;
51398 + *(void **)&dev_attr_wlan.store = store_wlan;
51399 + *(void **)&dev_attr_threeg.store = store_threeg;
51400 + *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
51401 + *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
51402 + *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
51403 + pax_close_kernel();
51406 /* disable hardware control by fn key */
51407 diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
51408 index 6d2bac0..ec2b029 100644
51409 --- a/drivers/platform/x86/msi-wmi.c
51410 +++ b/drivers/platform/x86/msi-wmi.c
51411 @@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
51412 static void msi_wmi_notify(u32 value, void *context)
51414 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
51415 - static struct key_entry *key;
51416 + struct key_entry *key;
51417 union acpi_object *obj;
51418 acpi_status status;
51420 diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
51421 index 9e701b2..c68a7b5 100644
51422 --- a/drivers/platform/x86/samsung-laptop.c
51423 +++ b/drivers/platform/x86/samsung-laptop.c
51424 @@ -1567,7 +1567,7 @@ static int __init samsung_dmi_matched(const struct dmi_system_id *d)
51428 -static struct dmi_system_id __initdata samsung_dmi_table[] = {
51429 +static const struct dmi_system_id __initconst samsung_dmi_table[] = {
51432 DMI_MATCH(DMI_SYS_VENDOR,
51433 diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
51434 index e6aac72..e11ff24 100644
51435 --- a/drivers/platform/x86/samsung-q10.c
51436 +++ b/drivers/platform/x86/samsung-q10.c
51437 @@ -95,7 +95,7 @@ static int __init dmi_check_callback(const struct dmi_system_id *id)
51441 -static struct dmi_system_id __initdata samsungq10_dmi_table[] = {
51442 +static const struct dmi_system_id __initconst samsungq10_dmi_table[] = {
51444 .ident = "Samsung Q10",
51446 diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
51447 index e51c1e7..71bb385 100644
51448 --- a/drivers/platform/x86/sony-laptop.c
51449 +++ b/drivers/platform/x86/sony-laptop.c
51450 @@ -2526,7 +2526,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
51453 /* High speed charging function */
51454 -static struct device_attribute *hsc_handle;
51455 +static device_attribute_no_const *hsc_handle;
51457 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
51458 struct device_attribute *attr,
51459 @@ -2600,7 +2600,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
51462 /* low battery function */
51463 -static struct device_attribute *lowbatt_handle;
51464 +static device_attribute_no_const *lowbatt_handle;
51466 static ssize_t sony_nc_lowbatt_store(struct device *dev,
51467 struct device_attribute *attr,
51468 @@ -2666,7 +2666,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
51471 /* fan speed function */
51472 -static struct device_attribute *fan_handle, *hsf_handle;
51473 +static device_attribute_no_const *fan_handle, *hsf_handle;
51475 static ssize_t sony_nc_hsfan_store(struct device *dev,
51476 struct device_attribute *attr,
51477 @@ -2773,7 +2773,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
51480 /* USB charge function */
51481 -static struct device_attribute *uc_handle;
51482 +static device_attribute_no_const *uc_handle;
51484 static ssize_t sony_nc_usb_charge_store(struct device *dev,
51485 struct device_attribute *attr,
51486 @@ -2847,7 +2847,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
51489 /* Panel ID function */
51490 -static struct device_attribute *panel_handle;
51491 +static device_attribute_no_const *panel_handle;
51493 static ssize_t sony_nc_panelid_show(struct device *dev,
51494 struct device_attribute *attr, char *buffer)
51495 @@ -2894,7 +2894,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
51498 /* smart connect function */
51499 -static struct device_attribute *sc_handle;
51500 +static device_attribute_no_const *sc_handle;
51502 static ssize_t sony_nc_smart_conn_store(struct device *dev,
51503 struct device_attribute *attr,
51504 @@ -4854,7 +4854,7 @@ static struct acpi_driver sony_pic_driver = {
51505 .drv.pm = &sony_pic_pm,
51508 -static struct dmi_system_id __initdata sonypi_dmi_table[] = {
51509 +static const struct dmi_system_id __initconst sonypi_dmi_table[] = {
51511 .ident = "Sony Vaio",
51513 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
51514 index 28f3281..171d8c3 100644
51515 --- a/drivers/platform/x86/thinkpad_acpi.c
51516 +++ b/drivers/platform/x86/thinkpad_acpi.c
51517 @@ -2459,10 +2459,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
51518 && !tp_features.bright_unkfw)
51519 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
51523 #undef TPACPI_COMPARE_KEY
51524 #undef TPACPI_MAY_SEND_KEY
51529 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
51530 index 438d4c7..ca8a2fb 100644
51531 --- a/drivers/pnp/pnpbios/bioscalls.c
51532 +++ b/drivers/pnp/pnpbios/bioscalls.c
51533 @@ -59,7 +59,7 @@ do { \
51534 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
51537 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
51538 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
51539 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
51542 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51545 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
51547 + pax_open_kernel();
51548 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
51549 + pax_close_kernel();
51551 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
51552 spin_lock_irqsave(&pnp_bios_lock, flags);
51553 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51555 spin_unlock_irqrestore(&pnp_bios_lock, flags);
51557 + pax_open_kernel();
51558 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
51559 + pax_close_kernel();
51563 /* If we get here and this is set then the PnP BIOS faulted on us. */
51564 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
51568 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
51569 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
51573 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51574 pnp_bios_callpoint.offset = header->fields.pm16offset;
51575 pnp_bios_callpoint.segment = PNP_CS16;
51577 + pax_open_kernel();
51579 for_each_possible_cpu(i) {
51580 struct desc_struct *gdt = get_cpu_gdt_table(i);
51582 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51583 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
51584 (unsigned long)__va(header->fields.pm16dseg));
51587 + pax_close_kernel();
51589 diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
51590 index facd43b..b291260 100644
51591 --- a/drivers/pnp/pnpbios/core.c
51592 +++ b/drivers/pnp/pnpbios/core.c
51593 @@ -494,7 +494,7 @@ static int __init exploding_pnp_bios(const struct dmi_system_id *d)
51597 -static struct dmi_system_id pnpbios_dmi_table[] __initdata = {
51598 +static const struct dmi_system_id pnpbios_dmi_table[] __initconst = {
51599 { /* PnPBIOS GPF on boot */
51600 .callback = exploding_pnp_bios,
51601 .ident = "Higraded P14H",
51602 diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
51603 index dfe1ee8..67e820c 100644
51604 --- a/drivers/power/pda_power.c
51605 +++ b/drivers/power/pda_power.c
51606 @@ -38,7 +38,11 @@ static struct power_supply *pda_psy_ac, *pda_psy_usb;
51608 #if IS_ENABLED(CONFIG_USB_PHY)
51609 static struct usb_phy *transceiver;
51610 -static struct notifier_block otg_nb;
51611 +static int otg_handle_notification(struct notifier_block *nb,
51612 + unsigned long event, void *unused);
51613 +static struct notifier_block otg_nb = {
51614 + .notifier_call = otg_handle_notification
51618 static struct regulator *ac_draw;
51619 @@ -373,7 +377,6 @@ static int pda_power_probe(struct platform_device *pdev)
51621 #if IS_ENABLED(CONFIG_USB_PHY)
51622 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
51623 - otg_nb.notifier_call = otg_handle_notification;
51624 ret = usb_register_notifier(transceiver, &otg_nb);
51626 dev_err(dev, "failure to register otg notifier\n");
51627 diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
51628 index cc439fd..8fa30df 100644
51629 --- a/drivers/power/power_supply.h
51630 +++ b/drivers/power/power_supply.h
51631 @@ -16,12 +16,12 @@ struct power_supply;
51633 #ifdef CONFIG_SYSFS
51635 -extern void power_supply_init_attrs(struct device_type *dev_type);
51636 +extern void power_supply_init_attrs(void);
51637 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
51641 -static inline void power_supply_init_attrs(struct device_type *dev_type) {}
51642 +static inline void power_supply_init_attrs(void) {}
51643 #define power_supply_uevent NULL
51645 #endif /* CONFIG_SYSFS */
51646 diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
51647 index 4bc0c7f..198c99d 100644
51648 --- a/drivers/power/power_supply_core.c
51649 +++ b/drivers/power/power_supply_core.c
51650 @@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
51651 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
51652 EXPORT_SYMBOL_GPL(power_supply_notifier);
51654 -static struct device_type power_supply_dev_type;
51655 +extern const struct attribute_group *power_supply_attr_groups[];
51656 +static struct device_type power_supply_dev_type = {
51657 + .groups = power_supply_attr_groups,
51660 #define POWER_SUPPLY_DEFERRED_REGISTER_TIME msecs_to_jiffies(10)
51662 @@ -921,7 +924,7 @@ static int __init power_supply_class_init(void)
51663 return PTR_ERR(power_supply_class);
51665 power_supply_class->dev_uevent = power_supply_uevent;
51666 - power_supply_init_attrs(&power_supply_dev_type);
51667 + power_supply_init_attrs();
51671 diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
51672 index 9134e3d..45eee1e 100644
51673 --- a/drivers/power/power_supply_sysfs.c
51674 +++ b/drivers/power/power_supply_sysfs.c
51675 @@ -238,17 +238,15 @@ static struct attribute_group power_supply_attr_group = {
51676 .is_visible = power_supply_attr_is_visible,
51679 -static const struct attribute_group *power_supply_attr_groups[] = {
51680 +const struct attribute_group *power_supply_attr_groups[] = {
51681 &power_supply_attr_group,
51685 -void power_supply_init_attrs(struct device_type *dev_type)
51686 +void power_supply_init_attrs(void)
51690 - dev_type->groups = power_supply_attr_groups;
51692 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
51693 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
51695 diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
51696 index 84419af..268ede8 100644
51697 --- a/drivers/powercap/powercap_sys.c
51698 +++ b/drivers/powercap/powercap_sys.c
51699 @@ -154,8 +154,77 @@ struct powercap_constraint_attr {
51700 struct device_attribute name_attr;
51703 +static ssize_t show_constraint_name(struct device *dev,
51704 + struct device_attribute *dev_attr,
51707 static struct powercap_constraint_attr
51708 - constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
51709 + constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
51710 + [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
51711 + .power_limit_attr = {
51714 + .mode = S_IWUSR | S_IRUGO
51716 + .show = show_constraint_power_limit_uw,
51717 + .store = store_constraint_power_limit_uw
51720 + .time_window_attr = {
51723 + .mode = S_IWUSR | S_IRUGO
51725 + .show = show_constraint_time_window_us,
51726 + .store = store_constraint_time_window_us
51729 + .max_power_attr = {
51734 + .show = show_constraint_max_power_uw,
51738 + .min_power_attr = {
51743 + .show = show_constraint_min_power_uw,
51747 + .max_time_window_attr = {
51752 + .show = show_constraint_max_time_window_us,
51756 + .min_time_window_attr = {
51761 + .show = show_constraint_min_time_window_us,
51770 + .show = show_constraint_name,
51776 /* A list of powercap control_types */
51777 static LIST_HEAD(powercap_cntrl_list);
51778 @@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
51781 static int create_constraint_attribute(int id, const char *name,
51783 - struct device_attribute *dev_attr,
51784 - ssize_t (*show)(struct device *,
51785 - struct device_attribute *, char *),
51786 - ssize_t (*store)(struct device *,
51787 - struct device_attribute *,
51788 - const char *, size_t)
51790 + struct device_attribute *dev_attr)
51792 + name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
51794 - dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
51796 - if (!dev_attr->attr.name)
51799 - dev_attr->attr.mode = mode;
51800 - dev_attr->show = show;
51801 - dev_attr->store = store;
51803 + pax_open_kernel();
51804 + *(const char **)&dev_attr->attr.name = name;
51805 + pax_close_kernel();
51809 @@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
51811 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
51812 ret = create_constraint_attribute(i, "power_limit_uw",
51813 - S_IWUSR | S_IRUGO,
51814 - &constraint_attrs[i].power_limit_attr,
51815 - show_constraint_power_limit_uw,
51816 - store_constraint_power_limit_uw);
51817 + &constraint_attrs[i].power_limit_attr);
51820 ret = create_constraint_attribute(i, "time_window_us",
51821 - S_IWUSR | S_IRUGO,
51822 - &constraint_attrs[i].time_window_attr,
51823 - show_constraint_time_window_us,
51824 - store_constraint_time_window_us);
51825 + &constraint_attrs[i].time_window_attr);
51828 - ret = create_constraint_attribute(i, "name", S_IRUGO,
51829 - &constraint_attrs[i].name_attr,
51830 - show_constraint_name,
51832 + ret = create_constraint_attribute(i, "name",
51833 + &constraint_attrs[i].name_attr);
51836 - ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
51837 - &constraint_attrs[i].max_power_attr,
51838 - show_constraint_max_power_uw,
51840 + ret = create_constraint_attribute(i, "max_power_uw",
51841 + &constraint_attrs[i].max_power_attr);
51844 - ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
51845 - &constraint_attrs[i].min_power_attr,
51846 - show_constraint_min_power_uw,
51848 + ret = create_constraint_attribute(i, "min_power_uw",
51849 + &constraint_attrs[i].min_power_attr);
51852 ret = create_constraint_attribute(i, "max_time_window_us",
51854 - &constraint_attrs[i].max_time_window_attr,
51855 - show_constraint_max_time_window_us,
51857 + &constraint_attrs[i].max_time_window_attr);
51860 ret = create_constraint_attribute(i, "min_time_window_us",
51862 - &constraint_attrs[i].min_time_window_attr,
51863 - show_constraint_min_time_window_us,
51865 + &constraint_attrs[i].min_time_window_attr);
51869 @@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
51870 power_zone->zone_dev_attrs[count++] =
51871 &dev_attr_max_energy_range_uj.attr;
51872 if (power_zone->ops->get_energy_uj) {
51873 + pax_open_kernel();
51874 if (power_zone->ops->reset_energy_uj)
51875 - dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
51876 + *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
51878 - dev_attr_energy_uj.attr.mode = S_IRUGO;
51879 + *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
51880 + pax_close_kernel();
51881 power_zone->zone_dev_attrs[count++] =
51882 &dev_attr_energy_uj.attr;
51884 diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
51885 index 9c5d414..c7900ce 100644
51886 --- a/drivers/ptp/ptp_private.h
51887 +++ b/drivers/ptp/ptp_private.h
51888 @@ -51,7 +51,7 @@ struct ptp_clock {
51889 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
51890 wait_queue_head_t tsev_wq;
51891 int defunct; /* tells readers to go away when clock is being removed */
51892 - struct device_attribute *pin_dev_attr;
51893 + device_attribute_no_const *pin_dev_attr;
51894 struct attribute **pin_attr;
51895 struct attribute_group pin_attr_group;
51897 diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
51898 index 302e626..12579af 100644
51899 --- a/drivers/ptp/ptp_sysfs.c
51900 +++ b/drivers/ptp/ptp_sysfs.c
51901 @@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
51904 for (i = 0; i < n_pins; i++) {
51905 - struct device_attribute *da = &ptp->pin_dev_attr[i];
51906 + device_attribute_no_const *da = &ptp->pin_dev_attr[i];
51907 sysfs_attr_init(&da->attr);
51908 da->attr.name = info->pin_config[i].name;
51909 da->attr.mode = 0644;
51910 diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
51911 index 8a28116..05b0ad5 100644
51912 --- a/drivers/regulator/core.c
51913 +++ b/drivers/regulator/core.c
51914 @@ -3603,7 +3603,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
51915 const struct regulation_constraints *constraints = NULL;
51916 const struct regulator_init_data *init_data;
51917 struct regulator_config *config = NULL;
51918 - static atomic_t regulator_no = ATOMIC_INIT(-1);
51919 + static atomic_unchecked_t regulator_no = ATOMIC_INIT(-1);
51920 struct regulator_dev *rdev;
51921 struct device *dev;
51923 @@ -3686,7 +3686,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
51924 rdev->dev.class = ®ulator_class;
51925 rdev->dev.parent = dev;
51926 dev_set_name(&rdev->dev, "regulator.%lu",
51927 - (unsigned long) atomic_inc_return(®ulator_no));
51928 + (unsigned long) atomic_inc_return_unchecked(®ulator_no));
51929 ret = device_register(&rdev->dev);
51931 put_device(&rdev->dev);
51932 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
51933 index 4071d74..260b15a 100644
51934 --- a/drivers/regulator/max8660.c
51935 +++ b/drivers/regulator/max8660.c
51936 @@ -423,8 +423,10 @@ static int max8660_probe(struct i2c_client *client,
51937 max8660->shadow_regs[MAX8660_OVER1] = 5;
51939 /* Otherwise devices can be toggled via software */
51940 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
51941 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
51942 + pax_open_kernel();
51943 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
51944 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
51945 + pax_close_kernel();
51949 diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
51950 index c3d55c2..0dddfe6 100644
51951 --- a/drivers/regulator/max8973-regulator.c
51952 +++ b/drivers/regulator/max8973-regulator.c
51953 @@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
51954 if (!pdata || !pdata->enable_ext_control) {
51955 max->desc.enable_reg = MAX8973_VOUT;
51956 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
51957 - max->ops.enable = regulator_enable_regmap;
51958 - max->ops.disable = regulator_disable_regmap;
51959 - max->ops.is_enabled = regulator_is_enabled_regmap;
51960 + pax_open_kernel();
51961 + *(void **)&max->ops.enable = regulator_enable_regmap;
51962 + *(void **)&max->ops.disable = regulator_disable_regmap;
51963 + *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
51964 + pax_close_kernel();
51968 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
51969 index 0d17c92..a29f627 100644
51970 --- a/drivers/regulator/mc13892-regulator.c
51971 +++ b/drivers/regulator/mc13892-regulator.c
51972 @@ -584,10 +584,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
51973 mc13xxx_unlock(mc13892);
51975 /* update mc13892_vcam ops */
51976 - memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
51977 + pax_open_kernel();
51978 + memcpy((void *)&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
51979 sizeof(struct regulator_ops));
51980 - mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
51981 - mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
51982 + *(void **)&mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
51983 + *(void **)&mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
51984 + pax_close_kernel();
51985 mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops;
51987 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
51988 diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
51989 index a82556a0..e842923 100644
51990 --- a/drivers/rtc/rtc-cmos.c
51991 +++ b/drivers/rtc/rtc-cmos.c
51992 @@ -793,7 +793,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
51993 hpet_rtc_timer_init();
51995 /* export at least the first block of NVRAM */
51996 - nvram.size = address_space - NVRAM_OFFSET;
51997 + pax_open_kernel();
51998 + *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
51999 + pax_close_kernel();
52000 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
52002 dev_dbg(dev, "can't create nvram file? %d\n", retval);
52003 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
52004 index 799c34b..8e9786a 100644
52005 --- a/drivers/rtc/rtc-dev.c
52006 +++ b/drivers/rtc/rtc-dev.c
52008 #include <linux/module.h>
52009 #include <linux/rtc.h>
52010 #include <linux/sched.h>
52011 +#include <linux/grsecurity.h>
52012 #include "rtc-core.h"
52014 static dev_t rtc_devt;
52015 @@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
52016 if (copy_from_user(&tm, uarg, sizeof(tm)))
52019 + gr_log_timechange();
52021 return rtc_set_time(rtc, &tm);
52024 diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
52025 index 4ffabb3..1f87fca 100644
52026 --- a/drivers/rtc/rtc-ds1307.c
52027 +++ b/drivers/rtc/rtc-ds1307.c
52028 @@ -107,7 +107,7 @@ struct ds1307 {
52029 u8 offset; /* register's offset */
52032 - struct bin_attribute *nvram;
52033 + bin_attribute_no_const *nvram;
52035 unsigned long flags;
52036 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
52037 diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
52038 index 90abb5b..e0bf6dd 100644
52039 --- a/drivers/rtc/rtc-m48t59.c
52040 +++ b/drivers/rtc/rtc-m48t59.c
52041 @@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
52042 if (IS_ERR(m48t59->rtc))
52043 return PTR_ERR(m48t59->rtc);
52045 - m48t59_nvram_attr.size = pdata->offset;
52046 + pax_open_kernel();
52047 + *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
52048 + pax_close_kernel();
52050 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
52052 diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
52053 index 3a2da4c..e88493c 100644
52054 --- a/drivers/rtc/rtc-test.c
52055 +++ b/drivers/rtc/rtc-test.c
52056 @@ -112,8 +112,10 @@ static int test_probe(struct platform_device *plat_dev)
52057 struct rtc_device *rtc;
52060 - test_rtc_ops.set_mmss64 = test_rtc_set_mmss64;
52061 - test_rtc_ops.set_mmss = NULL;
52062 + pax_open_kernel();
52063 + *(void **)&test_rtc_ops.set_mmss64 = test_rtc_set_mmss64;
52064 + *(void **)&test_rtc_ops.set_mmss = NULL;
52065 + pax_close_kernel();
52068 rtc = devm_rtc_device_register(&plat_dev->dev, "test",
52069 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
52070 index e693af6..2e525b6 100644
52071 --- a/drivers/scsi/bfa/bfa_fcpim.h
52072 +++ b/drivers/scsi/bfa/bfa_fcpim.h
52073 @@ -36,7 +36,7 @@ struct bfa_iotag_s {
52076 bfa_isr_func_t isr;
52080 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
52081 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
52082 diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
52083 index 0f19455..ef7adb5 100644
52084 --- a/drivers/scsi/bfa/bfa_fcs.c
52085 +++ b/drivers/scsi/bfa/bfa_fcs.c
52086 @@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
52087 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
52089 static struct bfa_fcs_mod_s fcs_modules[] = {
52090 - { bfa_fcs_port_attach, NULL, NULL },
52091 - { bfa_fcs_uf_attach, NULL, NULL },
52092 - { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
52093 - bfa_fcs_fabric_modexit },
52095 + .attach = bfa_fcs_port_attach,
52100 + .attach = bfa_fcs_uf_attach,
52105 + .attach = bfa_fcs_fabric_attach,
52106 + .modinit = bfa_fcs_fabric_modinit,
52107 + .modexit = bfa_fcs_fabric_modexit
52112 diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
52113 index ff75ef8..2dfe00a 100644
52114 --- a/drivers/scsi/bfa/bfa_fcs_lport.c
52115 +++ b/drivers/scsi/bfa/bfa_fcs_lport.c
52116 @@ -89,15 +89,26 @@ static struct {
52117 void (*offline) (struct bfa_fcs_lport_s *port);
52118 } __port_action[] = {
52120 - bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
52121 - bfa_fcs_lport_unknown_offline}, {
52122 - bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
52123 - bfa_fcs_lport_fab_offline}, {
52124 - bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
52125 - bfa_fcs_lport_n2n_offline}, {
52126 - bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
52127 - bfa_fcs_lport_loop_offline},
52129 + .init = bfa_fcs_lport_unknown_init,
52130 + .online = bfa_fcs_lport_unknown_online,
52131 + .offline = bfa_fcs_lport_unknown_offline
52134 + .init = bfa_fcs_lport_fab_init,
52135 + .online = bfa_fcs_lport_fab_online,
52136 + .offline = bfa_fcs_lport_fab_offline
52139 + .init = bfa_fcs_lport_n2n_init,
52140 + .online = bfa_fcs_lport_n2n_online,
52141 + .offline = bfa_fcs_lport_n2n_offline
52144 + .init = bfa_fcs_lport_loop_init,
52145 + .online = bfa_fcs_lport_loop_online,
52146 + .offline = bfa_fcs_lport_loop_offline
52151 * fcs_port_sm FCS logical port state machine
52152 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
52153 index a38aafa0..fe8f03b 100644
52154 --- a/drivers/scsi/bfa/bfa_ioc.h
52155 +++ b/drivers/scsi/bfa/bfa_ioc.h
52156 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
52157 bfa_ioc_disable_cbfn_t disable_cbfn;
52158 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
52159 bfa_ioc_reset_cbfn_t reset_cbfn;
52164 * IOC event notification mechanism.
52165 @@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
52166 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
52167 enum bfi_ioc_state fwstate);
52168 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
52173 * Queue element to wait for room in request queue. FIFO order is
52174 diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
52175 index a14c784..6de6790 100644
52176 --- a/drivers/scsi/bfa/bfa_modules.h
52177 +++ b/drivers/scsi/bfa/bfa_modules.h
52178 @@ -78,12 +78,12 @@ enum {
52180 extern struct bfa_module_s hal_mod_ ## __mod; \
52181 struct bfa_module_s hal_mod_ ## __mod = { \
52182 - bfa_ ## __mod ## _meminfo, \
52183 - bfa_ ## __mod ## _attach, \
52184 - bfa_ ## __mod ## _detach, \
52185 - bfa_ ## __mod ## _start, \
52186 - bfa_ ## __mod ## _stop, \
52187 - bfa_ ## __mod ## _iocdisable, \
52188 + .meminfo = bfa_ ## __mod ## _meminfo, \
52189 + .attach = bfa_ ## __mod ## _attach, \
52190 + .detach = bfa_ ## __mod ## _detach, \
52191 + .start = bfa_ ## __mod ## _start, \
52192 + .stop = bfa_ ## __mod ## _stop, \
52193 + .iocdisable = bfa_ ## __mod ## _iocdisable, \
52196 #define BFA_CACHELINE_SZ (256)
52197 diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
52198 index 045c4e1..13de803 100644
52199 --- a/drivers/scsi/fcoe/fcoe_sysfs.c
52200 +++ b/drivers/scsi/fcoe/fcoe_sysfs.c
52203 #include "libfcoe.h"
52205 -static atomic_t ctlr_num;
52206 -static atomic_t fcf_num;
52207 +static atomic_unchecked_t ctlr_num;
52208 +static atomic_unchecked_t fcf_num;
52211 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
52212 @@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
52216 - ctlr->id = atomic_inc_return(&ctlr_num) - 1;
52217 + ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
52219 ctlr->mode = FIP_CONN_TYPE_FABRIC;
52220 INIT_LIST_HEAD(&ctlr->fcfs);
52221 @@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
52222 fcf->dev.parent = &ctlr->dev;
52223 fcf->dev.bus = &fcoe_bus_type;
52224 fcf->dev.type = &fcoe_fcf_device_type;
52225 - fcf->id = atomic_inc_return(&fcf_num) - 1;
52226 + fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
52227 fcf->state = FCOE_FCF_STATE_UNKNOWN;
52229 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
52230 @@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
52234 - atomic_set(&ctlr_num, 0);
52235 - atomic_set(&fcf_num, 0);
52236 + atomic_set_unchecked(&ctlr_num, 0);
52237 + atomic_set_unchecked(&fcf_num, 0);
52239 error = bus_register(&fcoe_bus_type);
52241 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
52242 index 8bb173e..20236b4 100644
52243 --- a/drivers/scsi/hosts.c
52244 +++ b/drivers/scsi/hosts.c
52246 #include "scsi_logging.h"
52249 -static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
52250 +static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
52253 static void scsi_host_cls_release(struct device *dev)
52254 @@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
52255 * subtract one because we increment first then return, but we need to
52256 * know what the next host number was before increment
52258 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
52259 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
52260 shost->dma_channel = 0xff;
52262 /* These three are default values which can be overridden */
52263 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
52264 index 8eab107..599cd79 100644
52265 --- a/drivers/scsi/hpsa.c
52266 +++ b/drivers/scsi/hpsa.c
52267 @@ -697,10 +697,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
52268 struct reply_queue_buffer *rq = &h->reply_queue[q];
52270 if (h->transMethod & CFGTBL_Trans_io_accel1)
52271 - return h->access.command_completed(h, q);
52272 + return h->access->command_completed(h, q);
52274 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
52275 - return h->access.command_completed(h, q);
52276 + return h->access->command_completed(h, q);
52278 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
52279 a = rq->head[rq->current_entry];
52280 @@ -837,7 +837,7 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
52283 set_performant_mode(h, c);
52284 - h->access.submit_command(h, c);
52285 + h->access->submit_command(h, c);
52289 @@ -5369,17 +5369,17 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
52291 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
52293 - return h->access.command_completed(h, q);
52294 + return h->access->command_completed(h, q);
52297 static inline bool interrupt_pending(struct ctlr_info *h)
52299 - return h->access.intr_pending(h);
52300 + return h->access->intr_pending(h);
52303 static inline long interrupt_not_for_us(struct ctlr_info *h)
52305 - return (h->access.intr_pending(h) == 0) ||
52306 + return (h->access->intr_pending(h) == 0) ||
52307 (h->interrupts_enabled == 0);
52310 @@ -6270,7 +6270,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
52311 if (prod_index < 0)
52313 h->product_name = products[prod_index].product_name;
52314 - h->access = *(products[prod_index].access);
52315 + h->access = products[prod_index].access;
52317 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
52318 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
52319 @@ -6647,7 +6647,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
52320 unsigned long flags;
52321 u32 lockup_detected;
52323 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
52324 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
52325 spin_lock_irqsave(&h->lock, flags);
52326 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
52327 if (!lockup_detected) {
52328 @@ -6922,7 +6922,7 @@ reinit_after_soft_reset:
52331 /* make sure the board interrupts are off */
52332 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
52333 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
52335 if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
52337 @@ -6958,7 +6958,7 @@ reinit_after_soft_reset:
52338 * fake ones to scoop up any residual completions.
52340 spin_lock_irqsave(&h->lock, flags);
52341 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
52342 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
52343 spin_unlock_irqrestore(&h->lock, flags);
52345 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
52346 @@ -6977,9 +6977,9 @@ reinit_after_soft_reset:
52347 dev_info(&h->pdev->dev, "Board READY.\n");
52348 dev_info(&h->pdev->dev,
52349 "Waiting for stale completions to drain.\n");
52350 - h->access.set_intr_mask(h, HPSA_INTR_ON);
52351 + h->access->set_intr_mask(h, HPSA_INTR_ON);
52353 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
52354 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
52356 rc = controller_reset_failed(h->cfgtable);
52358 @@ -7004,7 +7004,7 @@ reinit_after_soft_reset:
52361 /* Turn the interrupts on so we can service requests */
52362 - h->access.set_intr_mask(h, HPSA_INTR_ON);
52363 + h->access->set_intr_mask(h, HPSA_INTR_ON);
52365 hpsa_hba_inquiry(h);
52366 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
52367 @@ -7077,7 +7077,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
52368 * To write all data in the battery backed cache to disks
52370 hpsa_flush_cache(h);
52371 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
52372 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
52373 hpsa_free_irqs_and_disable_msix(h);
52376 @@ -7198,7 +7198,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52377 CFGTBL_Trans_enable_directed_msix |
52378 (trans_support & (CFGTBL_Trans_io_accel1 |
52379 CFGTBL_Trans_io_accel2));
52380 - struct access_method access = SA5_performant_access;
52381 + struct access_method *access = &SA5_performant_access;
52383 /* This is a bit complicated. There are 8 registers on
52384 * the controller which we write to to tell it 8 different
52385 @@ -7240,7 +7240,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52386 * perform the superfluous readl() after each command submission.
52388 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
52389 - access = SA5_performant_access_no_read;
52390 + access = &SA5_performant_access_no_read;
52392 /* Controller spec: zero out this buffer. */
52393 for (i = 0; i < h->nreply_queues; i++)
52394 @@ -7270,12 +7270,12 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52395 * enable outbound interrupt coalescing in accelerator mode;
52397 if (trans_support & CFGTBL_Trans_io_accel1) {
52398 - access = SA5_ioaccel_mode1_access;
52399 + access = &SA5_ioaccel_mode1_access;
52400 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52401 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52403 if (trans_support & CFGTBL_Trans_io_accel2) {
52404 - access = SA5_ioaccel_mode2_access;
52405 + access = &SA5_ioaccel_mode2_access;
52406 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52407 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52409 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
52410 index 6577130..955f9a4 100644
52411 --- a/drivers/scsi/hpsa.h
52412 +++ b/drivers/scsi/hpsa.h
52413 @@ -143,7 +143,7 @@ struct ctlr_info {
52414 unsigned int msix_vector;
52415 unsigned int msi_vector;
52416 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
52417 - struct access_method access;
52418 + struct access_method *access;
52419 char hba_mode_enabled;
52421 /* queue and queue Info */
52422 @@ -525,38 +525,38 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
52425 static struct access_method SA5_access = {
52426 - SA5_submit_command,
52428 - SA5_intr_pending,
52430 + .submit_command = SA5_submit_command,
52431 + .set_intr_mask = SA5_intr_mask,
52432 + .intr_pending = SA5_intr_pending,
52433 + .command_completed = SA5_completed,
52436 static struct access_method SA5_ioaccel_mode1_access = {
52437 - SA5_submit_command,
52438 - SA5_performant_intr_mask,
52439 - SA5_ioaccel_mode1_intr_pending,
52440 - SA5_ioaccel_mode1_completed,
52441 + .submit_command = SA5_submit_command,
52442 + .set_intr_mask = SA5_performant_intr_mask,
52443 + .intr_pending = SA5_ioaccel_mode1_intr_pending,
52444 + .command_completed = SA5_ioaccel_mode1_completed,
52447 static struct access_method SA5_ioaccel_mode2_access = {
52448 - SA5_submit_command_ioaccel2,
52449 - SA5_performant_intr_mask,
52450 - SA5_performant_intr_pending,
52451 - SA5_performant_completed,
52452 + .submit_command = SA5_submit_command_ioaccel2,
52453 + .set_intr_mask = SA5_performant_intr_mask,
52454 + .intr_pending = SA5_performant_intr_pending,
52455 + .command_completed = SA5_performant_completed,
52458 static struct access_method SA5_performant_access = {
52459 - SA5_submit_command,
52460 - SA5_performant_intr_mask,
52461 - SA5_performant_intr_pending,
52462 - SA5_performant_completed,
52463 + .submit_command = SA5_submit_command,
52464 + .set_intr_mask = SA5_performant_intr_mask,
52465 + .intr_pending = SA5_performant_intr_pending,
52466 + .command_completed = SA5_performant_completed,
52469 static struct access_method SA5_performant_access_no_read = {
52470 - SA5_submit_command_no_read,
52471 - SA5_performant_intr_mask,
52472 - SA5_performant_intr_pending,
52473 - SA5_performant_completed,
52474 + .submit_command = SA5_submit_command_no_read,
52475 + .set_intr_mask = SA5_performant_intr_mask,
52476 + .intr_pending = SA5_performant_intr_pending,
52477 + .command_completed = SA5_performant_completed,
52480 struct board_type {
52481 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
52482 index 1b3a094..068e683 100644
52483 --- a/drivers/scsi/libfc/fc_exch.c
52484 +++ b/drivers/scsi/libfc/fc_exch.c
52485 @@ -101,12 +101,12 @@ struct fc_exch_mgr {
52486 u16 pool_max_index;
52489 - atomic_t no_free_exch;
52490 - atomic_t no_free_exch_xid;
52491 - atomic_t xid_not_found;
52492 - atomic_t xid_busy;
52493 - atomic_t seq_not_found;
52494 - atomic_t non_bls_resp;
52495 + atomic_unchecked_t no_free_exch;
52496 + atomic_unchecked_t no_free_exch_xid;
52497 + atomic_unchecked_t xid_not_found;
52498 + atomic_unchecked_t xid_busy;
52499 + atomic_unchecked_t seq_not_found;
52500 + atomic_unchecked_t non_bls_resp;
52504 @@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
52505 /* allocate memory for exchange */
52506 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
52508 - atomic_inc(&mp->stats.no_free_exch);
52509 + atomic_inc_unchecked(&mp->stats.no_free_exch);
52512 memset(ep, 0, sizeof(*ep));
52513 @@ -874,7 +874,7 @@ out:
52516 spin_unlock_bh(&pool->lock);
52517 - atomic_inc(&mp->stats.no_free_exch_xid);
52518 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
52519 mempool_free(ep, mp->ep_pool);
52522 @@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52523 xid = ntohs(fh->fh_ox_id); /* we originated exch */
52524 ep = fc_exch_find(mp, xid);
52526 - atomic_inc(&mp->stats.xid_not_found);
52527 + atomic_inc_unchecked(&mp->stats.xid_not_found);
52528 reject = FC_RJT_OX_ID;
52531 @@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52532 ep = fc_exch_find(mp, xid);
52533 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
52535 - atomic_inc(&mp->stats.xid_busy);
52536 + atomic_inc_unchecked(&mp->stats.xid_busy);
52537 reject = FC_RJT_RX_ID;
52540 @@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52542 xid = ep->xid; /* get our XID */
52544 - atomic_inc(&mp->stats.xid_not_found);
52545 + atomic_inc_unchecked(&mp->stats.xid_not_found);
52546 reject = FC_RJT_RX_ID; /* XID not found */
52549 @@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52552 if (sp->id != fh->fh_seq_id) {
52553 - atomic_inc(&mp->stats.seq_not_found);
52554 + atomic_inc_unchecked(&mp->stats.seq_not_found);
52555 if (f_ctl & FC_FC_END_SEQ) {
52557 * Update sequence_id based on incoming last
52558 @@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52560 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
52562 - atomic_inc(&mp->stats.xid_not_found);
52563 + atomic_inc_unchecked(&mp->stats.xid_not_found);
52566 if (ep->esb_stat & ESB_ST_COMPLETE) {
52567 - atomic_inc(&mp->stats.xid_not_found);
52568 + atomic_inc_unchecked(&mp->stats.xid_not_found);
52571 if (ep->rxid == FC_XID_UNKNOWN)
52572 ep->rxid = ntohs(fh->fh_rx_id);
52573 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
52574 - atomic_inc(&mp->stats.xid_not_found);
52575 + atomic_inc_unchecked(&mp->stats.xid_not_found);
52578 if (ep->did != ntoh24(fh->fh_s_id) &&
52579 ep->did != FC_FID_FLOGI) {
52580 - atomic_inc(&mp->stats.xid_not_found);
52581 + atomic_inc_unchecked(&mp->stats.xid_not_found);
52585 @@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52586 sp->ssb_stat |= SSB_ST_RESP;
52587 sp->id = fh->fh_seq_id;
52588 } else if (sp->id != fh->fh_seq_id) {
52589 - atomic_inc(&mp->stats.seq_not_found);
52590 + atomic_inc_unchecked(&mp->stats.seq_not_found);
52594 @@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52595 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
52598 - atomic_inc(&mp->stats.xid_not_found);
52599 + atomic_inc_unchecked(&mp->stats.xid_not_found);
52601 - atomic_inc(&mp->stats.non_bls_resp);
52602 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
52606 @@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
52608 list_for_each_entry(ema, &lport->ema_list, ema_list) {
52610 - st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
52611 + st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
52612 st->fc_no_free_exch_xid +=
52613 - atomic_read(&mp->stats.no_free_exch_xid);
52614 - st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
52615 - st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
52616 - st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
52617 - st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
52618 + atomic_read_unchecked(&mp->stats.no_free_exch_xid);
52619 + st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
52620 + st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
52621 + st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
52622 + st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
52625 EXPORT_SYMBOL(fc_exch_update_stats);
52626 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
52627 index 9c706d8..d3e3ed2 100644
52628 --- a/drivers/scsi/libsas/sas_ata.c
52629 +++ b/drivers/scsi/libsas/sas_ata.c
52630 @@ -535,7 +535,7 @@ static struct ata_port_operations sas_sata_ops = {
52631 .postreset = ata_std_postreset,
52632 .error_handler = ata_std_error_handler,
52633 .post_internal_cmd = sas_ata_post_internal,
52634 - .qc_defer = ata_std_qc_defer,
52635 + .qc_defer = ata_std_qc_defer,
52636 .qc_prep = ata_noop_qc_prep,
52637 .qc_issue = sas_ata_qc_issue,
52638 .qc_fill_rtf = sas_ata_qc_fill_rtf,
52639 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
52640 index 9b81a34..a9b7b8c 100644
52641 --- a/drivers/scsi/lpfc/lpfc.h
52642 +++ b/drivers/scsi/lpfc/lpfc.h
52643 @@ -433,7 +433,7 @@ struct lpfc_vport {
52644 struct dentry *debug_nodelist;
52645 struct dentry *vport_debugfs_root;
52646 struct lpfc_debugfs_trc *disc_trc;
52647 - atomic_t disc_trc_cnt;
52648 + atomic_unchecked_t disc_trc_cnt;
52650 uint8_t stat_data_enabled;
52651 uint8_t stat_data_blocked;
52652 @@ -883,8 +883,8 @@ struct lpfc_hba {
52653 struct timer_list fabric_block_timer;
52654 unsigned long bit_flags;
52655 #define FABRIC_COMANDS_BLOCKED 0
52656 - atomic_t num_rsrc_err;
52657 - atomic_t num_cmd_success;
52658 + atomic_unchecked_t num_rsrc_err;
52659 + atomic_unchecked_t num_cmd_success;
52660 unsigned long last_rsrc_error_time;
52661 unsigned long last_ramp_down_time;
52662 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
52663 @@ -919,7 +919,7 @@ struct lpfc_hba {
52665 struct dentry *debug_slow_ring_trc;
52666 struct lpfc_debugfs_trc *slow_ring_trc;
52667 - atomic_t slow_ring_trc_cnt;
52668 + atomic_unchecked_t slow_ring_trc_cnt;
52669 /* iDiag debugfs sub-directory */
52670 struct dentry *idiag_root;
52671 struct dentry *idiag_pci_cfg;
52672 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
52673 index 513edcb..805c6a8 100644
52674 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
52675 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
52676 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
52678 #include <linux/debugfs.h>
52680 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52681 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52682 static unsigned long lpfc_debugfs_start_time = 0L;
52685 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
52686 lpfc_debugfs_enable = 0;
52689 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
52690 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
52691 (lpfc_debugfs_max_disc_trc - 1);
52692 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
52693 dtp = vport->disc_trc + i;
52694 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
52695 lpfc_debugfs_enable = 0;
52698 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
52699 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
52700 (lpfc_debugfs_max_slow_ring_trc - 1);
52701 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
52702 dtp = phba->slow_ring_trc + i;
52703 @@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
52704 !vport || !vport->disc_trc)
52707 - index = atomic_inc_return(&vport->disc_trc_cnt) &
52708 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
52709 (lpfc_debugfs_max_disc_trc - 1);
52710 dtp = vport->disc_trc + index;
52712 dtp->data1 = data1;
52713 dtp->data2 = data2;
52714 dtp->data3 = data3;
52715 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52716 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52717 dtp->jif = jiffies;
52720 @@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
52721 !phba || !phba->slow_ring_trc)
52724 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
52725 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
52726 (lpfc_debugfs_max_slow_ring_trc - 1);
52727 dtp = phba->slow_ring_trc + index;
52729 dtp->data1 = data1;
52730 dtp->data2 = data2;
52731 dtp->data3 = data3;
52732 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52733 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52734 dtp->jif = jiffies;
52737 @@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52738 "slow_ring buffer\n");
52741 - atomic_set(&phba->slow_ring_trc_cnt, 0);
52742 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
52743 memset(phba->slow_ring_trc, 0,
52744 (sizeof(struct lpfc_debugfs_trc) *
52745 lpfc_debugfs_max_slow_ring_trc));
52746 @@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52750 - atomic_set(&vport->disc_trc_cnt, 0);
52751 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
52753 snprintf(name, sizeof(name), "discovery_trace");
52754 vport->debug_disc_trc =
52755 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
52756 index e8c8c1e..5f2e11c 100644
52757 --- a/drivers/scsi/lpfc/lpfc_init.c
52758 +++ b/drivers/scsi/lpfc/lpfc_init.c
52759 @@ -11406,8 +11406,10 @@ lpfc_init(void)
52760 "misc_register returned with status %d", error);
52762 if (lpfc_enable_npiv) {
52763 - lpfc_transport_functions.vport_create = lpfc_vport_create;
52764 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52765 + pax_open_kernel();
52766 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
52767 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52768 + pax_close_kernel();
52770 lpfc_transport_template =
52771 fc_attach_transport(&lpfc_transport_functions);
52772 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
52773 index c140f99..11b2505 100644
52774 --- a/drivers/scsi/lpfc/lpfc_scsi.c
52775 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
52776 @@ -261,7 +261,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
52777 unsigned long expires;
52779 spin_lock_irqsave(&phba->hbalock, flags);
52780 - atomic_inc(&phba->num_rsrc_err);
52781 + atomic_inc_unchecked(&phba->num_rsrc_err);
52782 phba->last_rsrc_error_time = jiffies;
52784 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
52785 @@ -303,8 +303,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52786 unsigned long num_rsrc_err, num_cmd_success;
52789 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
52790 - num_cmd_success = atomic_read(&phba->num_cmd_success);
52791 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
52792 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
52795 * The error and success command counters are global per
52796 @@ -331,8 +331,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52799 lpfc_destroy_vport_work_array(phba, vports);
52800 - atomic_set(&phba->num_rsrc_err, 0);
52801 - atomic_set(&phba->num_cmd_success, 0);
52802 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
52803 + atomic_set_unchecked(&phba->num_cmd_success, 0);
52807 diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52808 index 3f26147..ee8efd1 100644
52809 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52810 +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52811 @@ -1509,7 +1509,7 @@ _scsih_get_resync(struct device *dev)
52813 struct scsi_device *sdev = to_scsi_device(dev);
52814 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52815 - static struct _raid_device *raid_device;
52816 + struct _raid_device *raid_device;
52817 unsigned long flags;
52818 Mpi2RaidVolPage0_t vol_pg0;
52819 Mpi2ConfigReply_t mpi_reply;
52820 @@ -1561,7 +1561,7 @@ _scsih_get_state(struct device *dev)
52822 struct scsi_device *sdev = to_scsi_device(dev);
52823 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52824 - static struct _raid_device *raid_device;
52825 + struct _raid_device *raid_device;
52826 unsigned long flags;
52827 Mpi2RaidVolPage0_t vol_pg0;
52828 Mpi2ConfigReply_t mpi_reply;
52829 @@ -6641,7 +6641,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
52830 Mpi2EventDataIrOperationStatus_t *event_data =
52831 (Mpi2EventDataIrOperationStatus_t *)
52832 fw_event->event_data;
52833 - static struct _raid_device *raid_device;
52834 + struct _raid_device *raid_device;
52835 unsigned long flags;
52838 @@ -7112,7 +7112,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
52840 struct _sas_device *sas_device;
52841 struct _sas_node *expander_device;
52842 - static struct _raid_device *raid_device;
52843 + struct _raid_device *raid_device;
52845 unsigned long flags;
52847 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
52848 index ed31d8c..ab856b3 100644
52849 --- a/drivers/scsi/pmcraid.c
52850 +++ b/drivers/scsi/pmcraid.c
52851 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
52852 res->scsi_dev = scsi_dev;
52853 scsi_dev->hostdata = res;
52854 res->change_detected = 0;
52855 - atomic_set(&res->read_failures, 0);
52856 - atomic_set(&res->write_failures, 0);
52857 + atomic_set_unchecked(&res->read_failures, 0);
52858 + atomic_set_unchecked(&res->write_failures, 0);
52861 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
52862 @@ -2640,9 +2640,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
52864 /* If this was a SCSI read/write command keep count of errors */
52865 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
52866 - atomic_inc(&res->read_failures);
52867 + atomic_inc_unchecked(&res->read_failures);
52868 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
52869 - atomic_inc(&res->write_failures);
52870 + atomic_inc_unchecked(&res->write_failures);
52872 if (!RES_IS_GSCSI(res->cfg_entry) &&
52873 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
52874 @@ -3468,7 +3468,7 @@ static int pmcraid_queuecommand_lck(
52875 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
52876 * hrrq_id assigned here in queuecommand
52878 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
52879 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
52880 pinstance->num_hrrq;
52881 cmd->cmd_done = pmcraid_io_done;
52883 @@ -3782,7 +3782,7 @@ static long pmcraid_ioctl_passthrough(
52884 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
52885 * hrrq_id assigned here in queuecommand
52887 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
52888 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
52889 pinstance->num_hrrq;
52891 if (request_size) {
52892 @@ -4420,7 +4420,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
52894 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
52895 /* add resources only after host is added into system */
52896 - if (!atomic_read(&pinstance->expose_resources))
52897 + if (!atomic_read_unchecked(&pinstance->expose_resources))
52900 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
52901 @@ -5237,8 +5237,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
52902 init_waitqueue_head(&pinstance->reset_wait_q);
52904 atomic_set(&pinstance->outstanding_cmds, 0);
52905 - atomic_set(&pinstance->last_message_id, 0);
52906 - atomic_set(&pinstance->expose_resources, 0);
52907 + atomic_set_unchecked(&pinstance->last_message_id, 0);
52908 + atomic_set_unchecked(&pinstance->expose_resources, 0);
52910 INIT_LIST_HEAD(&pinstance->free_res_q);
52911 INIT_LIST_HEAD(&pinstance->used_res_q);
52912 @@ -5951,7 +5951,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
52913 /* Schedule worker thread to handle CCN and take care of adding and
52914 * removing devices to OS
52916 - atomic_set(&pinstance->expose_resources, 1);
52917 + atomic_set_unchecked(&pinstance->expose_resources, 1);
52918 schedule_work(&pinstance->worker_q);
52921 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
52922 index e1d150f..6c6df44 100644
52923 --- a/drivers/scsi/pmcraid.h
52924 +++ b/drivers/scsi/pmcraid.h
52925 @@ -748,7 +748,7 @@ struct pmcraid_instance {
52926 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
52928 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
52929 - atomic_t last_message_id;
52930 + atomic_unchecked_t last_message_id;
52932 /* configuration table */
52933 struct pmcraid_config_table *cfg_table;
52934 @@ -777,7 +777,7 @@ struct pmcraid_instance {
52935 atomic_t outstanding_cmds;
52937 /* should add/delete resources to mid-layer now ?*/
52938 - atomic_t expose_resources;
52939 + atomic_unchecked_t expose_resources;
52943 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
52944 struct pmcraid_config_table_entry_ext cfg_entry_ext;
52946 struct scsi_device *scsi_dev; /* Link scsi_device structure */
52947 - atomic_t read_failures; /* count of failed READ commands */
52948 - atomic_t write_failures; /* count of failed WRITE commands */
52949 + atomic_unchecked_t read_failures; /* count of failed READ commands */
52950 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
52952 /* To indicate add/delete/modify during CCN */
52953 u8 change_detected;
52954 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
52955 index 82b92c4..3178171 100644
52956 --- a/drivers/scsi/qla2xxx/qla_attr.c
52957 +++ b/drivers/scsi/qla2xxx/qla_attr.c
52958 @@ -2192,7 +2192,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
52962 -struct fc_function_template qla2xxx_transport_functions = {
52963 +fc_function_template_no_const qla2xxx_transport_functions = {
52965 .show_host_node_name = 1,
52966 .show_host_port_name = 1,
52967 @@ -2240,7 +2240,7 @@ struct fc_function_template qla2xxx_transport_functions = {
52968 .bsg_timeout = qla24xx_bsg_timeout,
52971 -struct fc_function_template qla2xxx_transport_vport_functions = {
52972 +fc_function_template_no_const qla2xxx_transport_vport_functions = {
52974 .show_host_node_name = 1,
52975 .show_host_port_name = 1,
52976 diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
52977 index 7686bfe..4710893 100644
52978 --- a/drivers/scsi/qla2xxx/qla_gbl.h
52979 +++ b/drivers/scsi/qla2xxx/qla_gbl.h
52980 @@ -571,8 +571,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
52981 struct device_attribute;
52982 extern struct device_attribute *qla2x00_host_attrs[];
52983 struct fc_function_template;
52984 -extern struct fc_function_template qla2xxx_transport_functions;
52985 -extern struct fc_function_template qla2xxx_transport_vport_functions;
52986 +extern fc_function_template_no_const qla2xxx_transport_functions;
52987 +extern fc_function_template_no_const qla2xxx_transport_vport_functions;
52988 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
52989 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
52990 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
52991 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
52992 index 7462dd7..5b64c24 100644
52993 --- a/drivers/scsi/qla2xxx/qla_os.c
52994 +++ b/drivers/scsi/qla2xxx/qla_os.c
52995 @@ -1435,8 +1435,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
52996 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
52997 /* Ok, a 64bit DMA mask is applicable. */
52998 ha->flags.enable_64bit_addressing = 1;
52999 - ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
53000 - ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
53001 + pax_open_kernel();
53002 + *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
53003 + *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
53004 + pax_close_kernel();
53008 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
53009 index 8f6d0fb..1b21097 100644
53010 --- a/drivers/scsi/qla4xxx/ql4_def.h
53011 +++ b/drivers/scsi/qla4xxx/ql4_def.h
53012 @@ -305,7 +305,7 @@ struct ddb_entry {
53014 atomic_t relogin_timer; /* Max Time to wait for
53015 * relogin to complete */
53016 - atomic_t relogin_retry_count; /* Num of times relogin has been
53017 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
53019 uint32_t default_time2wait; /* Default Min time between
53020 * relogins (+aens) */
53021 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
53022 index 6d25879..3031a9f 100644
53023 --- a/drivers/scsi/qla4xxx/ql4_os.c
53024 +++ b/drivers/scsi/qla4xxx/ql4_os.c
53025 @@ -4491,12 +4491,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
53027 if (!iscsi_is_session_online(cls_sess)) {
53028 /* Reset retry relogin timer */
53029 - atomic_inc(&ddb_entry->relogin_retry_count);
53030 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
53031 DEBUG2(ql4_printk(KERN_INFO, ha,
53032 "%s: index[%d] relogin timed out-retrying"
53033 " relogin (%d), retry (%d)\n", __func__,
53034 ddb_entry->fw_ddb_index,
53035 - atomic_read(&ddb_entry->relogin_retry_count),
53036 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
53037 ddb_entry->default_time2wait + 4));
53038 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
53039 atomic_set(&ddb_entry->retry_relogin_timer,
53040 @@ -6604,7 +6604,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
53042 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
53043 atomic_set(&ddb_entry->relogin_timer, 0);
53044 - atomic_set(&ddb_entry->relogin_retry_count, 0);
53045 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
53046 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
53047 ddb_entry->default_relogin_timeout =
53048 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
53049 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
53050 index 3833bf5..95feaf1 100644
53051 --- a/drivers/scsi/scsi.c
53052 +++ b/drivers/scsi/scsi.c
53053 @@ -637,7 +637,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
53055 good_bytes = scsi_bufflen(cmd);
53056 if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
53057 - int old_good_bytes = good_bytes;
53058 + unsigned int old_good_bytes = good_bytes;
53059 drv = scsi_cmd_to_driver(cmd);
53061 good_bytes = drv->done(cmd);
53062 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
53063 index b1a2631..5bcd9c8 100644
53064 --- a/drivers/scsi/scsi_lib.c
53065 +++ b/drivers/scsi/scsi_lib.c
53066 @@ -1597,7 +1597,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
53067 shost = sdev->host;
53068 scsi_init_cmd_errh(cmd);
53069 cmd->result = DID_NO_CONNECT << 16;
53070 - atomic_inc(&cmd->device->iorequest_cnt);
53071 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
53074 * SCSI request completion path will do scsi_device_unbusy(),
53075 @@ -1620,9 +1620,9 @@ static void scsi_softirq_done(struct request *rq)
53077 INIT_LIST_HEAD(&cmd->eh_entry);
53079 - atomic_inc(&cmd->device->iodone_cnt);
53080 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
53082 - atomic_inc(&cmd->device->ioerr_cnt);
53083 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
53085 disposition = scsi_decide_disposition(cmd);
53086 if (disposition != SUCCESS &&
53087 @@ -1663,7 +1663,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
53088 struct Scsi_Host *host = cmd->device->host;
53091 - atomic_inc(&cmd->device->iorequest_cnt);
53092 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
53094 /* check if the device is still usable */
53095 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
53096 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
53097 index 1ac38e7..6acc656 100644
53098 --- a/drivers/scsi/scsi_sysfs.c
53099 +++ b/drivers/scsi/scsi_sysfs.c
53100 @@ -788,7 +788,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
53103 struct scsi_device *sdev = to_scsi_device(dev); \
53104 - unsigned long long count = atomic_read(&sdev->field); \
53105 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
53106 return snprintf(buf, 20, "0x%llx\n", count); \
53108 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
53109 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
53110 index 24eaaf6..de30ec9 100644
53111 --- a/drivers/scsi/scsi_transport_fc.c
53112 +++ b/drivers/scsi/scsi_transport_fc.c
53113 @@ -502,7 +502,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
53114 * Netlink Infrastructure
53117 -static atomic_t fc_event_seq;
53118 +static atomic_unchecked_t fc_event_seq;
53121 * fc_get_event_number - Obtain the next sequential FC event number
53122 @@ -515,7 +515,7 @@ static atomic_t fc_event_seq;
53124 fc_get_event_number(void)
53126 - return atomic_add_return(1, &fc_event_seq);
53127 + return atomic_add_return_unchecked(1, &fc_event_seq);
53129 EXPORT_SYMBOL(fc_get_event_number);
53131 @@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
53135 - atomic_set(&fc_event_seq, 0);
53136 + atomic_set_unchecked(&fc_event_seq, 0);
53138 error = transport_class_register(&fc_host_class);
53140 @@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
53143 *val = simple_strtoul(buf, &cp, 0);
53144 - if ((*cp && (*cp != '\n')) || (*val < 0))
53145 + if (*cp && (*cp != '\n'))
53148 * Check for overflow; dev_loss_tmo is u32
53149 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
53150 index 67d43e3..8cee73c 100644
53151 --- a/drivers/scsi/scsi_transport_iscsi.c
53152 +++ b/drivers/scsi/scsi_transport_iscsi.c
53153 @@ -79,7 +79,7 @@ struct iscsi_internal {
53154 struct transport_container session_cont;
53157 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
53158 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
53159 static struct workqueue_struct *iscsi_eh_timer_workq;
53161 static DEFINE_IDA(iscsi_sess_ida);
53162 @@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
53165 ihost = shost->shost_data;
53166 - session->sid = atomic_add_return(1, &iscsi_session_nr);
53167 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
53169 if (target_id == ISCSI_MAX_TARGET) {
53170 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
53171 @@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
53172 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
53173 ISCSI_TRANSPORT_VERSION);
53175 - atomic_set(&iscsi_session_nr, 0);
53176 + atomic_set_unchecked(&iscsi_session_nr, 0);
53178 err = class_register(&iscsi_transport_class);
53180 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
53181 index f115f67..b80b2c1 100644
53182 --- a/drivers/scsi/scsi_transport_srp.c
53183 +++ b/drivers/scsi/scsi_transport_srp.c
53185 #include "scsi_priv.h"
53187 struct srp_host_attrs {
53188 - atomic_t next_port_id;
53189 + atomic_unchecked_t next_port_id;
53191 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
53193 @@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
53194 struct Scsi_Host *shost = dev_to_shost(dev);
53195 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
53197 - atomic_set(&srp_host->next_port_id, 0);
53198 + atomic_set_unchecked(&srp_host->next_port_id, 0);
53202 @@ -744,7 +744,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
53203 rport_fast_io_fail_timedout);
53204 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
53206 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
53207 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
53208 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
53210 transport_setup_device(&rport->dev);
53211 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
53212 index 7f9d65f..e856438 100644
53213 --- a/drivers/scsi/sd.c
53214 +++ b/drivers/scsi/sd.c
53215 @@ -111,7 +111,7 @@ static int sd_resume(struct device *);
53216 static void sd_rescan(struct device *);
53217 static int sd_init_command(struct scsi_cmnd *SCpnt);
53218 static void sd_uninit_command(struct scsi_cmnd *SCpnt);
53219 -static int sd_done(struct scsi_cmnd *);
53220 +static unsigned int sd_done(struct scsi_cmnd *);
53221 static int sd_eh_action(struct scsi_cmnd *, int);
53222 static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
53223 static void scsi_disk_release(struct device *cdev);
53224 @@ -1646,7 +1646,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
53226 * Note: potentially run from within an ISR. Must not block.
53228 -static int sd_done(struct scsi_cmnd *SCpnt)
53229 +static unsigned int sd_done(struct scsi_cmnd *SCpnt)
53231 int result = SCpnt->result;
53232 unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
53233 @@ -2973,7 +2973,7 @@ static int sd_probe(struct device *dev)
53235 sdkp->index = index;
53236 atomic_set(&sdkp->openers, 0);
53237 - atomic_set(&sdkp->device->ioerr_cnt, 0);
53238 + atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
53240 if (!sdp->request_queue->rq_timeout) {
53241 if (sdp->type != TYPE_MOD)
53242 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
53243 index 9d7b7db..33ecc51 100644
53244 --- a/drivers/scsi/sg.c
53245 +++ b/drivers/scsi/sg.c
53246 @@ -1083,7 +1083,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
53247 sdp->disk->disk_name,
53248 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
53251 + (char __user *)arg);
53252 case BLKTRACESTART:
53253 return blk_trace_startstop(sdp->device->request_queue, 1);
53255 diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
53256 index 8bd54a6..dd037a5 100644
53257 --- a/drivers/scsi/sr.c
53258 +++ b/drivers/scsi/sr.c
53259 @@ -80,7 +80,7 @@ static DEFINE_MUTEX(sr_mutex);
53260 static int sr_probe(struct device *);
53261 static int sr_remove(struct device *);
53262 static int sr_init_command(struct scsi_cmnd *SCpnt);
53263 -static int sr_done(struct scsi_cmnd *);
53264 +static unsigned int sr_done(struct scsi_cmnd *);
53265 static int sr_runtime_suspend(struct device *dev);
53267 static struct dev_pm_ops sr_pm_ops = {
53268 @@ -312,11 +312,11 @@ do_tur:
53269 * It will be notified on the end of a SCSI read / write, and will take one
53270 * of several actions based on success or failure.
53272 -static int sr_done(struct scsi_cmnd *SCpnt)
53273 +static unsigned int sr_done(struct scsi_cmnd *SCpnt)
53275 int result = SCpnt->result;
53276 - int this_count = scsi_bufflen(SCpnt);
53277 - int good_bytes = (result == 0 ? this_count : 0);
53278 + unsigned int this_count = scsi_bufflen(SCpnt);
53279 + unsigned int good_bytes = (result == 0 ? this_count : 0);
53280 int block_sectors = 0;
53282 struct scsi_cd *cd = scsi_cd(SCpnt->request->rq_disk);
53283 diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
53284 index 9a1c342..525ab4c 100644
53285 --- a/drivers/scsi/st.c
53286 +++ b/drivers/scsi/st.c
53287 @@ -1274,9 +1274,9 @@ static int st_open(struct inode *inode, struct file *filp)
53288 spin_lock(&st_use_lock);
53290 spin_unlock(&st_use_lock);
53291 - scsi_tape_put(STp);
53293 scsi_autopm_put_device(STp->device);
53294 + scsi_tape_put(STp);
53298 diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
53299 index c0d660f..24a5854 100644
53300 --- a/drivers/soc/tegra/fuse/fuse-tegra.c
53301 +++ b/drivers/soc/tegra/fuse/fuse-tegra.c
53302 @@ -71,7 +71,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
53306 -static struct bin_attribute fuse_bin_attr = {
53307 +static bin_attribute_no_const fuse_bin_attr = {
53308 .attr = { .name = "fuse", .mode = S_IRUGO, },
53311 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
53312 index d35c1a1..eda08dc 100644
53313 --- a/drivers/spi/spi.c
53314 +++ b/drivers/spi/spi.c
53315 @@ -2206,7 +2206,7 @@ int spi_bus_unlock(struct spi_master *master)
53316 EXPORT_SYMBOL_GPL(spi_bus_unlock);
53318 /* portable code must never pass more than 32 bytes */
53319 -#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
53320 +#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
53324 diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
53325 index b41429f..2de5373 100644
53326 --- a/drivers/staging/android/timed_output.c
53327 +++ b/drivers/staging/android/timed_output.c
53329 #include "timed_output.h"
53331 static struct class *timed_output_class;
53332 -static atomic_t device_count;
53333 +static atomic_unchecked_t device_count;
53335 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
53337 @@ -65,7 +65,7 @@ static int create_timed_output_class(void)
53338 timed_output_class = class_create(THIS_MODULE, "timed_output");
53339 if (IS_ERR(timed_output_class))
53340 return PTR_ERR(timed_output_class);
53341 - atomic_set(&device_count, 0);
53342 + atomic_set_unchecked(&device_count, 0);
53343 timed_output_class->dev_groups = timed_output_groups;
53346 @@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
53350 - tdev->index = atomic_inc_return(&device_count);
53351 + tdev->index = atomic_inc_return_unchecked(&device_count);
53352 tdev->dev = device_create(timed_output_class, NULL,
53353 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
53354 if (IS_ERR(tdev->dev))
53355 diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
53356 index e78ddbe..ac437c0 100644
53357 --- a/drivers/staging/comedi/comedi_fops.c
53358 +++ b/drivers/staging/comedi/comedi_fops.c
53359 @@ -297,8 +297,8 @@ static void comedi_file_reset(struct file *file)
53361 cfp->last_attached = dev->attached;
53362 cfp->last_detach_count = dev->detach_count;
53363 - ACCESS_ONCE(cfp->read_subdev) = read_s;
53364 - ACCESS_ONCE(cfp->write_subdev) = write_s;
53365 + ACCESS_ONCE_RW(cfp->read_subdev) = read_s;
53366 + ACCESS_ONCE_RW(cfp->write_subdev) = write_s;
53369 static void comedi_file_check(struct file *file)
53370 @@ -1951,7 +1951,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
53371 !(s_old->async->cmd.flags & CMDF_WRITE))
53374 - ACCESS_ONCE(cfp->read_subdev) = s_new;
53375 + ACCESS_ONCE_RW(cfp->read_subdev) = s_new;
53379 @@ -1993,7 +1993,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
53380 (s_old->async->cmd.flags & CMDF_WRITE))
53383 - ACCESS_ONCE(cfp->write_subdev) = s_new;
53384 + ACCESS_ONCE_RW(cfp->write_subdev) = s_new;
53388 diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
53389 index 53b748b..a5ae0b3 100644
53390 --- a/drivers/staging/fbtft/fbtft-core.c
53391 +++ b/drivers/staging/fbtft/fbtft-core.c
53392 @@ -680,7 +680,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
53394 struct fb_info *info;
53395 struct fbtft_par *par;
53396 - struct fb_ops *fbops = NULL;
53397 + fb_ops_no_const *fbops = NULL;
53398 struct fb_deferred_io *fbdefio = NULL;
53399 struct fbtft_platform_data *pdata = dev->platform_data;
53401 diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
53402 index 9fd98cb..a9cf912 100644
53403 --- a/drivers/staging/fbtft/fbtft.h
53404 +++ b/drivers/staging/fbtft/fbtft.h
53405 @@ -106,7 +106,7 @@ struct fbtft_ops {
53407 int (*set_var)(struct fbtft_par *par);
53408 int (*set_gamma)(struct fbtft_par *par, unsigned long *curves);
53413 * struct fbtft_display - Describes the display properties
53414 diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
53415 index 001348c..cfaac8a 100644
53416 --- a/drivers/staging/gdm724x/gdm_tty.c
53417 +++ b/drivers/staging/gdm724x/gdm_tty.c
53419 #define gdm_tty_send_control(n, r, v, d, l) (\
53420 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
53422 -#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
53423 +#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
53425 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
53426 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
53427 diff --git a/drivers/staging/i2o/i2o.h b/drivers/staging/i2o/i2o.h
53428 index d23c3c2..eb63c81 100644
53429 --- a/drivers/staging/i2o/i2o.h
53430 +++ b/drivers/staging/i2o/i2o.h
53431 @@ -565,7 +565,7 @@ struct i2o_controller {
53432 struct i2o_device *exec; /* Executive */
53433 #if BITS_PER_LONG == 64
53434 spinlock_t context_list_lock; /* lock for context_list */
53435 - atomic_t context_list_counter; /* needed for unique contexts */
53436 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
53437 struct list_head context_list; /* list of context id's
53440 diff --git a/drivers/staging/i2o/i2o_proc.c b/drivers/staging/i2o/i2o_proc.c
53441 index 780fee3..ca9dcae 100644
53442 --- a/drivers/staging/i2o/i2o_proc.c
53443 +++ b/drivers/staging/i2o/i2o_proc.c
53444 @@ -253,12 +253,6 @@ static char *scsi_devices[] = {
53445 "Array Controller Device"
53448 -static char *chtostr(char *tmp, u8 *chars, int n)
53451 - return strncat(tmp, (char *)chars, n);
53454 static int i2o_report_query_status(struct seq_file *seq, int block_status,
53457 @@ -711,9 +705,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
53458 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
53460 struct i2o_controller *c = (struct i2o_controller *)seq->private;
53461 - static u32 work32[5];
53462 - static u8 *work8 = (u8 *) work32;
53463 - static u16 *work16 = (u16 *) work32;
53465 + u8 *work8 = (u8 *) work32;
53466 + u16 *work16 = (u16 *) work32;
53470 @@ -794,7 +788,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
53473 i2o_exec_execute_ddm_table ddm_table;
53474 - char tmp[28 + 1];
53476 result = kmalloc(sizeof(*result), GFP_KERNEL);
53478 @@ -829,8 +822,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
53480 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
53481 seq_printf(seq, "%-#8x", ddm_table.module_id);
53482 - seq_printf(seq, "%-29s",
53483 - chtostr(tmp, ddm_table.module_name_version, 28));
53484 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
53485 seq_printf(seq, "%9d ", ddm_table.data_size);
53486 seq_printf(seq, "%8d", ddm_table.code_size);
53488 @@ -897,7 +889,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
53490 i2o_driver_result_table *result;
53491 i2o_driver_store_table *dst;
53492 - char tmp[28 + 1];
53494 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
53495 if (result == NULL)
53496 @@ -932,9 +923,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
53498 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
53499 seq_printf(seq, "%-#8x", dst->module_id);
53500 - seq_printf(seq, "%-29s",
53501 - chtostr(tmp, dst->module_name_version, 28));
53502 - seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
53503 + seq_printf(seq, "%-.28s", dst->module_name_version);
53504 + seq_printf(seq, "%-.8s", dst->date);
53505 seq_printf(seq, "%8d ", dst->module_size);
53506 seq_printf(seq, "%8d ", dst->mpb_size);
53507 seq_printf(seq, "0x%04x", dst->module_flags);
53508 @@ -1250,11 +1240,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
53509 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
53511 struct i2o_device *d = (struct i2o_device *)seq->private;
53512 - static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
53513 + u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
53514 // == (allow) 512d bytes (max)
53515 - static u16 *work16 = (u16 *) work32;
53516 + u16 *work16 = (u16 *) work32;
53518 - char tmp[16 + 1];
53520 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
53522 @@ -1266,14 +1255,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
53523 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
53524 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
53525 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
53526 - seq_printf(seq, "Vendor info : %s\n",
53527 - chtostr(tmp, (u8 *) (work32 + 2), 16));
53528 - seq_printf(seq, "Product info : %s\n",
53529 - chtostr(tmp, (u8 *) (work32 + 6), 16));
53530 - seq_printf(seq, "Description : %s\n",
53531 - chtostr(tmp, (u8 *) (work32 + 10), 16));
53532 - seq_printf(seq, "Product rev. : %s\n",
53533 - chtostr(tmp, (u8 *) (work32 + 14), 8));
53534 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
53535 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
53536 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
53537 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
53539 seq_printf(seq, "Serial number : ");
53540 print_serial_number(seq, (u8 *) (work32 + 16),
53541 @@ -1310,8 +1295,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
53542 u8 pad[256]; // allow up to 256 byte (max) serial number
53545 - char tmp[24 + 1];
53547 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
53550 @@ -1320,10 +1303,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
53553 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
53554 - seq_printf(seq, "Module name : %s\n",
53555 - chtostr(tmp, result.module_name, 24));
53556 - seq_printf(seq, "Module revision : %s\n",
53557 - chtostr(tmp, result.module_rev, 8));
53558 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
53559 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
53561 seq_printf(seq, "Serial number : ");
53562 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
53563 @@ -1347,8 +1328,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
53564 u8 instance_number[4];
53567 - char tmp[64 + 1];
53569 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
53572 @@ -1356,14 +1335,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
53576 - seq_printf(seq, "Device name : %s\n",
53577 - chtostr(tmp, result.device_name, 64));
53578 - seq_printf(seq, "Service name : %s\n",
53579 - chtostr(tmp, result.service_name, 64));
53580 - seq_printf(seq, "Physical name : %s\n",
53581 - chtostr(tmp, result.physical_location, 64));
53582 - seq_printf(seq, "Instance number : %s\n",
53583 - chtostr(tmp, result.instance_number, 4));
53584 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
53585 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
53586 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
53587 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
53591 @@ -1372,9 +1347,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
53592 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
53594 struct i2o_device *d = (struct i2o_device *)seq->private;
53595 - static u32 work32[12];
53596 - static u16 *work16 = (u16 *) work32;
53597 - static u8 *work8 = (u8 *) work32;
53599 + u16 *work16 = (u16 *) work32;
53600 + u8 *work8 = (u8 *) work32;
53603 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
53604 diff --git a/drivers/staging/i2o/iop.c b/drivers/staging/i2o/iop.c
53605 index 23bdbe4..4e1f340 100644
53606 --- a/drivers/staging/i2o/iop.c
53607 +++ b/drivers/staging/i2o/iop.c
53608 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
53610 spin_lock_irqsave(&c->context_list_lock, flags);
53612 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
53613 - atomic_inc(&c->context_list_counter);
53614 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
53615 + atomic_inc_unchecked(&c->context_list_counter);
53617 - entry->context = atomic_read(&c->context_list_counter);
53618 + entry->context = atomic_read_unchecked(&c->context_list_counter);
53620 list_add(&entry->list, &c->context_list);
53622 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
53624 #if BITS_PER_LONG == 64
53625 spin_lock_init(&c->context_list_lock);
53626 - atomic_set(&c->context_list_counter, 0);
53627 + atomic_set_unchecked(&c->context_list_counter, 0);
53628 INIT_LIST_HEAD(&c->context_list);
53631 diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
53632 index b892f2c..9b4898a 100644
53633 --- a/drivers/staging/iio/accel/lis3l02dq_ring.c
53634 +++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
53635 @@ -118,7 +118,7 @@ static int lis3l02dq_get_buffer_element(struct iio_dev *indio_dev,
53636 int scan_count = bitmap_weight(indio_dev->active_scan_mask,
53637 indio_dev->masklength);
53639 - rx_array = kcalloc(4, scan_count, GFP_KERNEL);
53640 + rx_array = kcalloc(scan_count, 4, GFP_KERNEL);
53643 ret = lis3l02dq_read_all(indio_dev, rx_array);
53644 diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
53645 index d98e229..9c59bc2 100644
53646 --- a/drivers/staging/iio/adc/ad7280a.c
53647 +++ b/drivers/staging/iio/adc/ad7280a.c
53648 @@ -547,8 +547,8 @@ static int ad7280_attr_init(struct ad7280_state *st)
53652 - st->iio_attr = kcalloc(2, sizeof(*st->iio_attr) *
53653 - (st->slave_num + 1) * AD7280A_CELLS_PER_DEV,
53654 + st->iio_attr = kcalloc(sizeof(*st->iio_attr) *
53655 + (st->slave_num + 1) * AD7280A_CELLS_PER_DEV, 2,
53657 if (st->iio_attr == NULL)
53659 diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
53660 index 658f458..0564216 100644
53661 --- a/drivers/staging/lustre/lnet/selftest/brw_test.c
53662 +++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
53663 @@ -487,13 +487,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
53667 -sfw_test_client_ops_t brw_test_client;
53668 -void brw_init_test_client(void)
53670 - brw_test_client.tso_init = brw_client_init;
53671 - brw_test_client.tso_fini = brw_client_fini;
53672 - brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
53673 - brw_test_client.tso_done_rpc = brw_client_done_rpc;
53674 +sfw_test_client_ops_t brw_test_client = {
53675 + .tso_init = brw_client_init,
53676 + .tso_fini = brw_client_fini,
53677 + .tso_prep_rpc = brw_client_prep_rpc,
53678 + .tso_done_rpc = brw_client_done_rpc,
53681 srpc_service_t brw_test_service;
53682 diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
53683 index a93a90d..c51dde6 100644
53684 --- a/drivers/staging/lustre/lnet/selftest/framework.c
53685 +++ b/drivers/staging/lustre/lnet/selftest/framework.c
53686 @@ -1628,12 +1628,10 @@ static srpc_service_t sfw_services[] = {
53688 extern sfw_test_client_ops_t ping_test_client;
53689 extern srpc_service_t ping_test_service;
53690 -extern void ping_init_test_client(void);
53691 extern void ping_init_test_service(void);
53693 extern sfw_test_client_ops_t brw_test_client;
53694 extern srpc_service_t brw_test_service;
53695 -extern void brw_init_test_client(void);
53696 extern void brw_init_test_service(void);
53699 @@ -1675,12 +1673,10 @@ sfw_startup(void)
53700 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
53701 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
53703 - brw_init_test_client();
53704 brw_init_test_service();
53705 rc = sfw_register_test(&brw_test_service, &brw_test_client);
53708 - ping_init_test_client();
53709 ping_init_test_service();
53710 rc = sfw_register_test(&ping_test_service, &ping_test_client);
53712 diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
53713 index 644069a..83cbd26 100644
53714 --- a/drivers/staging/lustre/lnet/selftest/ping_test.c
53715 +++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
53716 @@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
53720 -sfw_test_client_ops_t ping_test_client;
53721 -void ping_init_test_client(void)
53723 - ping_test_client.tso_init = ping_client_init;
53724 - ping_test_client.tso_fini = ping_client_fini;
53725 - ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
53726 - ping_test_client.tso_done_rpc = ping_client_done_rpc;
53728 +sfw_test_client_ops_t ping_test_client = {
53729 + .tso_init = ping_client_init,
53730 + .tso_fini = ping_client_fini,
53731 + .tso_prep_rpc = ping_client_prep_rpc,
53732 + .tso_done_rpc = ping_client_done_rpc,
53735 srpc_service_t ping_test_service;
53736 void ping_init_test_service(void)
53737 diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53738 index bac9902..0225fe1 100644
53739 --- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
53740 +++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53741 @@ -1139,7 +1139,7 @@ struct ldlm_callback_suite {
53742 ldlm_completion_callback lcs_completion;
53743 ldlm_blocking_callback lcs_blocking;
53744 ldlm_glimpse_callback lcs_glimpse;
53749 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
53750 diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
53751 index 2a88b80..62e7e5f 100644
53752 --- a/drivers/staging/lustre/lustre/include/obd.h
53753 +++ b/drivers/staging/lustre/lustre/include/obd.h
53754 @@ -1362,7 +1362,7 @@ struct md_ops {
53755 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
53756 * wrapper function in include/linux/obd_class.h.
53761 struct lsm_operations {
53762 void (*lsm_free)(struct lov_stripe_md *);
53763 diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53764 index a4c252f..b21acac 100644
53765 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53766 +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53767 @@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
53768 int added = (mode == LCK_NL);
53771 - const struct ldlm_callback_suite null_cbs = { NULL };
53772 + const struct ldlm_callback_suite null_cbs = { };
53775 "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
53776 diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
53777 index f0ee76a..1d01af9 100644
53778 --- a/drivers/staging/lustre/lustre/libcfs/module.c
53779 +++ b/drivers/staging/lustre/lustre/libcfs/module.c
53780 @@ -380,11 +380,11 @@ out:
53783 struct cfs_psdev_ops libcfs_psdev_ops = {
53784 - libcfs_psdev_open,
53785 - libcfs_psdev_release,
53789 + .p_open = libcfs_psdev_open,
53790 + .p_close = libcfs_psdev_release,
53793 + .p_ioctl = libcfs_ioctl
53796 static int init_libcfs_module(void)
53797 @@ -631,7 +631,7 @@ static int proc_console_max_delay_cs(struct ctl_table *table, int write,
53800 int rc, max_delay_cs;
53801 - struct ctl_table dummy = *table;
53802 + ctl_table_no_const dummy = *table;
53805 dummy.data = &max_delay_cs;
53806 @@ -664,7 +664,7 @@ static int proc_console_min_delay_cs(struct ctl_table *table, int write,
53809 int rc, min_delay_cs;
53810 - struct ctl_table dummy = *table;
53811 + ctl_table_no_const dummy = *table;
53814 dummy.data = &min_delay_cs;
53815 @@ -696,7 +696,7 @@ static int proc_console_backoff(struct ctl_table *table, int write,
53816 void __user *buffer, size_t *lenp, loff_t *ppos)
53819 - struct ctl_table dummy = *table;
53820 + ctl_table_no_const dummy = *table;
53822 dummy.data = &backoff;
53823 dummy.proc_handler = &proc_dointvec;
53824 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
53825 index 22667db..8b703b6 100644
53826 --- a/drivers/staging/octeon/ethernet-rx.c
53827 +++ b/drivers/staging/octeon/ethernet-rx.c
53828 @@ -354,14 +354,14 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53829 /* Increment RX stats for virtual ports */
53830 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
53831 #ifdef CONFIG_64BIT
53833 + atomic64_add_unchecked(1,
53834 (atomic64_t *)&priv->stats.rx_packets);
53835 - atomic64_add(skb->len,
53836 + atomic64_add_unchecked(skb->len,
53837 (atomic64_t *)&priv->stats.rx_bytes);
53840 + atomic_add_unchecked(1,
53841 (atomic_t *)&priv->stats.rx_packets);
53842 - atomic_add(skb->len,
53843 + atomic_add_unchecked(skb->len,
53844 (atomic_t *)&priv->stats.rx_bytes);
53847 @@ -373,10 +373,10 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53850 #ifdef CONFIG_64BIT
53852 + atomic64_add_unchecked(1,
53853 (atomic64_t *)&priv->stats.rx_dropped);
53856 + atomic_add_unchecked(1,
53857 (atomic_t *)&priv->stats.rx_dropped);
53859 dev_kfree_skb_irq(skb);
53860 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
53861 index fbbe866..2943243 100644
53862 --- a/drivers/staging/octeon/ethernet.c
53863 +++ b/drivers/staging/octeon/ethernet.c
53864 @@ -251,11 +251,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
53865 * since the RX tasklet also increments it.
53867 #ifdef CONFIG_64BIT
53868 - atomic64_add(rx_status.dropped_packets,
53869 - (atomic64_t *)&priv->stats.rx_dropped);
53870 + atomic64_add_unchecked(rx_status.dropped_packets,
53871 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
53873 - atomic_add(rx_status.dropped_packets,
53874 - (atomic_t *)&priv->stats.rx_dropped);
53875 + atomic_add_unchecked(rx_status.dropped_packets,
53876 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
53880 diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
53881 index 3b476d8..f522d68 100644
53882 --- a/drivers/staging/rtl8188eu/include/hal_intf.h
53883 +++ b/drivers/staging/rtl8188eu/include/hal_intf.h
53884 @@ -225,7 +225,7 @@ struct hal_ops {
53886 void (*hal_notch_filter)(struct adapter *adapter, bool enable);
53887 void (*hal_reset_security_engine)(struct adapter *adapter);
53891 enum rt_eeprom_type {
53893 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
53894 index 070cc03..6806e37 100644
53895 --- a/drivers/staging/rtl8712/rtl871x_io.h
53896 +++ b/drivers/staging/rtl8712/rtl871x_io.h
53897 @@ -108,7 +108,7 @@ struct _io_ops {
53899 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
53905 struct list_head list;
53906 diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
53907 index dbbb2f8..5232114 100644
53908 --- a/drivers/staging/sm750fb/sm750.c
53909 +++ b/drivers/staging/sm750fb/sm750.c
53910 @@ -780,6 +780,7 @@ static struct fb_ops lynxfb_ops = {
53911 .fb_set_par = lynxfb_ops_set_par,
53912 .fb_setcolreg = lynxfb_ops_setcolreg,
53913 .fb_blank = lynxfb_ops_blank,
53914 + .fb_pan_display = lynxfb_ops_pan_display,
53915 .fb_fillrect = cfb_fillrect,
53916 .fb_imageblit = cfb_imageblit,
53917 .fb_copyarea = cfb_copyarea,
53918 @@ -827,8 +828,10 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
53919 par->index = index;
53920 output->channel = &crtc->channel;
53921 sm750fb_set_drv(par);
53922 - lynxfb_ops.fb_pan_display = lynxfb_ops_pan_display;
53924 + pax_open_kernel();
53925 + *(void **)&lynxfb_ops.fb_pan_display = lynxfb_ops_pan_display;
53926 + pax_close_kernel();
53928 /* set current cursor variable and proc pointer,
53929 * must be set after crtc member initialized */
53930 @@ -850,7 +853,9 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
53931 crtc->cursor.share = share;
53932 memset_io(crtc->cursor.vstart, 0, crtc->cursor.size);
53934 - lynxfb_ops.fb_cursor = NULL;
53935 + pax_open_kernel();
53936 + *(void **)&lynxfb_ops.fb_cursor = NULL;
53937 + pax_close_kernel();
53938 crtc->cursor.disable(&crtc->cursor);
53941 @@ -858,9 +863,11 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
53942 /* set info->fbops, must be set before fb_find_mode */
53943 if (!share->accel_off) {
53944 /* use 2d acceleration */
53945 - lynxfb_ops.fb_fillrect = lynxfb_ops_fillrect;
53946 - lynxfb_ops.fb_copyarea = lynxfb_ops_copyarea;
53947 - lynxfb_ops.fb_imageblit = lynxfb_ops_imageblit;
53948 + pax_open_kernel();
53949 + *(void **)&lynxfb_ops.fb_fillrect = lynxfb_ops_fillrect;
53950 + *(void **)&lynxfb_ops.fb_copyarea = lynxfb_ops_copyarea;
53951 + *(void **)&lynxfb_ops.fb_imageblit = lynxfb_ops_imageblit;
53952 + pax_close_kernel();
53954 info->fbops = &lynxfb_ops;
53956 diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
53957 index bd46df9..a0a5274 100644
53958 --- a/drivers/staging/unisys/visorchipset/visorchipset.h
53959 +++ b/drivers/staging/unisys/visorchipset/visorchipset.h
53960 @@ -170,7 +170,7 @@ struct visorchipset_busdev_notifiers {
53961 void (*device_resume)(ulong bus_no, ulong dev_no);
53962 int (*get_channel_info)(uuid_le type_uuid, ulong *min_size,
53967 /* These functions live inside visorchipset, and will be called to indicate
53968 * responses to specific events (by code outside of visorchipset).
53969 @@ -185,7 +185,7 @@ struct visorchipset_busdev_responders {
53970 void (*device_destroy)(ulong bus_no, ulong dev_no, int response);
53971 void (*device_pause)(ulong bus_no, ulong dev_no, int response);
53972 void (*device_resume)(ulong bus_no, ulong dev_no, int response);
53976 /** Register functions (in the bus driver) to get called by visorchipset
53977 * whenever a bus or device appears for which this service partition is
53978 diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
53979 index 18b0f97..9c7716e 100644
53980 --- a/drivers/target/sbp/sbp_target.c
53981 +++ b/drivers/target/sbp/sbp_target.c
53982 @@ -61,7 +61,7 @@ static const u32 sbp_unit_directory_template[] = {
53984 #define SESSION_MAINTENANCE_INTERVAL HZ
53986 -static atomic_t login_id = ATOMIC_INIT(0);
53987 +static atomic_unchecked_t login_id = ATOMIC_INIT(0);
53989 static void session_maintenance_work(struct work_struct *);
53990 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
53991 @@ -443,7 +443,7 @@ static void sbp_management_request_login(
53992 login->lun = se_lun;
53993 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
53994 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
53995 - login->login_id = atomic_inc_return(&login_id);
53996 + login->login_id = atomic_inc_return_unchecked(&login_id);
53998 login->tgt_agt = sbp_target_agent_register(login);
53999 if (IS_ERR(login->tgt_agt)) {
54000 diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
54001 index ce5f768..a4f884a 100644
54002 --- a/drivers/target/target_core_device.c
54003 +++ b/drivers/target/target_core_device.c
54004 @@ -1496,7 +1496,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
54005 spin_lock_init(&dev->se_tmr_lock);
54006 spin_lock_init(&dev->qf_cmd_lock);
54007 sema_init(&dev->caw_sem, 1);
54008 - atomic_set(&dev->dev_ordered_id, 0);
54009 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
54010 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
54011 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
54012 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
54013 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
54014 index 675f2d9..1389429 100644
54015 --- a/drivers/target/target_core_transport.c
54016 +++ b/drivers/target/target_core_transport.c
54017 @@ -1208,7 +1208,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
54018 * Used to determine when ORDERED commands should go from
54019 * Dormant to Active status.
54021 - cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
54022 + cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
54023 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
54024 cmd->se_ordered_id, cmd->sam_task_attr,
54025 dev->transport->name);
54026 diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
54027 index 031018e..90981a1 100644
54028 --- a/drivers/thermal/int340x_thermal/int3400_thermal.c
54029 +++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
54030 @@ -272,8 +272,10 @@ static int int3400_thermal_probe(struct platform_device *pdev)
54031 platform_set_drvdata(pdev, priv);
54033 if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
54034 - int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
54035 - int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
54036 + pax_open_kernel();
54037 + *(void **)&int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
54038 + *(void **)&int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
54039 + pax_close_kernel();
54041 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
54042 priv, &int3400_thermal_ops,
54043 diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
54044 index 668fb1b..2737bbe 100644
54045 --- a/drivers/thermal/of-thermal.c
54046 +++ b/drivers/thermal/of-thermal.c
54048 #include <linux/export.h>
54049 #include <linux/string.h>
54050 #include <linux/thermal.h>
54051 +#include <linux/mm.h>
54053 #include "thermal_core.h"
54055 @@ -412,9 +413,11 @@ thermal_zone_of_add_sensor(struct device_node *zone,
54057 tz->sensor_data = data;
54059 - tzd->ops->get_temp = of_thermal_get_temp;
54060 - tzd->ops->get_trend = of_thermal_get_trend;
54061 - tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
54062 + pax_open_kernel();
54063 + *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
54064 + *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
54065 + *(void **)&tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
54066 + pax_close_kernel();
54067 mutex_unlock(&tzd->lock);
54070 @@ -544,9 +547,11 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
54073 mutex_lock(&tzd->lock);
54074 - tzd->ops->get_temp = NULL;
54075 - tzd->ops->get_trend = NULL;
54076 - tzd->ops->set_emul_temp = NULL;
54077 + pax_open_kernel();
54078 + *(void **)&tzd->ops->get_temp = NULL;
54079 + *(void **)&tzd->ops->get_trend = NULL;
54080 + *(void **)&tzd->ops->set_emul_temp = NULL;
54081 + pax_close_kernel();
54084 tz->sensor_data = NULL;
54085 diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
54086 index 9ea3d9d..53e8792 100644
54087 --- a/drivers/thermal/x86_pkg_temp_thermal.c
54088 +++ b/drivers/thermal/x86_pkg_temp_thermal.c
54089 @@ -567,7 +567,7 @@ static int pkg_temp_thermal_cpu_callback(struct notifier_block *nfb,
54093 -static struct notifier_block pkg_temp_thermal_notifier __refdata = {
54094 +static struct notifier_block pkg_temp_thermal_notifier __refconst = {
54095 .notifier_call = pkg_temp_thermal_cpu_callback,
54098 diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
54099 index fd66f57..48e6376 100644
54100 --- a/drivers/tty/cyclades.c
54101 +++ b/drivers/tty/cyclades.c
54102 @@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
54103 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
54106 - info->port.count++;
54107 + atomic_inc(&info->port.count);
54108 #ifdef CY_DEBUG_COUNT
54109 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
54110 - current->pid, info->port.count);
54111 + current->pid, atomic_read(&info->port.count));
54115 @@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
54116 for (j = 0; j < cy_card[i].nports; j++) {
54117 info = &cy_card[i].ports[j];
54119 - if (info->port.count) {
54120 + if (atomic_read(&info->port.count)) {
54121 /* XXX is the ldisc num worth this? */
54122 struct tty_struct *tty;
54123 struct tty_ldisc *ld;
54124 diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
54125 index 4fcec1d..5a036f7 100644
54126 --- a/drivers/tty/hvc/hvc_console.c
54127 +++ b/drivers/tty/hvc/hvc_console.c
54128 @@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
54130 spin_lock_irqsave(&hp->port.lock, flags);
54131 /* Check and then increment for fast path open. */
54132 - if (hp->port.count++ > 0) {
54133 + if (atomic_inc_return(&hp->port.count) > 1) {
54134 spin_unlock_irqrestore(&hp->port.lock, flags);
54137 @@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
54139 spin_lock_irqsave(&hp->port.lock, flags);
54141 - if (--hp->port.count == 0) {
54142 + if (atomic_dec_return(&hp->port.count) == 0) {
54143 spin_unlock_irqrestore(&hp->port.lock, flags);
54144 /* We are done with the tty pointer now. */
54145 tty_port_tty_set(&hp->port, NULL);
54146 @@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
54148 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
54150 - if (hp->port.count < 0)
54151 + if (atomic_read(&hp->port.count) < 0)
54152 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
54153 - hp->vtermno, hp->port.count);
54154 + hp->vtermno, atomic_read(&hp->port.count));
54155 spin_unlock_irqrestore(&hp->port.lock, flags);
54158 @@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
54159 * open->hangup case this can be called after the final close so prevent
54160 * that from happening for now.
54162 - if (hp->port.count <= 0) {
54163 + if (atomic_read(&hp->port.count) <= 0) {
54164 spin_unlock_irqrestore(&hp->port.lock, flags);
54168 - hp->port.count = 0;
54169 + atomic_set(&hp->port.count, 0);
54170 spin_unlock_irqrestore(&hp->port.lock, flags);
54171 tty_port_tty_set(&hp->port, NULL);
54173 @@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
54176 /* FIXME what's this (unprotected) check for? */
54177 - if (hp->port.count <= 0)
54178 + if (atomic_read(&hp->port.count) <= 0)
54181 spin_lock_irqsave(&hp->lock, flags);
54182 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
54183 index 81ff7e1..dfb7b71 100644
54184 --- a/drivers/tty/hvc/hvcs.c
54185 +++ b/drivers/tty/hvc/hvcs.c
54187 #include <asm/hvcserver.h>
54188 #include <asm/uaccess.h>
54189 #include <asm/vio.h>
54190 +#include <asm/local.h>
54193 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
54194 @@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
54196 spin_lock_irqsave(&hvcsd->lock, flags);
54198 - if (hvcsd->port.count > 0) {
54199 + if (atomic_read(&hvcsd->port.count) > 0) {
54200 spin_unlock_irqrestore(&hvcsd->lock, flags);
54201 printk(KERN_INFO "HVCS: vterm state unchanged. "
54202 "The hvcs device node is still in use.\n");
54203 @@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
54207 - hvcsd->port.count = 0;
54208 + atomic_set(&hvcsd->port.count, 0);
54209 hvcsd->port.tty = tty;
54210 tty->driver_data = hvcsd;
54212 @@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
54213 unsigned long flags;
54215 spin_lock_irqsave(&hvcsd->lock, flags);
54216 - hvcsd->port.count++;
54217 + atomic_inc(&hvcsd->port.count);
54218 hvcsd->todo_mask |= HVCS_SCHED_READ;
54219 spin_unlock_irqrestore(&hvcsd->lock, flags);
54221 @@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
54222 hvcsd = tty->driver_data;
54224 spin_lock_irqsave(&hvcsd->lock, flags);
54225 - if (--hvcsd->port.count == 0) {
54226 + if (atomic_dec_and_test(&hvcsd->port.count)) {
54228 vio_disable_interrupts(hvcsd->vdev);
54230 @@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
54232 free_irq(irq, hvcsd);
54234 - } else if (hvcsd->port.count < 0) {
54235 + } else if (atomic_read(&hvcsd->port.count) < 0) {
54236 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
54237 " is missmanaged.\n",
54238 - hvcsd->vdev->unit_address, hvcsd->port.count);
54239 + hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
54242 spin_unlock_irqrestore(&hvcsd->lock, flags);
54243 @@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
54245 spin_lock_irqsave(&hvcsd->lock, flags);
54246 /* Preserve this so that we know how many kref refs to put */
54247 - temp_open_count = hvcsd->port.count;
54248 + temp_open_count = atomic_read(&hvcsd->port.count);
54251 * Don't kref put inside the spinlock because the destruction
54252 @@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
54253 tty->driver_data = NULL;
54254 hvcsd->port.tty = NULL;
54256 - hvcsd->port.count = 0;
54257 + atomic_set(&hvcsd->port.count, 0);
54259 /* This will drop any buffered data on the floor which is OK in a hangup
54261 @@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
54262 * the middle of a write operation? This is a crummy place to do this
54263 * but we want to keep it all in the spinlock.
54265 - if (hvcsd->port.count <= 0) {
54266 + if (atomic_read(&hvcsd->port.count) <= 0) {
54267 spin_unlock_irqrestore(&hvcsd->lock, flags);
54270 @@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
54272 struct hvcs_struct *hvcsd = tty->driver_data;
54274 - if (!hvcsd || hvcsd->port.count <= 0)
54275 + if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
54278 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
54279 diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
54280 index 4190199..06d5bfa 100644
54281 --- a/drivers/tty/hvc/hvsi.c
54282 +++ b/drivers/tty/hvc/hvsi.c
54283 @@ -85,7 +85,7 @@ struct hvsi_struct {
54287 - atomic_t seqno; /* HVSI packet sequence number */
54288 + atomic_unchecked_t seqno; /* HVSI packet sequence number */
54290 uint8_t state; /* HVSI protocol state */
54292 @@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
54294 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
54295 packet.hdr.len = sizeof(struct hvsi_query_response);
54296 - packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54297 + packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54298 packet.verb = VSV_SEND_VERSION_NUMBER;
54299 packet.u.version = HVSI_VERSION;
54300 packet.query_seqno = query_seqno+1;
54301 @@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
54303 packet.hdr.type = VS_QUERY_PACKET_HEADER;
54304 packet.hdr.len = sizeof(struct hvsi_query);
54305 - packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54306 + packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54307 packet.verb = verb;
54309 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
54310 @@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
54313 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
54314 - packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54315 + packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54316 packet.hdr.len = sizeof(struct hvsi_control);
54317 packet.verb = VSV_SET_MODEM_CTL;
54318 packet.mask = HVSI_TSDTR;
54319 @@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
54320 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
54322 packet.hdr.type = VS_DATA_PACKET_HEADER;
54323 - packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54324 + packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54325 packet.hdr.len = count + sizeof(struct hvsi_header);
54326 memcpy(&packet.data, buf, count);
54328 @@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
54329 struct hvsi_control packet __ALIGNED__;
54331 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
54332 - packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54333 + packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54334 packet.hdr.len = 6;
54335 packet.verb = VSV_CLOSE_PROTOCOL;
54337 @@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
54339 tty_port_tty_set(&hp->port, tty);
54340 spin_lock_irqsave(&hp->lock, flags);
54341 - hp->port.count++;
54342 + atomic_inc(&hp->port.count);
54343 atomic_set(&hp->seqno, 0);
54344 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
54345 spin_unlock_irqrestore(&hp->lock, flags);
54346 @@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
54348 spin_lock_irqsave(&hp->lock, flags);
54350 - if (--hp->port.count == 0) {
54351 + if (atomic_dec_return(&hp->port.count) == 0) {
54352 tty_port_tty_set(&hp->port, NULL);
54353 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
54355 @@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
54357 spin_lock_irqsave(&hp->lock, flags);
54359 - } else if (hp->port.count < 0)
54360 + } else if (atomic_read(&hp->port.count) < 0)
54361 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
54362 - hp - hvsi_ports, hp->port.count);
54363 + hp - hvsi_ports, atomic_read(&hp->port.count));
54365 spin_unlock_irqrestore(&hp->lock, flags);
54367 @@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
54368 tty_port_tty_set(&hp->port, NULL);
54370 spin_lock_irqsave(&hp->lock, flags);
54371 - hp->port.count = 0;
54372 + atomic_set(&hp->port.count, 0);
54374 spin_unlock_irqrestore(&hp->lock, flags);
54376 diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
54377 index a270f04..7c77b5d 100644
54378 --- a/drivers/tty/hvc/hvsi_lib.c
54379 +++ b/drivers/tty/hvc/hvsi_lib.c
54382 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
54384 - packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
54385 + packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
54387 /* Assumes that always succeeds, works in practice */
54388 return pv->put_chars(pv->termno, (char *)packet, packet->len);
54389 @@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
54392 pv->established = 0;
54393 - atomic_set(&pv->seqno, 0);
54394 + atomic_set_unchecked(&pv->seqno, 0);
54396 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
54398 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
54399 index 345cebb..d5a1e9e 100644
54400 --- a/drivers/tty/ipwireless/tty.c
54401 +++ b/drivers/tty/ipwireless/tty.c
54403 #include <linux/tty_driver.h>
54404 #include <linux/tty_flip.h>
54405 #include <linux/uaccess.h>
54406 +#include <asm/local.h>
54409 #include "network.h"
54410 @@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
54413 mutex_lock(&tty->ipw_tty_mutex);
54414 - if (tty->port.count == 0)
54415 + if (atomic_read(&tty->port.count) == 0)
54416 tty->tx_bytes_queued = 0;
54418 - tty->port.count++;
54419 + atomic_inc(&tty->port.count);
54421 tty->port.tty = linux_tty;
54422 linux_tty->driver_data = tty;
54423 @@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
54425 static void do_ipw_close(struct ipw_tty *tty)
54427 - tty->port.count--;
54429 - if (tty->port.count == 0) {
54430 + if (atomic_dec_return(&tty->port.count) == 0) {
54431 struct tty_struct *linux_tty = tty->port.tty;
54433 if (linux_tty != NULL) {
54434 @@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
54437 mutex_lock(&tty->ipw_tty_mutex);
54438 - if (tty->port.count == 0) {
54439 + if (atomic_read(&tty->port.count) == 0) {
54440 mutex_unlock(&tty->ipw_tty_mutex);
54443 @@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
54445 mutex_lock(&tty->ipw_tty_mutex);
54447 - if (!tty->port.count) {
54448 + if (!atomic_read(&tty->port.count)) {
54449 mutex_unlock(&tty->ipw_tty_mutex);
54452 @@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
54455 mutex_lock(&tty->ipw_tty_mutex);
54456 - if (!tty->port.count) {
54457 + if (!atomic_read(&tty->port.count)) {
54458 mutex_unlock(&tty->ipw_tty_mutex);
54461 @@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
54465 - if (!tty->port.count)
54466 + if (!atomic_read(&tty->port.count))
54469 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
54470 @@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
54474 - if (!tty->port.count)
54475 + if (!atomic_read(&tty->port.count))
54478 return tty->tx_bytes_queued;
54479 @@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
54483 - if (!tty->port.count)
54484 + if (!atomic_read(&tty->port.count))
54487 return get_control_lines(tty);
54488 @@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
54492 - if (!tty->port.count)
54493 + if (!atomic_read(&tty->port.count))
54496 return set_control_lines(tty, set, clear);
54497 @@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
54501 - if (!tty->port.count)
54502 + if (!atomic_read(&tty->port.count))
54505 /* FIXME: Exactly how is the tty object locked here .. */
54506 @@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
54508 mutex_lock(&ttyj->ipw_tty_mutex);
54510 - while (ttyj->port.count)
54511 + while (atomic_read(&ttyj->port.count))
54512 do_ipw_close(ttyj);
54513 ipwireless_disassociate_network_ttys(network,
54514 ttyj->channel_idx);
54515 diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
54516 index 14c54e0..1efd4f2 100644
54517 --- a/drivers/tty/moxa.c
54518 +++ b/drivers/tty/moxa.c
54519 @@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
54522 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
54523 - ch->port.count++;
54524 + atomic_inc(&ch->port.count);
54525 tty->driver_data = ch;
54526 tty_port_tty_set(&ch->port, tty);
54527 mutex_lock(&ch->port.mutex);
54528 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
54529 index 2c34c32..81d10e1 100644
54530 --- a/drivers/tty/n_gsm.c
54531 +++ b/drivers/tty/n_gsm.c
54532 @@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
54533 spin_lock_init(&dlci->lock);
54534 mutex_init(&dlci->mutex);
54535 dlci->fifo = &dlci->_fifo;
54536 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
54537 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
54541 @@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
54542 struct gsm_dlci *dlci = tty->driver_data;
54543 struct tty_port *port = &dlci->port;
54546 + atomic_inc(&port->count);
54547 tty_port_tty_set(port, tty);
54549 dlci->modem_rx = 0;
54550 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
54551 index 396344c..875c1d6 100644
54552 --- a/drivers/tty/n_tty.c
54553 +++ b/drivers/tty/n_tty.c
54554 @@ -116,7 +116,7 @@ struct n_tty_data {
54555 int minimum_to_wake;
54557 /* consumer-published */
54558 - size_t read_tail;
54559 + size_t read_tail __intentional_overflow(-1);
54562 /* protected by output lock */
54563 @@ -2572,6 +2572,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
54565 *ops = tty_ldisc_N_TTY;
54567 - ops->refcount = ops->flags = 0;
54568 + atomic_set(&ops->refcount, 0);
54571 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
54572 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
54573 index 4d5e840..a2340a6 100644
54574 --- a/drivers/tty/pty.c
54575 +++ b/drivers/tty/pty.c
54576 @@ -849,8 +849,10 @@ static void __init unix98_pty_init(void)
54577 panic("Couldn't register Unix98 pts driver");
54579 /* Now create the /dev/ptmx special device */
54580 + pax_open_kernel();
54581 tty_default_fops(&ptmx_fops);
54582 - ptmx_fops.open = ptmx_open;
54583 + *(void **)&ptmx_fops.open = ptmx_open;
54584 + pax_close_kernel();
54586 cdev_init(&ptmx_cdev, &ptmx_fops);
54587 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
54588 diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
54589 index c8dd8dc..dca6cfd 100644
54590 --- a/drivers/tty/rocket.c
54591 +++ b/drivers/tty/rocket.c
54592 @@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
54593 tty->driver_data = info;
54594 tty_port_tty_set(port, tty);
54596 - if (port->count++ == 0) {
54597 + if (atomic_inc_return(&port->count) == 1) {
54598 atomic_inc(&rp_num_ports_open);
54600 #ifdef ROCKET_DEBUG_OPEN
54601 @@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
54604 #ifdef ROCKET_DEBUG_OPEN
54605 - printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
54606 + printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
54610 @@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
54611 spin_unlock_irqrestore(&info->port.lock, flags);
54614 - if (info->port.count)
54615 + if (atomic_read(&info->port.count))
54616 atomic_dec(&rp_num_ports_open);
54617 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
54618 spin_unlock_irqrestore(&info->port.lock, flags);
54619 diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
54620 index 4506e40..ac0b470 100644
54621 --- a/drivers/tty/serial/8250/8250_core.c
54622 +++ b/drivers/tty/serial/8250/8250_core.c
54623 @@ -3241,9 +3241,9 @@ static void univ8250_release_port(struct uart_port *port)
54625 static void univ8250_rsa_support(struct uart_ops *ops)
54627 - ops->config_port = univ8250_config_port;
54628 - ops->request_port = univ8250_request_port;
54629 - ops->release_port = univ8250_release_port;
54630 + *(void **)&ops->config_port = univ8250_config_port;
54631 + *(void **)&ops->request_port = univ8250_request_port;
54632 + *(void **)&ops->release_port = univ8250_release_port;
54636 @@ -3286,8 +3286,10 @@ static void __init serial8250_isa_init_ports(void)
54639 /* chain base port ops to support Remote Supervisor Adapter */
54640 - univ8250_port_ops = *base_ops;
54641 + pax_open_kernel();
54642 + memcpy((void *)&univ8250_port_ops, base_ops, sizeof univ8250_port_ops);
54643 univ8250_rsa_support(&univ8250_port_ops);
54644 + pax_close_kernel();
54647 irqflag = IRQF_SHARED;
54648 diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
54649 index aa28209..e08fb85 100644
54650 --- a/drivers/tty/serial/ioc4_serial.c
54651 +++ b/drivers/tty/serial/ioc4_serial.c
54652 @@ -437,7 +437,7 @@ struct ioc4_soft {
54653 } is_intr_info[MAX_IOC4_INTR_ENTS];
54655 /* Number of entries active in the above array */
54656 - atomic_t is_num_intrs;
54657 + atomic_unchecked_t is_num_intrs;
54658 } is_intr_type[IOC4_NUM_INTR_TYPES];
54660 /* is_ir_lock must be held while
54661 @@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
54662 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
54663 || (type == IOC4_OTHER_INTR_TYPE)));
54665 - i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
54666 + i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
54667 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
54669 /* Save off the lower level interrupt handler */
54670 @@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
54673 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
54674 - num_intrs = (int)atomic_read(
54675 + num_intrs = (int)atomic_read_unchecked(
54676 &soft->is_intr_type[intr_type].is_num_intrs);
54678 this_mir = this_ir = pending_intrs(soft, intr_type);
54679 diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
54680 index 129dc5b..1da5bb8 100644
54681 --- a/drivers/tty/serial/kgdb_nmi.c
54682 +++ b/drivers/tty/serial/kgdb_nmi.c
54683 @@ -53,7 +53,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
54684 * I/O utilities that messages sent to the console will automatically
54685 * be displayed on the dbg_io.
54687 - dbg_io_ops->is_console = true;
54688 + pax_open_kernel();
54689 + *(int *)&dbg_io_ops->is_console = true;
54690 + pax_close_kernel();
54694 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
54695 index a260cde..6b2b5ce 100644
54696 --- a/drivers/tty/serial/kgdboc.c
54697 +++ b/drivers/tty/serial/kgdboc.c
54699 #define MAX_CONFIG_LEN 40
54701 static struct kgdb_io kgdboc_io_ops;
54702 +static struct kgdb_io kgdboc_io_ops_console;
54704 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
54705 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
54706 static int configured = -1;
54708 static char config[MAX_CONFIG_LEN];
54709 @@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
54710 kgdboc_unregister_kbd();
54711 if (configured == 1)
54712 kgdb_unregister_io_module(&kgdboc_io_ops);
54713 + else if (configured == 2)
54714 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
54717 static int configure_kgdboc(void)
54718 @@ -160,13 +163,13 @@ static int configure_kgdboc(void)
54720 char *cptr = config;
54721 struct console *cons;
54722 + int is_console = 0;
54724 err = kgdboc_option_setup(config);
54725 if (err || !strlen(config) || isspace(config[0]))
54729 - kgdboc_io_ops.is_console = 0;
54730 kgdb_tty_driver = NULL;
54732 kgdboc_use_kms = 0;
54733 @@ -187,7 +190,7 @@ static int configure_kgdboc(void)
54735 if (cons->device && cons->device(cons, &idx) == p &&
54737 - kgdboc_io_ops.is_console = 1;
54742 @@ -197,7 +200,13 @@ static int configure_kgdboc(void)
54743 kgdb_tty_line = tty_line;
54746 - err = kgdb_register_io_module(&kgdboc_io_ops);
54747 + if (is_console) {
54748 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
54751 + err = kgdb_register_io_module(&kgdboc_io_ops);
54757 @@ -205,8 +214,6 @@ do_register:
54759 goto nmi_con_failed;
54766 @@ -223,7 +230,7 @@ noconfig:
54767 static int __init init_kgdboc(void)
54769 /* Already configured? */
54770 - if (configured == 1)
54771 + if (configured >= 1)
54774 return configure_kgdboc();
54775 @@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
54776 if (config[len - 1] == '\n')
54777 config[len - 1] = '\0';
54779 - if (configured == 1)
54780 + if (configured >= 1)
54783 /* Go and configure with the new params. */
54784 @@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
54785 .post_exception = kgdboc_post_exp_handler,
54788 +static struct kgdb_io kgdboc_io_ops_console = {
54789 + .name = "kgdboc",
54790 + .read_char = kgdboc_get_char,
54791 + .write_char = kgdboc_put_char,
54792 + .pre_exception = kgdboc_pre_exp_handler,
54793 + .post_exception = kgdboc_post_exp_handler,
54797 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
54798 /* This is only available if kgdboc is a built in for early debugging */
54799 static int __init kgdboc_early_init(char *opt)
54800 diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
54801 index b73889c..9f74f0a 100644
54802 --- a/drivers/tty/serial/msm_serial.c
54803 +++ b/drivers/tty/serial/msm_serial.c
54804 @@ -1012,7 +1012,7 @@ static struct uart_driver msm_uart_driver = {
54805 .cons = MSM_CONSOLE,
54808 -static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
54809 +static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
54811 static const struct of_device_id msm_uartdm_table[] = {
54812 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
54813 @@ -1036,7 +1036,7 @@ static int msm_serial_probe(struct platform_device *pdev)
54817 - line = atomic_inc_return(&msm_uart_next_id) - 1;
54818 + line = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
54820 if (unlikely(line < 0 || line >= UART_NR))
54822 diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
54823 index a0ae942..befa48d 100644
54824 --- a/drivers/tty/serial/samsung.c
54825 +++ b/drivers/tty/serial/samsung.c
54826 @@ -987,11 +987,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
54827 ourport->tx_in_progress = 0;
54830 +static int s3c64xx_serial_startup(struct uart_port *port);
54831 static int s3c24xx_serial_startup(struct uart_port *port)
54833 struct s3c24xx_uart_port *ourport = to_ourport(port);
54836 + /* Startup sequence is different for s3c64xx and higher SoC's */
54837 + if (s3c24xx_serial_has_interrupt_mask(port))
54838 + return s3c64xx_serial_startup(port);
54840 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
54841 port, (unsigned long long)port->mapbase, port->membase);
54843 @@ -1698,10 +1703,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
54844 /* setup info for port */
54845 port->dev = &platdev->dev;
54847 - /* Startup sequence is different for s3c64xx and higher SoC's */
54848 - if (s3c24xx_serial_has_interrupt_mask(port))
54849 - s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
54853 if (cfg->uart_flags & UPF_CONS_FLOW) {
54854 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
54855 index 0b7bb12..ebe191a 100644
54856 --- a/drivers/tty/serial/serial_core.c
54857 +++ b/drivers/tty/serial/serial_core.c
54858 @@ -1376,7 +1376,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
54859 state = drv->state + tty->index;
54860 port = &state->port;
54861 spin_lock_irq(&port->lock);
54863 + atomic_dec(&port->count);
54864 spin_unlock_irq(&port->lock);
54867 @@ -1386,7 +1386,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
54869 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
54871 - if (!port->count || tty_port_close_start(port, tty, filp) == 0)
54872 + if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
54876 @@ -1510,7 +1510,7 @@ static void uart_hangup(struct tty_struct *tty)
54877 uart_flush_buffer(tty);
54878 uart_shutdown(tty, state);
54879 spin_lock_irqsave(&port->lock, flags);
54881 + atomic_set(&port->count, 0);
54882 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
54883 spin_unlock_irqrestore(&port->lock, flags);
54884 tty_port_tty_set(port, NULL);
54885 @@ -1597,7 +1597,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
54886 pr_debug("uart_open(%d) called\n", line);
54888 spin_lock_irq(&port->lock);
54890 + atomic_inc(&port->count);
54891 spin_unlock_irq(&port->lock);
54894 diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
54895 index b1c6bd3..5f038e2 100644
54896 --- a/drivers/tty/serial/uartlite.c
54897 +++ b/drivers/tty/serial/uartlite.c
54898 @@ -341,13 +341,13 @@ static int ulite_request_port(struct uart_port *port)
54902 - port->private_data = &uartlite_be;
54903 + port->private_data = (void *)&uartlite_be;
54904 ret = uart_in32(ULITE_CONTROL, port);
54905 uart_out32(ULITE_CONTROL_RST_TX, ULITE_CONTROL, port);
54906 ret = uart_in32(ULITE_STATUS, port);
54907 /* Endianess detection */
54908 if ((ret & ULITE_STATUS_TXEMPTY) != ULITE_STATUS_TXEMPTY)
54909 - port->private_data = &uartlite_le;
54910 + port->private_data = (void *)&uartlite_le;
54914 diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
54915 index b799170..87dafd5 100644
54916 --- a/drivers/tty/synclink.c
54917 +++ b/drivers/tty/synclink.c
54918 @@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54920 if (debug_level >= DEBUG_LEVEL_INFO)
54921 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
54922 - __FILE__,__LINE__, info->device_name, info->port.count);
54923 + __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
54925 if (tty_port_close_start(&info->port, tty, filp) == 0)
54927 @@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54929 if (debug_level >= DEBUG_LEVEL_INFO)
54930 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
54931 - tty->driver->name, info->port.count);
54932 + tty->driver->name, atomic_read(&info->port.count));
54934 } /* end of mgsl_close() */
54936 @@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
54938 mgsl_flush_buffer(tty);
54941 - info->port.count = 0;
54943 + atomic_set(&info->port.count, 0);
54944 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54945 info->port.tty = NULL;
54947 @@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54949 if (debug_level >= DEBUG_LEVEL_INFO)
54950 printk("%s(%d):block_til_ready before block on %s count=%d\n",
54951 - __FILE__,__LINE__, tty->driver->name, port->count );
54952 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54954 spin_lock_irqsave(&info->irq_spinlock, flags);
54956 + atomic_dec(&port->count);
54957 spin_unlock_irqrestore(&info->irq_spinlock, flags);
54958 port->blocked_open++;
54960 @@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54962 if (debug_level >= DEBUG_LEVEL_INFO)
54963 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
54964 - __FILE__,__LINE__, tty->driver->name, port->count );
54965 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54969 @@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54971 /* FIXME: Racy on hangup during close wait */
54972 if (!tty_hung_up_p(filp))
54974 + atomic_inc(&port->count);
54975 port->blocked_open--;
54977 if (debug_level >= DEBUG_LEVEL_INFO)
54978 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
54979 - __FILE__,__LINE__, tty->driver->name, port->count );
54980 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54983 port->flags |= ASYNC_NORMAL_ACTIVE;
54984 @@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54986 if (debug_level >= DEBUG_LEVEL_INFO)
54987 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
54988 - __FILE__,__LINE__,tty->driver->name, info->port.count);
54989 + __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
54991 /* If port is closing, signal caller to try again */
54992 if (info->port.flags & ASYNC_CLOSING){
54993 @@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54994 spin_unlock_irqrestore(&info->netlock, flags);
54997 - info->port.count++;
54998 + atomic_inc(&info->port.count);
54999 spin_unlock_irqrestore(&info->netlock, flags);
55001 - if (info->port.count == 1) {
55002 + if (atomic_read(&info->port.count) == 1) {
55003 /* 1st open on this device, init hardware */
55004 retval = startup(info);
55006 @@ -3442,8 +3442,8 @@ cleanup:
55008 if (tty->count == 1)
55009 info->port.tty = NULL; /* tty layer will release tty struct */
55010 - if(info->port.count)
55011 - info->port.count--;
55012 + if (atomic_read(&info->port.count))
55013 + atomic_dec(&info->port.count);
55017 @@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
55018 unsigned short new_crctype;
55020 /* return error if TTY interface open */
55021 - if (info->port.count)
55022 + if (atomic_read(&info->port.count))
55026 @@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
55028 /* arbitrate between network and tty opens */
55029 spin_lock_irqsave(&info->netlock, flags);
55030 - if (info->port.count != 0 || info->netcount != 0) {
55031 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
55032 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
55033 spin_unlock_irqrestore(&info->netlock, flags);
55035 @@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
55036 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
55038 /* return error if TTY interface open */
55039 - if (info->port.count)
55040 + if (atomic_read(&info->port.count))
55043 if (cmd != SIOCWANDEV)
55044 diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
55045 index 0e8c39b..e0cb171 100644
55046 --- a/drivers/tty/synclink_gt.c
55047 +++ b/drivers/tty/synclink_gt.c
55048 @@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
55049 tty->driver_data = info;
55050 info->port.tty = tty;
55052 - DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
55053 + DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
55055 /* If port is closing, signal caller to try again */
55056 if (info->port.flags & ASYNC_CLOSING){
55057 @@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
55058 mutex_unlock(&info->port.mutex);
55061 - info->port.count++;
55062 + atomic_inc(&info->port.count);
55063 spin_unlock_irqrestore(&info->netlock, flags);
55065 - if (info->port.count == 1) {
55066 + if (atomic_read(&info->port.count) == 1) {
55067 /* 1st open on this device, init hardware */
55068 retval = startup(info);
55070 @@ -715,8 +715,8 @@ cleanup:
55072 if (tty->count == 1)
55073 info->port.tty = NULL; /* tty layer will release tty struct */
55074 - if(info->port.count)
55075 - info->port.count--;
55076 + if(atomic_read(&info->port.count))
55077 + atomic_dec(&info->port.count);
55080 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
55081 @@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
55083 if (sanity_check(info, tty->name, "close"))
55085 - DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
55086 + DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
55088 if (tty_port_close_start(&info->port, tty, filp) == 0)
55090 @@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
55091 tty_port_close_end(&info->port, tty);
55092 info->port.tty = NULL;
55094 - DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
55095 + DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
55098 static void hangup(struct tty_struct *tty)
55099 @@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
55102 spin_lock_irqsave(&info->port.lock, flags);
55103 - info->port.count = 0;
55104 + atomic_set(&info->port.count, 0);
55105 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
55106 info->port.tty = NULL;
55107 spin_unlock_irqrestore(&info->port.lock, flags);
55108 @@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
55109 unsigned short new_crctype;
55111 /* return error if TTY interface open */
55112 - if (info->port.count)
55113 + if (atomic_read(&info->port.count))
55116 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
55117 @@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
55119 /* arbitrate between network and tty opens */
55120 spin_lock_irqsave(&info->netlock, flags);
55121 - if (info->port.count != 0 || info->netcount != 0) {
55122 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
55123 DBGINFO(("%s hdlc_open busy\n", dev->name));
55124 spin_unlock_irqrestore(&info->netlock, flags);
55126 @@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
55127 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
55129 /* return error if TTY interface open */
55130 - if (info->port.count)
55131 + if (atomic_read(&info->port.count))
55134 if (cmd != SIOCWANDEV)
55135 @@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
55138 spin_lock(&port->lock);
55139 - if ((port->port.count || port->netcount) &&
55140 + if ((atomic_read(&port->port.count) || port->netcount) &&
55141 port->pending_bh && !port->bh_running &&
55142 !port->bh_requested) {
55143 DBGISR(("%s bh queued\n", port->device_name));
55144 @@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55145 add_wait_queue(&port->open_wait, &wait);
55147 spin_lock_irqsave(&info->lock, flags);
55149 + atomic_dec(&port->count);
55150 spin_unlock_irqrestore(&info->lock, flags);
55151 port->blocked_open++;
55153 @@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55154 remove_wait_queue(&port->open_wait, &wait);
55156 if (!tty_hung_up_p(filp))
55158 + atomic_inc(&port->count);
55159 port->blocked_open--;
55162 diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
55163 index c3f9091..abe4601 100644
55164 --- a/drivers/tty/synclinkmp.c
55165 +++ b/drivers/tty/synclinkmp.c
55166 @@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
55168 if (debug_level >= DEBUG_LEVEL_INFO)
55169 printk("%s(%d):%s open(), old ref count = %d\n",
55170 - __FILE__,__LINE__,tty->driver->name, info->port.count);
55171 + __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
55173 /* If port is closing, signal caller to try again */
55174 if (info->port.flags & ASYNC_CLOSING){
55175 @@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
55176 spin_unlock_irqrestore(&info->netlock, flags);
55179 - info->port.count++;
55180 + atomic_inc(&info->port.count);
55181 spin_unlock_irqrestore(&info->netlock, flags);
55183 - if (info->port.count == 1) {
55184 + if (atomic_read(&info->port.count) == 1) {
55185 /* 1st open on this device, init hardware */
55186 retval = startup(info);
55188 @@ -796,8 +796,8 @@ cleanup:
55190 if (tty->count == 1)
55191 info->port.tty = NULL; /* tty layer will release tty struct */
55192 - if(info->port.count)
55193 - info->port.count--;
55194 + if(atomic_read(&info->port.count))
55195 + atomic_dec(&info->port.count);
55199 @@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
55201 if (debug_level >= DEBUG_LEVEL_INFO)
55202 printk("%s(%d):%s close() entry, count=%d\n",
55203 - __FILE__,__LINE__, info->device_name, info->port.count);
55204 + __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
55206 if (tty_port_close_start(&info->port, tty, filp) == 0)
55208 @@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
55210 if (debug_level >= DEBUG_LEVEL_INFO)
55211 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
55212 - tty->driver->name, info->port.count);
55213 + tty->driver->name, atomic_read(&info->port.count));
55216 /* Called by tty_hangup() when a hangup is signaled.
55217 @@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
55220 spin_lock_irqsave(&info->port.lock, flags);
55221 - info->port.count = 0;
55222 + atomic_set(&info->port.count, 0);
55223 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
55224 info->port.tty = NULL;
55225 spin_unlock_irqrestore(&info->port.lock, flags);
55226 @@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
55227 unsigned short new_crctype;
55229 /* return error if TTY interface open */
55230 - if (info->port.count)
55231 + if (atomic_read(&info->port.count))
55235 @@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
55237 /* arbitrate between network and tty opens */
55238 spin_lock_irqsave(&info->netlock, flags);
55239 - if (info->port.count != 0 || info->netcount != 0) {
55240 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
55241 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
55242 spin_unlock_irqrestore(&info->netlock, flags);
55244 @@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
55245 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
55247 /* return error if TTY interface open */
55248 - if (info->port.count)
55249 + if (atomic_read(&info->port.count))
55252 if (cmd != SIOCWANDEV)
55253 @@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
55254 * do not request bottom half processing if the
55255 * device is not open in a normal mode.
55257 - if ( port && (port->port.count || port->netcount) &&
55258 + if ( port && (atomic_read(&port->port.count) || port->netcount) &&
55259 port->pending_bh && !port->bh_running &&
55260 !port->bh_requested ) {
55261 if ( debug_level >= DEBUG_LEVEL_ISR )
55262 @@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55264 if (debug_level >= DEBUG_LEVEL_INFO)
55265 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
55266 - __FILE__,__LINE__, tty->driver->name, port->count );
55267 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
55269 spin_lock_irqsave(&info->lock, flags);
55271 + atomic_dec(&port->count);
55272 spin_unlock_irqrestore(&info->lock, flags);
55273 port->blocked_open++;
55275 @@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55277 if (debug_level >= DEBUG_LEVEL_INFO)
55278 printk("%s(%d):%s block_til_ready() count=%d\n",
55279 - __FILE__,__LINE__, tty->driver->name, port->count );
55280 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
55284 @@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55285 set_current_state(TASK_RUNNING);
55286 remove_wait_queue(&port->open_wait, &wait);
55287 if (!tty_hung_up_p(filp))
55289 + atomic_inc(&port->count);
55290 port->blocked_open--;
55292 if (debug_level >= DEBUG_LEVEL_INFO)
55293 printk("%s(%d):%s block_til_ready() after, count=%d\n",
55294 - __FILE__,__LINE__, tty->driver->name, port->count );
55295 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
55298 port->flags |= ASYNC_NORMAL_ACTIVE;
55299 diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
55300 index 843f2cd..7d530a6 100644
55301 --- a/drivers/tty/sysrq.c
55302 +++ b/drivers/tty/sysrq.c
55303 @@ -1086,7 +1086,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
55304 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
55305 size_t count, loff_t *ppos)
55308 + if (count && capable(CAP_SYS_ADMIN)) {
55311 if (get_user(c, buf))
55312 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
55313 index e569546..fbce20c 100644
55314 --- a/drivers/tty/tty_io.c
55315 +++ b/drivers/tty/tty_io.c
55316 @@ -3509,7 +3509,7 @@ EXPORT_SYMBOL(tty_devnum);
55318 void tty_default_fops(struct file_operations *fops)
55320 - *fops = tty_fops;
55321 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
55325 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
55326 index 3737f55..7cef448 100644
55327 --- a/drivers/tty/tty_ldisc.c
55328 +++ b/drivers/tty/tty_ldisc.c
55329 @@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
55330 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
55331 tty_ldiscs[disc] = new_ldisc;
55332 new_ldisc->num = disc;
55333 - new_ldisc->refcount = 0;
55334 + atomic_set(&new_ldisc->refcount, 0);
55335 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
55338 @@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
55341 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
55342 - if (tty_ldiscs[disc]->refcount)
55343 + if (atomic_read(&tty_ldiscs[disc]->refcount))
55346 tty_ldiscs[disc] = NULL;
55347 @@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
55349 ret = ERR_PTR(-EAGAIN);
55350 if (try_module_get(ldops->owner)) {
55351 - ldops->refcount++;
55352 + atomic_inc(&ldops->refcount);
55356 @@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
55357 unsigned long flags;
55359 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
55360 - ldops->refcount--;
55361 + atomic_dec(&ldops->refcount);
55362 module_put(ldops->owner);
55363 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
55365 diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
55366 index 40b31835..94d92ae 100644
55367 --- a/drivers/tty/tty_port.c
55368 +++ b/drivers/tty/tty_port.c
55369 @@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
55370 unsigned long flags;
55372 spin_lock_irqsave(&port->lock, flags);
55374 + atomic_set(&port->count, 0);
55375 port->flags &= ~ASYNC_NORMAL_ACTIVE;
55378 @@ -398,7 +398,7 @@ int tty_port_block_til_ready(struct tty_port *port,
55380 /* The port lock protects the port counts */
55381 spin_lock_irqsave(&port->lock, flags);
55383 + atomic_dec(&port->count);
55384 port->blocked_open++;
55385 spin_unlock_irqrestore(&port->lock, flags);
55387 @@ -440,7 +440,7 @@ int tty_port_block_til_ready(struct tty_port *port,
55388 we must not mess that up further */
55389 spin_lock_irqsave(&port->lock, flags);
55390 if (!tty_hung_up_p(filp))
55392 + atomic_inc(&port->count);
55393 port->blocked_open--;
55395 port->flags |= ASYNC_NORMAL_ACTIVE;
55396 @@ -476,19 +476,19 @@ int tty_port_close_start(struct tty_port *port,
55399 spin_lock_irqsave(&port->lock, flags);
55400 - if (tty->count == 1 && port->count != 1) {
55401 + if (tty->count == 1 && atomic_read(&port->count) != 1) {
55402 printk(KERN_WARNING
55403 "tty_port_close_start: tty->count = 1 port count = %d.\n",
55406 + atomic_read(&port->count));
55407 + atomic_set(&port->count, 1);
55409 - if (--port->count < 0) {
55410 + if (atomic_dec_return(&port->count) < 0) {
55411 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
55414 + atomic_read(&port->count));
55415 + atomic_set(&port->count, 0);
55418 - if (port->count) {
55419 + if (atomic_read(&port->count)) {
55420 spin_unlock_irqrestore(&port->lock, flags);
55423 @@ -590,7 +590,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
55426 spin_lock_irq(&port->lock);
55428 + atomic_inc(&port->count);
55429 spin_unlock_irq(&port->lock);
55430 tty_port_tty_set(port, tty);
55432 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
55433 index 8a89f6e..50b32af 100644
55434 --- a/drivers/tty/vt/keyboard.c
55435 +++ b/drivers/tty/vt/keyboard.c
55436 @@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
55437 kbd->kbdmode == VC_OFF) &&
55438 value != KVAL(K_SAK))
55439 return; /* SAK is allowed even in raw mode */
55441 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55443 + void *func = fn_handler[value];
55444 + if (func == fn_show_state || func == fn_show_ptregs ||
55445 + func == fn_show_mem)
55450 fn_handler[value](vc);
55453 @@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
55454 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
55457 - if (!capable(CAP_SYS_TTY_CONFIG))
55462 /* Ensure another thread doesn't free it under us */
55463 @@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
55464 spin_unlock_irqrestore(&kbd_event_lock, flags);
55465 return put_user(val, &user_kbe->kb_value);
55467 + if (!capable(CAP_SYS_TTY_CONFIG))
55472 if (!i && v == K_NOSUCHMAP) {
55473 @@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
55477 - if (!capable(CAP_SYS_TTY_CONFIG))
55480 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
55483 @@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
55485 return ((p && *p) ? -EOVERFLOW : 0);
55487 + if (!capable(CAP_SYS_TTY_CONFIG))
55493 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
55494 index 65bf067..b3b2e13 100644
55495 --- a/drivers/uio/uio.c
55496 +++ b/drivers/uio/uio.c
55498 #include <linux/kobject.h>
55499 #include <linux/cdev.h>
55500 #include <linux/uio_driver.h>
55501 +#include <asm/local.h>
55503 #define UIO_MAX_DEVICES (1U << MINORBITS)
55505 @@ -231,7 +232,7 @@ static ssize_t event_show(struct device *dev,
55506 struct device_attribute *attr, char *buf)
55508 struct uio_device *idev = dev_get_drvdata(dev);
55509 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
55510 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
55512 static DEVICE_ATTR_RO(event);
55514 @@ -393,7 +394,7 @@ void uio_event_notify(struct uio_info *info)
55516 struct uio_device *idev = info->uio_dev;
55518 - atomic_inc(&idev->event);
55519 + atomic_inc_unchecked(&idev->event);
55520 wake_up_interruptible(&idev->wait);
55521 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
55523 @@ -446,7 +447,7 @@ static int uio_open(struct inode *inode, struct file *filep)
55526 listener->dev = idev;
55527 - listener->event_count = atomic_read(&idev->event);
55528 + listener->event_count = atomic_read_unchecked(&idev->event);
55529 filep->private_data = listener;
55531 if (idev->info->open) {
55532 @@ -497,7 +498,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
55535 poll_wait(filep, &idev->wait, wait);
55536 - if (listener->event_count != atomic_read(&idev->event))
55537 + if (listener->event_count != atomic_read_unchecked(&idev->event))
55538 return POLLIN | POLLRDNORM;
55541 @@ -522,7 +523,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
55543 set_current_state(TASK_INTERRUPTIBLE);
55545 - event_count = atomic_read(&idev->event);
55546 + event_count = atomic_read_unchecked(&idev->event);
55547 if (event_count != listener->event_count) {
55548 if (copy_to_user(buf, &event_count, count))
55550 @@ -579,9 +580,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
55551 static int uio_find_mem_index(struct vm_area_struct *vma)
55553 struct uio_device *idev = vma->vm_private_data;
55554 + unsigned long size;
55556 if (vma->vm_pgoff < MAX_UIO_MAPS) {
55557 - if (idev->info->mem[vma->vm_pgoff].size == 0)
55558 + size = idev->info->mem[vma->vm_pgoff].size;
55561 + if (vma->vm_end - vma->vm_start > size)
55563 return (int)vma->vm_pgoff;
55565 @@ -813,7 +818,7 @@ int __uio_register_device(struct module *owner,
55566 idev->owner = owner;
55568 init_waitqueue_head(&idev->wait);
55569 - atomic_set(&idev->event, 0);
55570 + atomic_set_unchecked(&idev->event, 0);
55572 ret = uio_get_minor(idev);
55574 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
55575 index 813d4d3..a71934f 100644
55576 --- a/drivers/usb/atm/cxacru.c
55577 +++ b/drivers/usb/atm/cxacru.c
55578 @@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
55579 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
55582 - if (index < 0 || index > 0x7f)
55583 + if (index > 0x7f)
55587 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
55588 index dada014..1d0d517 100644
55589 --- a/drivers/usb/atm/usbatm.c
55590 +++ b/drivers/usb/atm/usbatm.c
55591 @@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55592 if (printk_ratelimit())
55593 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
55594 __func__, vpi, vci);
55595 - atomic_inc(&vcc->stats->rx_err);
55596 + atomic_inc_unchecked(&vcc->stats->rx_err);
55600 @@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55601 if (length > ATM_MAX_AAL5_PDU) {
55602 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
55603 __func__, length, vcc);
55604 - atomic_inc(&vcc->stats->rx_err);
55605 + atomic_inc_unchecked(&vcc->stats->rx_err);
55609 @@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55610 if (sarb->len < pdu_length) {
55611 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
55612 __func__, pdu_length, sarb->len, vcc);
55613 - atomic_inc(&vcc->stats->rx_err);
55614 + atomic_inc_unchecked(&vcc->stats->rx_err);
55618 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
55619 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
55621 - atomic_inc(&vcc->stats->rx_err);
55622 + atomic_inc_unchecked(&vcc->stats->rx_err);
55626 @@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55627 if (printk_ratelimit())
55628 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
55630 - atomic_inc(&vcc->stats->rx_drop);
55631 + atomic_inc_unchecked(&vcc->stats->rx_drop);
55635 @@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55637 vcc->push(vcc, skb);
55639 - atomic_inc(&vcc->stats->rx);
55640 + atomic_inc_unchecked(&vcc->stats->rx);
55644 @@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
55645 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
55647 usbatm_pop(vcc, skb);
55648 - atomic_inc(&vcc->stats->tx);
55649 + atomic_inc_unchecked(&vcc->stats->tx);
55651 skb = skb_dequeue(&instance->sndqueue);
55653 @@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
55655 return sprintf(page,
55656 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
55657 - atomic_read(&atm_dev->stats.aal5.tx),
55658 - atomic_read(&atm_dev->stats.aal5.tx_err),
55659 - atomic_read(&atm_dev->stats.aal5.rx),
55660 - atomic_read(&atm_dev->stats.aal5.rx_err),
55661 - atomic_read(&atm_dev->stats.aal5.rx_drop));
55662 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
55663 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
55664 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
55665 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
55666 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
55669 if (instance->disconnected)
55670 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
55671 index 2a3bbdf..91d72cf 100644
55672 --- a/drivers/usb/core/devices.c
55673 +++ b/drivers/usb/core/devices.c
55674 @@ -126,7 +126,7 @@ static const char format_endpt[] =
55675 * time it gets called.
55677 static struct device_connect_event {
55679 + atomic_unchecked_t count;
55680 wait_queue_head_t wait;
55682 .count = ATOMIC_INIT(1),
55683 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
55685 void usbfs_conn_disc_event(void)
55687 - atomic_add(2, &device_event.count);
55688 + atomic_add_unchecked(2, &device_event.count);
55689 wake_up(&device_event.wait);
55692 @@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
55694 poll_wait(file, &device_event.wait, wait);
55696 - event_count = atomic_read(&device_event.count);
55697 + event_count = atomic_read_unchecked(&device_event.count);
55698 if (file->f_version != event_count) {
55699 file->f_version = event_count;
55700 return POLLIN | POLLRDNORM;
55701 diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
55702 index 4b0448c..fc84bec 100644
55703 --- a/drivers/usb/core/devio.c
55704 +++ b/drivers/usb/core/devio.c
55705 @@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55706 struct usb_dev_state *ps = file->private_data;
55707 struct usb_device *dev = ps->dev;
55714 @@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55715 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
55716 struct usb_config_descriptor *config =
55717 (struct usb_config_descriptor *)dev->rawdescriptors[i];
55718 - unsigned int length = le16_to_cpu(config->wTotalLength);
55719 + size_t length = le16_to_cpu(config->wTotalLength);
55721 if (*ppos < pos + length) {
55723 /* The descriptor may claim to be longer than it
55724 * really is. Here is the actual allocated length. */
55725 - unsigned alloclen =
55726 + size_t alloclen =
55727 le16_to_cpu(dev->config[i].desc.wTotalLength);
55729 - len = length - (*ppos - pos);
55730 + len = length + pos - *ppos;
55734 /* Simply don't write (skip over) unallocated parts */
55735 if (alloclen > (*ppos - pos)) {
55736 - alloclen -= (*ppos - pos);
55737 + alloclen = alloclen + pos - *ppos;
55738 if (copy_to_user(buf,
55739 dev->rawdescriptors[i] + (*ppos - pos),
55740 min(len, alloclen))) {
55741 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
55742 index 45a915c..09f9735 100644
55743 --- a/drivers/usb/core/hcd.c
55744 +++ b/drivers/usb/core/hcd.c
55745 @@ -1551,7 +1551,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55748 atomic_inc(&urb->use_count);
55749 - atomic_inc(&urb->dev->urbnum);
55750 + atomic_inc_unchecked(&urb->dev->urbnum);
55751 usbmon_urb_submit(&hcd->self, urb);
55753 /* NOTE requirements on root-hub callers (usbfs and the hub
55754 @@ -1578,7 +1578,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55755 urb->hcpriv = NULL;
55756 INIT_LIST_HEAD(&urb->urb_list);
55757 atomic_dec(&urb->use_count);
55758 - atomic_dec(&urb->dev->urbnum);
55759 + atomic_dec_unchecked(&urb->dev->urbnum);
55760 if (atomic_read(&urb->reject))
55761 wake_up(&usb_kill_urb_queue);
55763 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
55764 index 3b71516..1f26579 100644
55765 --- a/drivers/usb/core/hub.c
55766 +++ b/drivers/usb/core/hub.c
55768 #include <linux/mutex.h>
55769 #include <linux/random.h>
55770 #include <linux/pm_qos.h>
55771 +#include <linux/grsecurity.h>
55773 #include <asm/uaccess.h>
55774 #include <asm/byteorder.h>
55775 @@ -4665,6 +4666,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
55780 + if (gr_handle_new_usb())
55783 if (hub_is_superspeed(hub->hdev))
55786 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
55787 index f368d20..0c30ac5 100644
55788 --- a/drivers/usb/core/message.c
55789 +++ b/drivers/usb/core/message.c
55790 @@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
55791 * Return: If successful, the number of bytes transferred. Otherwise, a negative
55794 -int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55795 +int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55796 __u8 requesttype, __u16 value, __u16 index, void *data,
55797 __u16 size, int timeout)
55799 @@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
55800 * If successful, 0. Otherwise a negative error number. The number of actual
55801 * bytes transferred will be stored in the @actual_length parameter.
55803 -int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55804 +int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55805 void *data, int len, int *actual_length, int timeout)
55807 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
55808 @@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
55809 * bytes transferred will be stored in the @actual_length parameter.
55812 -int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55813 +int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55814 void *data, int len, int *actual_length, int timeout)
55817 diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
55818 index d269738..7340cd7 100644
55819 --- a/drivers/usb/core/sysfs.c
55820 +++ b/drivers/usb/core/sysfs.c
55821 @@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
55822 struct usb_device *udev;
55824 udev = to_usb_device(dev);
55825 - return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
55826 + return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
55828 static DEVICE_ATTR_RO(urbnum);
55830 diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
55831 index 8d5b2f4..3896940 100644
55832 --- a/drivers/usb/core/usb.c
55833 +++ b/drivers/usb/core/usb.c
55834 @@ -447,7 +447,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
55835 set_dev_node(&dev->dev, dev_to_node(bus->controller));
55836 dev->state = USB_STATE_ATTACHED;
55837 dev->lpm_disable_count = 1;
55838 - atomic_set(&dev->urbnum, 0);
55839 + atomic_set_unchecked(&dev->urbnum, 0);
55841 INIT_LIST_HEAD(&dev->ep0.urb_list);
55842 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
55843 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
55844 index 8cfc319..4868255 100644
55845 --- a/drivers/usb/early/ehci-dbgp.c
55846 +++ b/drivers/usb/early/ehci-dbgp.c
55847 @@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
55850 static struct kgdb_io kgdbdbgp_io_ops;
55851 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
55852 +static struct kgdb_io kgdbdbgp_io_ops_console;
55853 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
55855 #define dbgp_kgdb_mode (0)
55857 @@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
55858 .write_char = kgdbdbgp_write_char,
55861 +static struct kgdb_io kgdbdbgp_io_ops_console = {
55862 + .name = "kgdbdbgp",
55863 + .read_char = kgdbdbgp_read_char,
55864 + .write_char = kgdbdbgp_write_char,
55868 static int kgdbdbgp_wait_time;
55870 static int __init kgdbdbgp_parse_config(char *str)
55871 @@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
55873 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
55875 - kgdb_register_io_module(&kgdbdbgp_io_ops);
55876 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
55877 + if (early_dbgp_console.index != -1)
55878 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
55880 + kgdb_register_io_module(&kgdbdbgp_io_ops);
55884 diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
55885 index 0495c94..289e201 100644
55886 --- a/drivers/usb/gadget/configfs.c
55887 +++ b/drivers/usb/gadget/configfs.c
55888 @@ -571,7 +571,7 @@ static struct config_group *function_make(
55890 return ERR_CAST(fi);
55892 - ret = config_item_set_name(&fi->group.cg_item, name);
55893 + ret = config_item_set_name(&fi->group.cg_item, "%s", name);
55895 usb_put_function_instance(fi);
55896 return ERR_PTR(ret);
55897 diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
55898 index 7856b33..8b7fe09 100644
55899 --- a/drivers/usb/gadget/function/f_uac1.c
55900 +++ b/drivers/usb/gadget/function/f_uac1.c
55902 #include <linux/module.h>
55903 #include <linux/device.h>
55904 #include <linux/atomic.h>
55905 +#include <linux/module.h>
55907 #include "u_uac1.h"
55909 diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
55910 index 7ee05793..2e31e99 100644
55911 --- a/drivers/usb/gadget/function/u_serial.c
55912 +++ b/drivers/usb/gadget/function/u_serial.c
55913 @@ -732,9 +732,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55914 spin_lock_irq(&port->port_lock);
55916 /* already open? Great. */
55917 - if (port->port.count) {
55918 + if (atomic_read(&port->port.count)) {
55920 - port->port.count++;
55921 + atomic_inc(&port->port.count);
55923 /* currently opening/closing? wait ... */
55924 } else if (port->openclose) {
55925 @@ -793,7 +793,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55926 tty->driver_data = port;
55927 port->port.tty = tty;
55929 - port->port.count = 1;
55930 + atomic_set(&port->port.count, 1);
55931 port->openclose = false;
55933 /* if connected, start the I/O stream */
55934 @@ -835,11 +835,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55936 spin_lock_irq(&port->port_lock);
55938 - if (port->port.count != 1) {
55939 - if (port->port.count == 0)
55940 + if (atomic_read(&port->port.count) != 1) {
55941 + if (atomic_read(&port->port.count) == 0)
55944 - --port->port.count;
55945 + atomic_dec(&port->port.count);
55949 @@ -849,7 +849,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55950 * and sleep if necessary
55952 port->openclose = true;
55953 - port->port.count = 0;
55954 + atomic_set(&port->port.count, 0);
55956 gser = port->port_usb;
55957 if (gser && gser->disconnect)
55958 @@ -1065,7 +1065,7 @@ static int gs_closed(struct gs_port *port)
55961 spin_lock_irq(&port->port_lock);
55962 - cond = (port->port.count == 0) && !port->openclose;
55963 + cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
55964 spin_unlock_irq(&port->port_lock);
55967 @@ -1208,7 +1208,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
55968 /* if it's already open, start I/O ... and notify the serial
55969 * protocol about open/close status (connect/disconnect).
55971 - if (port->port.count) {
55972 + if (atomic_read(&port->port.count)) {
55973 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
55976 @@ -1255,7 +1255,7 @@ void gserial_disconnect(struct gserial *gser)
55978 port->port_usb = NULL;
55979 gser->ioport = NULL;
55980 - if (port->port.count > 0 || port->openclose) {
55981 + if (atomic_read(&port->port.count) > 0 || port->openclose) {
55982 wake_up_interruptible(&port->drain_wait);
55983 if (port->port.tty)
55984 tty_hangup(port->port.tty);
55985 @@ -1271,7 +1271,7 @@ void gserial_disconnect(struct gserial *gser)
55987 /* finally, free any unused/unusable I/O buffers */
55988 spin_lock_irqsave(&port->port_lock, flags);
55989 - if (port->port.count == 0 && !port->openclose)
55990 + if (atomic_read(&port->port.count) == 0 && !port->openclose)
55991 gs_buf_free(&port->port_write_buf);
55992 gs_free_requests(gser->out, &port->read_pool, NULL);
55993 gs_free_requests(gser->out, &port->read_queue, NULL);
55994 diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
55995 index c78c841..48fd281 100644
55996 --- a/drivers/usb/gadget/function/u_uac1.c
55997 +++ b/drivers/usb/gadget/function/u_uac1.c
55999 #include <linux/ctype.h>
56000 #include <linux/random.h>
56001 #include <linux/syscalls.h>
56002 +#include <linux/module.h>
56004 #include "u_uac1.h"
56006 diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
56007 index 6920844..480bb7e 100644
56008 --- a/drivers/usb/host/ehci-hub.c
56009 +++ b/drivers/usb/host/ehci-hub.c
56010 @@ -772,7 +772,7 @@ static struct urb *request_single_step_set_feature_urb(
56011 urb->transfer_flags = URB_DIR_IN;
56013 atomic_inc(&urb->use_count);
56014 - atomic_inc(&urb->dev->urbnum);
56015 + atomic_inc_unchecked(&urb->dev->urbnum);
56016 urb->setup_dma = dma_map_single(
56017 hcd->self.controller,
56019 @@ -839,7 +839,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
56020 urb->status = -EINPROGRESS;
56022 atomic_inc(&urb->use_count);
56023 - atomic_inc(&urb->dev->urbnum);
56024 + atomic_inc_unchecked(&urb->dev->urbnum);
56025 retval = submit_single_step_set_feature(hcd, urb, 0);
56026 if (!retval && !wait_for_completion_timeout(&done,
56027 msecs_to_jiffies(2000))) {
56028 diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
56029 index 1db0626..4948782 100644
56030 --- a/drivers/usb/host/hwa-hc.c
56031 +++ b/drivers/usb/host/hwa-hc.c
56032 @@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
56033 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
56034 struct wahc *wa = &hwahc->wa;
56035 struct device *dev = &wa->usb_iface->dev;
56036 - u8 mas_le[UWB_NUM_MAS/8];
56037 + u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
56039 + if (mas_le == NULL)
56042 /* Set the stream index */
56043 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
56044 @@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
56045 WUSB_REQ_SET_WUSB_MAS,
56046 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
56047 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
56048 - mas_le, 32, USB_CTRL_SET_TIMEOUT);
56049 + mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
56051 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
56058 diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
56059 index a0a3827..d7ec10b 100644
56060 --- a/drivers/usb/misc/appledisplay.c
56061 +++ b/drivers/usb/misc/appledisplay.c
56062 @@ -84,7 +84,7 @@ struct appledisplay {
56063 struct mutex sysfslock; /* concurrent read and write */
56066 -static atomic_t count_displays = ATOMIC_INIT(0);
56067 +static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
56068 static struct workqueue_struct *wq;
56070 static void appledisplay_complete(struct urb *urb)
56071 @@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
56073 /* Register backlight device */
56074 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
56075 - atomic_inc_return(&count_displays) - 1);
56076 + atomic_inc_return_unchecked(&count_displays) - 1);
56077 memset(&props, 0, sizeof(struct backlight_properties));
56078 props.type = BACKLIGHT_RAW;
56079 props.max_brightness = 0xff;
56080 diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
56081 index 3806e70..55c508b 100644
56082 --- a/drivers/usb/serial/console.c
56083 +++ b/drivers/usb/serial/console.c
56084 @@ -126,7 +126,7 @@ static int usb_console_setup(struct console *co, char *options)
56088 - ++port->port.count;
56089 + atomic_inc(&port->port.count);
56090 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
56091 if (serial->type->set_termios) {
56093 @@ -175,7 +175,7 @@ static int usb_console_setup(struct console *co, char *options)
56095 /* Now that any required fake tty operations are completed restore
56096 * the tty port count */
56097 - --port->port.count;
56098 + atomic_dec(&port->port.count);
56099 /* The console is special in terms of closing the device so
56100 * indicate this port is now acting as a system console. */
56101 port->port.console = 1;
56102 @@ -188,7 +188,7 @@ static int usb_console_setup(struct console *co, char *options)
56106 - port->port.count = 0;
56107 + atomic_set(&port->port.count, 0);
56108 usb_autopm_put_interface(serial->interface);
56109 error_get_interface:
56110 usb_serial_put(serial);
56111 @@ -199,7 +199,7 @@ static int usb_console_setup(struct console *co, char *options)
56112 static void usb_console_write(struct console *co,
56113 const char *buf, unsigned count)
56115 - static struct usbcons_info *info = &usbcons_info;
56116 + struct usbcons_info *info = &usbcons_info;
56117 struct usb_serial_port *port = info->port;
56118 struct usb_serial *serial;
56119 int retval = -ENODEV;
56120 diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
56121 index 307e339..6aa97cb 100644
56122 --- a/drivers/usb/storage/usb.h
56123 +++ b/drivers/usb/storage/usb.h
56124 @@ -63,7 +63,7 @@ struct us_unusual_dev {
56127 int (*initFunction)(struct us_data *);
56132 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
56133 diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
56134 index a863a98..d272795 100644
56135 --- a/drivers/usb/usbip/vhci.h
56136 +++ b/drivers/usb/usbip/vhci.h
56137 @@ -83,7 +83,7 @@ struct vhci_hcd {
56138 unsigned resuming:1;
56139 unsigned long re_timeout;
56142 + atomic_unchecked_t seqnum;
56146 diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
56147 index e9ef1ec..c3a0b04 100644
56148 --- a/drivers/usb/usbip/vhci_hcd.c
56149 +++ b/drivers/usb/usbip/vhci_hcd.c
56150 @@ -440,7 +440,7 @@ static void vhci_tx_urb(struct urb *urb)
56152 spin_lock(&vdev->priv_lock);
56154 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
56155 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
56156 if (priv->seqnum == 0xffff)
56157 dev_info(&urb->dev->dev, "seqnum max\n");
56159 @@ -685,7 +685,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
56163 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
56164 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
56165 if (unlink->seqnum == 0xffff)
56166 pr_info("seqnum max\n");
56168 @@ -889,7 +889,7 @@ static int vhci_start(struct usb_hcd *hcd)
56169 vdev->rhport = rhport;
56172 - atomic_set(&vhci->seqnum, 0);
56173 + atomic_set_unchecked(&vhci->seqnum, 0);
56174 spin_lock_init(&vhci->lock);
56176 hcd->power_budget = 0; /* no limit */
56177 diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
56178 index 00e4a54..d676f85 100644
56179 --- a/drivers/usb/usbip/vhci_rx.c
56180 +++ b/drivers/usb/usbip/vhci_rx.c
56181 @@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
56183 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
56184 pr_info("max seqnum %d\n",
56185 - atomic_read(&the_controller->seqnum));
56186 + atomic_read_unchecked(&the_controller->seqnum));
56187 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
56190 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
56191 index edc7267..9f65ce2 100644
56192 --- a/drivers/usb/wusbcore/wa-hc.h
56193 +++ b/drivers/usb/wusbcore/wa-hc.h
56194 @@ -240,7 +240,7 @@ struct wahc {
56195 spinlock_t xfer_list_lock;
56196 struct work_struct xfer_enqueue_work;
56197 struct work_struct xfer_error_work;
56198 - atomic_t xfer_id_count;
56199 + atomic_unchecked_t xfer_id_count;
56201 kernel_ulong_t quirks;
56203 @@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
56204 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
56205 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
56206 wa->dto_in_use = 0;
56207 - atomic_set(&wa->xfer_id_count, 1);
56208 + atomic_set_unchecked(&wa->xfer_id_count, 1);
56209 /* init the buf in URBs */
56210 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
56211 usb_init_urb(&(wa->buf_in_urbs[index]));
56212 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
56213 index 69af4fd..da390d7 100644
56214 --- a/drivers/usb/wusbcore/wa-xfer.c
56215 +++ b/drivers/usb/wusbcore/wa-xfer.c
56216 @@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
56218 static void wa_xfer_id_init(struct wa_xfer *xfer)
56220 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
56221 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
56224 /* Return the xfer's ID. */
56225 diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
56226 index e1278fe..7fdeac4 100644
56227 --- a/drivers/vfio/vfio.c
56228 +++ b/drivers/vfio/vfio.c
56229 @@ -517,7 +517,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
56232 /* TODO Prevent device auto probing */
56233 - WARN("Device %s added to live group %d!\n", dev_name(dev),
56234 + WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
56235 iommu_group_id(group->iommu_group));
56238 diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
56239 index 3bb02c6..a01ff38 100644
56240 --- a/drivers/vhost/vringh.c
56241 +++ b/drivers/vhost/vringh.c
56242 @@ -551,7 +551,7 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
56243 static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
56246 - int rc = get_user(v, (__force __virtio16 __user *)p);
56247 + int rc = get_user(v, (__force_user __virtio16 *)p);
56248 *val = vringh16_to_cpu(vrh, v);
56251 @@ -559,12 +559,12 @@ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio
56252 static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
56254 __virtio16 v = cpu_to_vringh16(vrh, val);
56255 - return put_user(v, (__force __virtio16 __user *)p);
56256 + return put_user(v, (__force_user __virtio16 *)p);
56259 static inline int copydesc_user(void *dst, const void *src, size_t len)
56261 - return copy_from_user(dst, (__force void __user *)src, len) ?
56262 + return copy_from_user(dst, (void __force_user *)src, len) ?
56266 @@ -572,19 +572,19 @@ static inline int putused_user(struct vring_used_elem *dst,
56267 const struct vring_used_elem *src,
56270 - return copy_to_user((__force void __user *)dst, src,
56271 + return copy_to_user((void __force_user *)dst, src,
56272 sizeof(*dst) * num) ? -EFAULT : 0;
56275 static inline int xfer_from_user(void *src, void *dst, size_t len)
56277 - return copy_from_user(dst, (__force void __user *)src, len) ?
56278 + return copy_from_user(dst, (void __force_user *)src, len) ?
56282 static inline int xfer_to_user(void *dst, void *src, size_t len)
56284 - return copy_to_user((__force void __user *)dst, src, len) ?
56285 + return copy_to_user((void __force_user *)dst, src, len) ?
56289 @@ -621,9 +621,9 @@ int vringh_init_user(struct vringh *vrh, u64 features,
56290 vrh->last_used_idx = 0;
56291 vrh->vring.num = num;
56292 /* vring expects kernel addresses, but only used via accessors. */
56293 - vrh->vring.desc = (__force struct vring_desc *)desc;
56294 - vrh->vring.avail = (__force struct vring_avail *)avail;
56295 - vrh->vring.used = (__force struct vring_used *)used;
56296 + vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
56297 + vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
56298 + vrh->vring.used = (__force_kernel struct vring_used *)used;
56301 EXPORT_SYMBOL(vringh_init_user);
56302 @@ -826,7 +826,7 @@ static inline int getu16_kern(const struct vringh *vrh,
56304 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
56306 - ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
56307 + ACCESS_ONCE_RW(*p) = cpu_to_vringh16(vrh, val);
56311 diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
56312 index 84a110a..96312c3 100644
56313 --- a/drivers/video/backlight/kb3886_bl.c
56314 +++ b/drivers/video/backlight/kb3886_bl.c
56315 @@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
56316 static unsigned long kb3886bl_flags;
56317 #define KB3886BL_SUSPENDED 0x01
56319 -static struct dmi_system_id kb3886bl_device_table[] __initdata = {
56320 +static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
56322 .ident = "Sahara Touch-iT",
56324 diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
56325 index 1b0b233..6f34c2c 100644
56326 --- a/drivers/video/fbdev/arcfb.c
56327 +++ b/drivers/video/fbdev/arcfb.c
56328 @@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
56332 - if ((count + p) > fbmemlength) {
56333 + if (count > (fbmemlength - p)) {
56334 count = fbmemlength - p;
56337 diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
56338 index 0156954..c07d4e0 100644
56339 --- a/drivers/video/fbdev/aty/aty128fb.c
56340 +++ b/drivers/video/fbdev/aty/aty128fb.c
56341 @@ -149,7 +149,7 @@ enum {
56344 /* Must match above enum */
56345 -static char * const r128_family[] = {
56346 +static const char * const r128_family[] = {
56350 diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
56351 index 8789e48..698fe4c 100644
56352 --- a/drivers/video/fbdev/aty/atyfb_base.c
56353 +++ b/drivers/video/fbdev/aty/atyfb_base.c
56354 @@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
56355 par->accel_flags = var->accel_flags; /* hack */
56357 if (var->accel_flags) {
56358 - info->fbops->fb_sync = atyfb_sync;
56359 + pax_open_kernel();
56360 + *(void **)&info->fbops->fb_sync = atyfb_sync;
56361 + pax_close_kernel();
56362 info->flags &= ~FBINFO_HWACCEL_DISABLED;
56364 - info->fbops->fb_sync = NULL;
56365 + pax_open_kernel();
56366 + *(void **)&info->fbops->fb_sync = NULL;
56367 + pax_close_kernel();
56368 info->flags |= FBINFO_HWACCEL_DISABLED;
56371 diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
56372 index 2fa0317..4983f2a 100644
56373 --- a/drivers/video/fbdev/aty/mach64_cursor.c
56374 +++ b/drivers/video/fbdev/aty/mach64_cursor.c
56376 #include "../core/fb_draw.h"
56378 #include <asm/io.h>
56379 +#include <asm/pgtable.h>
56382 #include <asm/fbio.h>
56383 @@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
56384 info->sprite.buf_align = 16; /* and 64 lines tall. */
56385 info->sprite.flags = FB_PIXMAP_IO;
56387 - info->fbops->fb_cursor = atyfb_cursor;
56388 + pax_open_kernel();
56389 + *(void **)&info->fbops->fb_cursor = atyfb_cursor;
56390 + pax_close_kernel();
56394 diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
56395 index d6cab1f..112f680 100644
56396 --- a/drivers/video/fbdev/core/fb_defio.c
56397 +++ b/drivers/video/fbdev/core/fb_defio.c
56398 @@ -207,7 +207,9 @@ void fb_deferred_io_init(struct fb_info *info)
56401 mutex_init(&fbdefio->lock);
56402 - info->fbops->fb_mmap = fb_deferred_io_mmap;
56403 + pax_open_kernel();
56404 + *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
56405 + pax_close_kernel();
56406 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
56407 INIT_LIST_HEAD(&fbdefio->pagelist);
56408 if (fbdefio->delay == 0) /* set a default of 1 s */
56409 @@ -238,7 +240,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
56410 page->mapping = NULL;
56413 - info->fbops->fb_mmap = NULL;
56414 + *(void **)&info->fbops->fb_mmap = NULL;
56415 mutex_destroy(&fbdefio->lock);
56417 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
56418 diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
56419 index 0705d88..d9429bf 100644
56420 --- a/drivers/video/fbdev/core/fbmem.c
56421 +++ b/drivers/video/fbdev/core/fbmem.c
56422 @@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
56426 - err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
56427 + err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
56429 data = (__u32) (unsigned long) fix->smem_start;
56430 err |= put_user(data, &fix32->smem_start);
56431 diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
56432 index 807ee22..7814cd6 100644
56433 --- a/drivers/video/fbdev/hyperv_fb.c
56434 +++ b/drivers/video/fbdev/hyperv_fb.c
56435 @@ -240,7 +240,7 @@ static uint screen_fb_size;
56436 static inline int synthvid_send(struct hv_device *hdev,
56437 struct synthvid_msg *msg)
56439 - static atomic64_t request_id = ATOMIC64_INIT(0);
56440 + static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
56443 msg->pipe_hdr.type = PIPE_MSG_DATA;
56444 @@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
56446 ret = vmbus_sendpacket(hdev->channel, msg,
56447 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
56448 - atomic64_inc_return(&request_id),
56449 + atomic64_inc_return_unchecked(&request_id),
56450 VM_PKT_DATA_INBAND, 0);
56453 diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
56454 index 7672d2e..b56437f 100644
56455 --- a/drivers/video/fbdev/i810/i810_accel.c
56456 +++ b/drivers/video/fbdev/i810/i810_accel.c
56457 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
56460 printk("ringbuffer lockup!!!\n");
56461 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
56462 i810_report_error(mmio);
56463 par->dev_flags |= LOCKUP;
56464 info->pixmap.scan_align = 1;
56465 diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
56466 index a01147f..5d896f8 100644
56467 --- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
56468 +++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
56469 @@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
56471 #ifdef CONFIG_FB_MATROX_MYSTIQUE
56472 struct matrox_switch matrox_mystique = {
56473 - MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
56474 + .preinit = MGA1064_preinit,
56475 + .reset = MGA1064_reset,
56476 + .init = MGA1064_init,
56477 + .restore = MGA1064_restore,
56479 EXPORT_SYMBOL(matrox_mystique);
56482 #ifdef CONFIG_FB_MATROX_G
56483 struct matrox_switch matrox_G100 = {
56484 - MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
56485 + .preinit = MGAG100_preinit,
56486 + .reset = MGAG100_reset,
56487 + .init = MGAG100_init,
56488 + .restore = MGAG100_restore,
56490 EXPORT_SYMBOL(matrox_G100);
56492 diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
56493 index 195ad7c..09743fc 100644
56494 --- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
56495 +++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
56496 @@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
56499 struct matrox_switch matrox_millennium = {
56500 - Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
56501 + .preinit = Ti3026_preinit,
56502 + .reset = Ti3026_reset,
56503 + .init = Ti3026_init,
56504 + .restore = Ti3026_restore
56506 EXPORT_SYMBOL(matrox_millennium);
56508 diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56509 index fe92eed..106e085 100644
56510 --- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56511 +++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56512 @@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
56513 struct mb862xxfb_par *par = info->par;
56515 if (info->var.bits_per_pixel == 32) {
56516 - info->fbops->fb_fillrect = cfb_fillrect;
56517 - info->fbops->fb_copyarea = cfb_copyarea;
56518 - info->fbops->fb_imageblit = cfb_imageblit;
56519 + pax_open_kernel();
56520 + *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
56521 + *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
56522 + *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
56523 + pax_close_kernel();
56525 outreg(disp, GC_L0EM, 3);
56526 - info->fbops->fb_fillrect = mb86290fb_fillrect;
56527 - info->fbops->fb_copyarea = mb86290fb_copyarea;
56528 - info->fbops->fb_imageblit = mb86290fb_imageblit;
56529 + pax_open_kernel();
56530 + *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
56531 + *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
56532 + *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
56533 + pax_close_kernel();
56535 outreg(draw, GDC_REG_DRAW_BASE, 0);
56536 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
56537 diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
56538 index 4273c6e..b413013 100644
56539 --- a/drivers/video/fbdev/nvidia/nvidia.c
56540 +++ b/drivers/video/fbdev/nvidia/nvidia.c
56541 @@ -665,19 +665,23 @@ static int nvidiafb_set_par(struct fb_info *info)
56542 info->fix.line_length = (info->var.xres_virtual *
56543 info->var.bits_per_pixel) >> 3;
56544 if (info->var.accel_flags) {
56545 - info->fbops->fb_imageblit = nvidiafb_imageblit;
56546 - info->fbops->fb_fillrect = nvidiafb_fillrect;
56547 - info->fbops->fb_copyarea = nvidiafb_copyarea;
56548 - info->fbops->fb_sync = nvidiafb_sync;
56549 + pax_open_kernel();
56550 + *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
56551 + *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
56552 + *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
56553 + *(void **)&info->fbops->fb_sync = nvidiafb_sync;
56554 + pax_close_kernel();
56555 info->pixmap.scan_align = 4;
56556 info->flags &= ~FBINFO_HWACCEL_DISABLED;
56557 info->flags |= FBINFO_READS_FAST;
56558 NVResetGraphics(info);
56560 - info->fbops->fb_imageblit = cfb_imageblit;
56561 - info->fbops->fb_fillrect = cfb_fillrect;
56562 - info->fbops->fb_copyarea = cfb_copyarea;
56563 - info->fbops->fb_sync = NULL;
56564 + pax_open_kernel();
56565 + *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
56566 + *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
56567 + *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
56568 + *(void **)&info->fbops->fb_sync = NULL;
56569 + pax_close_kernel();
56570 info->pixmap.scan_align = 1;
56571 info->flags |= FBINFO_HWACCEL_DISABLED;
56572 info->flags &= ~FBINFO_READS_FAST;
56573 @@ -1169,8 +1173,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
56574 info->pixmap.size = 8 * 1024;
56575 info->pixmap.flags = FB_PIXMAP_SYSTEM;
56578 - info->fbops->fb_cursor = NULL;
56580 + pax_open_kernel();
56581 + *(void **)&info->fbops->fb_cursor = NULL;
56582 + pax_close_kernel();
56585 info->var.accel_flags = (!noaccel);
56587 diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
56588 index ef5b902..47cf7f5 100644
56589 --- a/drivers/video/fbdev/omap2/dss/display.c
56590 +++ b/drivers/video/fbdev/omap2/dss/display.c
56591 @@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
56592 if (dssdev->name == NULL)
56593 dssdev->name = dssdev->alias;
56595 + pax_open_kernel();
56596 if (drv && drv->get_resolution == NULL)
56597 - drv->get_resolution = omapdss_default_get_resolution;
56598 + *(void **)&drv->get_resolution = omapdss_default_get_resolution;
56599 if (drv && drv->get_recommended_bpp == NULL)
56600 - drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
56601 + *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
56602 if (drv && drv->get_timings == NULL)
56603 - drv->get_timings = omapdss_default_get_timings;
56604 + *(void **)&drv->get_timings = omapdss_default_get_timings;
56605 + pax_close_kernel();
56607 mutex_lock(&panel_list_mutex);
56608 list_add_tail(&dssdev->panel_list, &panel_list);
56609 diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
56610 index 83433cb..71e9b98 100644
56611 --- a/drivers/video/fbdev/s1d13xxxfb.c
56612 +++ b/drivers/video/fbdev/s1d13xxxfb.c
56613 @@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
56616 case S1D13506_PROD_ID: /* activate acceleration */
56617 - s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56618 - s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56619 + pax_open_kernel();
56620 + *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56621 + *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56622 + pax_close_kernel();
56623 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
56624 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
56626 diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
56627 index 82c0a8c..42499a1 100644
56628 --- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
56629 +++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
56630 @@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
56633 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
56634 - lcdc_sys_write_index,
56635 - lcdc_sys_write_data,
56636 - lcdc_sys_read_data,
56637 + .write_index = lcdc_sys_write_index,
56638 + .write_data = lcdc_sys_write_data,
56639 + .read_data = lcdc_sys_read_data,
56642 static int sh_mobile_lcdc_sginit(struct fb_info *info,
56643 diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
56644 index 9279e5f..d5f5276 100644
56645 --- a/drivers/video/fbdev/smscufx.c
56646 +++ b/drivers/video/fbdev/smscufx.c
56647 @@ -1174,7 +1174,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
56648 fb_deferred_io_cleanup(info);
56649 kfree(info->fbdefio);
56650 info->fbdefio = NULL;
56651 - info->fbops->fb_mmap = ufx_ops_mmap;
56652 + pax_open_kernel();
56653 + *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
56654 + pax_close_kernel();
56657 pr_debug("released /dev/fb%d user=%d count=%d",
56658 diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
56659 index ff2b873..626a8d5 100644
56660 --- a/drivers/video/fbdev/udlfb.c
56661 +++ b/drivers/video/fbdev/udlfb.c
56662 @@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
56663 dlfb_urb_completion(urb);
56666 - atomic_add(bytes_sent, &dev->bytes_sent);
56667 - atomic_add(bytes_identical, &dev->bytes_identical);
56668 - atomic_add(width*height*2, &dev->bytes_rendered);
56669 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56670 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56671 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
56672 end_cycles = get_cycles();
56673 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
56674 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56675 >> 10)), /* Kcycles */
56676 &dev->cpu_kcycles_used);
56678 @@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
56679 dlfb_urb_completion(urb);
56682 - atomic_add(bytes_sent, &dev->bytes_sent);
56683 - atomic_add(bytes_identical, &dev->bytes_identical);
56684 - atomic_add(bytes_rendered, &dev->bytes_rendered);
56685 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56686 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56687 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
56688 end_cycles = get_cycles();
56689 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
56690 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56691 >> 10)), /* Kcycles */
56692 &dev->cpu_kcycles_used);
56694 @@ -991,7 +991,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
56695 fb_deferred_io_cleanup(info);
56696 kfree(info->fbdefio);
56697 info->fbdefio = NULL;
56698 - info->fbops->fb_mmap = dlfb_ops_mmap;
56699 + pax_open_kernel();
56700 + *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
56701 + pax_close_kernel();
56704 pr_warn("released /dev/fb%d user=%d count=%d\n",
56705 @@ -1373,7 +1375,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
56706 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56707 struct dlfb_data *dev = fb_info->par;
56708 return snprintf(buf, PAGE_SIZE, "%u\n",
56709 - atomic_read(&dev->bytes_rendered));
56710 + atomic_read_unchecked(&dev->bytes_rendered));
56713 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56714 @@ -1381,7 +1383,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56715 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56716 struct dlfb_data *dev = fb_info->par;
56717 return snprintf(buf, PAGE_SIZE, "%u\n",
56718 - atomic_read(&dev->bytes_identical));
56719 + atomic_read_unchecked(&dev->bytes_identical));
56722 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56723 @@ -1389,7 +1391,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56724 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56725 struct dlfb_data *dev = fb_info->par;
56726 return snprintf(buf, PAGE_SIZE, "%u\n",
56727 - atomic_read(&dev->bytes_sent));
56728 + atomic_read_unchecked(&dev->bytes_sent));
56731 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56732 @@ -1397,7 +1399,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56733 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56734 struct dlfb_data *dev = fb_info->par;
56735 return snprintf(buf, PAGE_SIZE, "%u\n",
56736 - atomic_read(&dev->cpu_kcycles_used));
56737 + atomic_read_unchecked(&dev->cpu_kcycles_used));
56740 static ssize_t edid_show(
56741 @@ -1457,10 +1459,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
56742 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56743 struct dlfb_data *dev = fb_info->par;
56745 - atomic_set(&dev->bytes_rendered, 0);
56746 - atomic_set(&dev->bytes_identical, 0);
56747 - atomic_set(&dev->bytes_sent, 0);
56748 - atomic_set(&dev->cpu_kcycles_used, 0);
56749 + atomic_set_unchecked(&dev->bytes_rendered, 0);
56750 + atomic_set_unchecked(&dev->bytes_identical, 0);
56751 + atomic_set_unchecked(&dev->bytes_sent, 0);
56752 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
56756 diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
56757 index d32d1c4..46722e6 100644
56758 --- a/drivers/video/fbdev/uvesafb.c
56759 +++ b/drivers/video/fbdev/uvesafb.c
56761 #include <linux/io.h>
56762 #include <linux/mutex.h>
56763 #include <linux/slab.h>
56764 +#include <linux/moduleloader.h>
56765 #include <video/edid.h>
56766 #include <video/uvesafb.h>
56768 @@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
56769 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
56770 par->pmi_setpal = par->ypan = 0;
56773 +#ifdef CONFIG_PAX_KERNEXEC
56774 +#ifdef CONFIG_MODULES
56775 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
56777 + if (!par->pmi_code) {
56778 + par->pmi_setpal = par->ypan = 0;
56783 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
56784 + task->t.regs.edi);
56786 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56787 + pax_open_kernel();
56788 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
56789 + pax_close_kernel();
56791 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
56792 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
56794 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
56795 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
56798 printk(KERN_INFO "uvesafb: protected mode interface info at "
56800 (u16)task->t.regs.es, (u16)task->t.regs.edi);
56801 @@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
56804 if (par->pmi_setpal || par->ypan) {
56805 +#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
56806 if (__supported_pte_mask & _PAGE_NX) {
56807 par->pmi_setpal = par->ypan = 0;
56808 printk(KERN_WARNING "uvesafb: NX protection is active, "
56809 "better not use the PMI.\n");
56813 uvesafb_vbe_getpmi(task, par);
56817 /* The protected mode interface is not available on non-x86. */
56818 @@ -1452,8 +1476,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56819 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
56821 /* Disable blanking if the user requested so. */
56823 - info->fbops->fb_blank = NULL;
56825 + pax_open_kernel();
56826 + *(void **)&info->fbops->fb_blank = NULL;
56827 + pax_close_kernel();
56831 * Find out how much IO memory is required for the mode with
56832 @@ -1524,8 +1551,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56833 info->flags = FBINFO_FLAG_DEFAULT |
56834 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
56837 - info->fbops->fb_pan_display = NULL;
56838 + if (!par->ypan) {
56839 + pax_open_kernel();
56840 + *(void **)&info->fbops->fb_pan_display = NULL;
56841 + pax_close_kernel();
56845 static void uvesafb_init_mtrr(struct fb_info *info)
56846 @@ -1786,6 +1816,11 @@ out_mode:
56848 kfree(par->vbe_modes);
56850 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56851 + if (par->pmi_code)
56852 + module_memfree_exec(par->pmi_code);
56855 framebuffer_release(info);
56858 @@ -1810,6 +1845,11 @@ static int uvesafb_remove(struct platform_device *dev)
56859 kfree(par->vbe_state_orig);
56860 kfree(par->vbe_state_saved);
56862 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56863 + if (par->pmi_code)
56864 + module_memfree_exec(par->pmi_code);
56867 framebuffer_release(info);
56870 diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
56871 index d79a0ac..2d0c3d4 100644
56872 --- a/drivers/video/fbdev/vesafb.c
56873 +++ b/drivers/video/fbdev/vesafb.c
56877 #include <linux/module.h>
56878 +#include <linux/moduleloader.h>
56879 #include <linux/kernel.h>
56880 #include <linux/errno.h>
56881 #include <linux/string.h>
56882 @@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
56883 static int vram_total; /* Set total amount of memory */
56884 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
56885 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
56886 -static void (*pmi_start)(void) __read_mostly;
56887 -static void (*pmi_pal) (void) __read_mostly;
56888 +static void (*pmi_start)(void) __read_only;
56889 +static void (*pmi_pal) (void) __read_only;
56890 static int depth __read_mostly;
56891 static int vga_compat __read_mostly;
56892 /* --------------------------------------------------------------------- */
56893 @@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
56894 unsigned int size_remap;
56895 unsigned int size_total;
56896 char *option = NULL;
56897 + void *pmi_code = NULL;
56899 /* ignore error return of fb_get_options */
56900 fb_get_options("vesafb", &option);
56901 @@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
56902 size_remap = size_total;
56903 vesafb_fix.smem_len = size_remap;
56906 - screen_info.vesapm_seg = 0;
56909 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
56910 printk(KERN_WARNING
56911 "vesafb: cannot reserve video memory at 0x%lx\n",
56912 @@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
56913 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
56914 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
56918 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56919 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
56921 +#elif !defined(CONFIG_PAX_KERNEXEC)
56926 + screen_info.vesapm_seg = 0;
56928 if (screen_info.vesapm_seg) {
56929 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
56930 - screen_info.vesapm_seg,screen_info.vesapm_off);
56931 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
56932 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
56935 if (screen_info.vesapm_seg < 0xc000)
56936 @@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
56938 if (ypan || pmi_setpal) {
56939 unsigned short *pmi_base;
56941 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
56942 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
56943 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
56945 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56946 + pax_open_kernel();
56947 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
56949 + pmi_code = pmi_base;
56952 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
56953 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
56955 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56956 + pmi_start = ktva_ktla(pmi_start);
56957 + pmi_pal = ktva_ktla(pmi_pal);
56958 + pax_close_kernel();
56961 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
56963 printk(KERN_INFO "vesafb: pmi: ports = ");
56964 @@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
56965 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
56966 (ypan ? FBINFO_HWACCEL_YPAN : 0);
56969 - info->fbops->fb_pan_display = NULL;
56971 + pax_open_kernel();
56972 + *(void **)&info->fbops->fb_pan_display = NULL;
56973 + pax_close_kernel();
56976 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
56978 @@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
56979 fb_info(info, "%s frame buffer device\n", info->fix.id);
56983 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56984 + module_memfree_exec(pmi_code);
56987 if (info->screen_base)
56988 iounmap(info->screen_base);
56989 framebuffer_release(info);
56990 diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
56991 index 88714ae..16c2e11 100644
56992 --- a/drivers/video/fbdev/via/via_clock.h
56993 +++ b/drivers/video/fbdev/via/via_clock.h
56994 @@ -56,7 +56,7 @@ struct via_clock {
56996 void (*set_engine_pll_state)(u8 state);
56997 void (*set_engine_pll)(struct via_pll_config config);
57002 static inline u32 get_pll_internal_frequency(u32 ref_freq,
57003 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
57004 index 3c14e43..2630570 100644
57005 --- a/drivers/video/logo/logo_linux_clut224.ppm
57006 +++ b/drivers/video/logo/logo_linux_clut224.ppm
57007 @@ -2,1603 +2,1123 @@ P3
57008 # Standard 224-color Linux logo
57011 - 0 0 0 0 0 0 0 0 0 0 0 0
57012 - 0 0 0 0 0 0 0 0 0 0 0 0
57013 - 0 0 0 0 0 0 0 0 0 0 0 0
57014 - 0 0 0 0 0 0 0 0 0 0 0 0
57015 - 0 0 0 0 0 0 0 0 0 0 0 0
57016 - 0 0 0 0 0 0 0 0 0 0 0 0
57017 - 0 0 0 0 0 0 0 0 0 0 0 0
57018 - 0 0 0 0 0 0 0 0 0 0 0 0
57019 - 0 0 0 0 0 0 0 0 0 0 0 0
57020 - 6 6 6 6 6 6 10 10 10 10 10 10
57021 - 10 10 10 6 6 6 6 6 6 6 6 6
57022 - 0 0 0 0 0 0 0 0 0 0 0 0
57023 - 0 0 0 0 0 0 0 0 0 0 0 0
57024 - 0 0 0 0 0 0 0 0 0 0 0 0
57025 - 0 0 0 0 0 0 0 0 0 0 0 0
57026 - 0 0 0 0 0 0 0 0 0 0 0 0
57027 - 0 0 0 0 0 0 0 0 0 0 0 0
57028 - 0 0 0 0 0 0 0 0 0 0 0 0
57029 - 0 0 0 0 0 0 0 0 0 0 0 0
57030 - 0 0 0 0 0 0 0 0 0 0 0 0
57031 - 0 0 0 0 0 0 0 0 0 0 0 0
57032 - 0 0 0 0 0 0 0 0 0 0 0 0
57033 - 0 0 0 0 0 0 0 0 0 0 0 0
57034 - 0 0 0 0 0 0 0 0 0 0 0 0
57035 - 0 0 0 0 0 0 0 0 0 0 0 0
57036 - 0 0 0 0 0 0 0 0 0 0 0 0
57037 - 0 0 0 0 0 0 0 0 0 0 0 0
57038 - 0 0 0 0 0 0 0 0 0 0 0 0
57039 - 0 0 0 6 6 6 10 10 10 14 14 14
57040 - 22 22 22 26 26 26 30 30 30 34 34 34
57041 - 30 30 30 30 30 30 26 26 26 18 18 18
57042 - 14 14 14 10 10 10 6 6 6 0 0 0
57043 - 0 0 0 0 0 0 0 0 0 0 0 0
57044 - 0 0 0 0 0 0 0 0 0 0 0 0
57045 - 0 0 0 0 0 0 0 0 0 0 0 0
57046 - 0 0 0 0 0 0 0 0 0 0 0 0
57047 - 0 0 0 0 0 0 0 0 0 0 0 0
57048 - 0 0 0 0 0 0 0 0 0 0 0 0
57049 - 0 0 0 0 0 0 0 0 0 0 0 0
57050 - 0 0 0 0 0 0 0 0 0 0 0 0
57051 - 0 0 0 0 0 0 0 0 0 0 0 0
57052 - 0 0 0 0 0 1 0 0 1 0 0 0
57053 - 0 0 0 0 0 0 0 0 0 0 0 0
57054 - 0 0 0 0 0 0 0 0 0 0 0 0
57055 - 0 0 0 0 0 0 0 0 0 0 0 0
57056 - 0 0 0 0 0 0 0 0 0 0 0 0
57057 - 0 0 0 0 0 0 0 0 0 0 0 0
57058 - 0 0 0 0 0 0 0 0 0 0 0 0
57059 - 6 6 6 14 14 14 26 26 26 42 42 42
57060 - 54 54 54 66 66 66 78 78 78 78 78 78
57061 - 78 78 78 74 74 74 66 66 66 54 54 54
57062 - 42 42 42 26 26 26 18 18 18 10 10 10
57063 - 6 6 6 0 0 0 0 0 0 0 0 0
57064 - 0 0 0 0 0 0 0 0 0 0 0 0
57065 - 0 0 0 0 0 0 0 0 0 0 0 0
57066 - 0 0 0 0 0 0 0 0 0 0 0 0
57067 - 0 0 0 0 0 0 0 0 0 0 0 0
57068 - 0 0 0 0 0 0 0 0 0 0 0 0
57069 - 0 0 0 0 0 0 0 0 0 0 0 0
57070 - 0 0 0 0 0 0 0 0 0 0 0 0
57071 - 0 0 0 0 0 0 0 0 0 0 0 0
57072 - 0 0 1 0 0 0 0 0 0 0 0 0
57073 - 0 0 0 0 0 0 0 0 0 0 0 0
57074 - 0 0 0 0 0 0 0 0 0 0 0 0
57075 - 0 0 0 0 0 0 0 0 0 0 0 0
57076 - 0 0 0 0 0 0 0 0 0 0 0 0
57077 - 0 0 0 0 0 0 0 0 0 0 0 0
57078 - 0 0 0 0 0 0 0 0 0 10 10 10
57079 - 22 22 22 42 42 42 66 66 66 86 86 86
57080 - 66 66 66 38 38 38 38 38 38 22 22 22
57081 - 26 26 26 34 34 34 54 54 54 66 66 66
57082 - 86 86 86 70 70 70 46 46 46 26 26 26
57083 - 14 14 14 6 6 6 0 0 0 0 0 0
57084 - 0 0 0 0 0 0 0 0 0 0 0 0
57085 - 0 0 0 0 0 0 0 0 0 0 0 0
57086 - 0 0 0 0 0 0 0 0 0 0 0 0
57087 - 0 0 0 0 0 0 0 0 0 0 0 0
57088 - 0 0 0 0 0 0 0 0 0 0 0 0
57089 - 0 0 0 0 0 0 0 0 0 0 0 0
57090 - 0 0 0 0 0 0 0 0 0 0 0 0
57091 - 0 0 0 0 0 0 0 0 0 0 0 0
57092 - 0 0 1 0 0 1 0 0 1 0 0 0
57093 - 0 0 0 0 0 0 0 0 0 0 0 0
57094 - 0 0 0 0 0 0 0 0 0 0 0 0
57095 - 0 0 0 0 0 0 0 0 0 0 0 0
57096 - 0 0 0 0 0 0 0 0 0 0 0 0
57097 - 0 0 0 0 0 0 0 0 0 0 0 0
57098 - 0 0 0 0 0 0 10 10 10 26 26 26
57099 - 50 50 50 82 82 82 58 58 58 6 6 6
57100 - 2 2 6 2 2 6 2 2 6 2 2 6
57101 - 2 2 6 2 2 6 2 2 6 2 2 6
57102 - 6 6 6 54 54 54 86 86 86 66 66 66
57103 - 38 38 38 18 18 18 6 6 6 0 0 0
57104 - 0 0 0 0 0 0 0 0 0 0 0 0
57105 - 0 0 0 0 0 0 0 0 0 0 0 0
57106 - 0 0 0 0 0 0 0 0 0 0 0 0
57107 - 0 0 0 0 0 0 0 0 0 0 0 0
57108 - 0 0 0 0 0 0 0 0 0 0 0 0
57109 - 0 0 0 0 0 0 0 0 0 0 0 0
57110 - 0 0 0 0 0 0 0 0 0 0 0 0
57111 - 0 0 0 0 0 0 0 0 0 0 0 0
57112 - 0 0 0 0 0 0 0 0 0 0 0 0
57113 - 0 0 0 0 0 0 0 0 0 0 0 0
57114 - 0 0 0 0 0 0 0 0 0 0 0 0
57115 - 0 0 0 0 0 0 0 0 0 0 0 0
57116 - 0 0 0 0 0 0 0 0 0 0 0 0
57117 - 0 0 0 0 0 0 0 0 0 0 0 0
57118 - 0 0 0 6 6 6 22 22 22 50 50 50
57119 - 78 78 78 34 34 34 2 2 6 2 2 6
57120 - 2 2 6 2 2 6 2 2 6 2 2 6
57121 - 2 2 6 2 2 6 2 2 6 2 2 6
57122 - 2 2 6 2 2 6 6 6 6 70 70 70
57123 - 78 78 78 46 46 46 22 22 22 6 6 6
57124 - 0 0 0 0 0 0 0 0 0 0 0 0
57125 - 0 0 0 0 0 0 0 0 0 0 0 0
57126 - 0 0 0 0 0 0 0 0 0 0 0 0
57127 - 0 0 0 0 0 0 0 0 0 0 0 0
57128 - 0 0 0 0 0 0 0 0 0 0 0 0
57129 - 0 0 0 0 0 0 0 0 0 0 0 0
57130 - 0 0 0 0 0 0 0 0 0 0 0 0
57131 - 0 0 0 0 0 0 0 0 0 0 0 0
57132 - 0 0 1 0 0 1 0 0 1 0 0 0
57133 - 0 0 0 0 0 0 0 0 0 0 0 0
57134 - 0 0 0 0 0 0 0 0 0 0 0 0
57135 - 0 0 0 0 0 0 0 0 0 0 0 0
57136 - 0 0 0 0 0 0 0 0 0 0 0 0
57137 - 0 0 0 0 0 0 0 0 0 0 0 0
57138 - 6 6 6 18 18 18 42 42 42 82 82 82
57139 - 26 26 26 2 2 6 2 2 6 2 2 6
57140 - 2 2 6 2 2 6 2 2 6 2 2 6
57141 - 2 2 6 2 2 6 2 2 6 14 14 14
57142 - 46 46 46 34 34 34 6 6 6 2 2 6
57143 - 42 42 42 78 78 78 42 42 42 18 18 18
57144 - 6 6 6 0 0 0 0 0 0 0 0 0
57145 - 0 0 0 0 0 0 0 0 0 0 0 0
57146 - 0 0 0 0 0 0 0 0 0 0 0 0
57147 - 0 0 0 0 0 0 0 0 0 0 0 0
57148 - 0 0 0 0 0 0 0 0 0 0 0 0
57149 - 0 0 0 0 0 0 0 0 0 0 0 0
57150 - 0 0 0 0 0 0 0 0 0 0 0 0
57151 - 0 0 0 0 0 0 0 0 0 0 0 0
57152 - 0 0 1 0 0 0 0 0 1 0 0 0
57153 - 0 0 0 0 0 0 0 0 0 0 0 0
57154 - 0 0 0 0 0 0 0 0 0 0 0 0
57155 - 0 0 0 0 0 0 0 0 0 0 0 0
57156 - 0 0 0 0 0 0 0 0 0 0 0 0
57157 - 0 0 0 0 0 0 0 0 0 0 0 0
57158 - 10 10 10 30 30 30 66 66 66 58 58 58
57159 - 2 2 6 2 2 6 2 2 6 2 2 6
57160 - 2 2 6 2 2 6 2 2 6 2 2 6
57161 - 2 2 6 2 2 6 2 2 6 26 26 26
57162 - 86 86 86 101 101 101 46 46 46 10 10 10
57163 - 2 2 6 58 58 58 70 70 70 34 34 34
57164 - 10 10 10 0 0 0 0 0 0 0 0 0
57165 - 0 0 0 0 0 0 0 0 0 0 0 0
57166 - 0 0 0 0 0 0 0 0 0 0 0 0
57167 - 0 0 0 0 0 0 0 0 0 0 0 0
57168 - 0 0 0 0 0 0 0 0 0 0 0 0
57169 - 0 0 0 0 0 0 0 0 0 0 0 0
57170 - 0 0 0 0 0 0 0 0 0 0 0 0
57171 - 0 0 0 0 0 0 0 0 0 0 0 0
57172 - 0 0 1 0 0 1 0 0 1 0 0 0
57173 - 0 0 0 0 0 0 0 0 0 0 0 0
57174 - 0 0 0 0 0 0 0 0 0 0 0 0
57175 - 0 0 0 0 0 0 0 0 0 0 0 0
57176 - 0 0 0 0 0 0 0 0 0 0 0 0
57177 - 0 0 0 0 0 0 0 0 0 0 0 0
57178 - 14 14 14 42 42 42 86 86 86 10 10 10
57179 - 2 2 6 2 2 6 2 2 6 2 2 6
57180 - 2 2 6 2 2 6 2 2 6 2 2 6
57181 - 2 2 6 2 2 6 2 2 6 30 30 30
57182 - 94 94 94 94 94 94 58 58 58 26 26 26
57183 - 2 2 6 6 6 6 78 78 78 54 54 54
57184 - 22 22 22 6 6 6 0 0 0 0 0 0
57185 - 0 0 0 0 0 0 0 0 0 0 0 0
57186 - 0 0 0 0 0 0 0 0 0 0 0 0
57187 - 0 0 0 0 0 0 0 0 0 0 0 0
57188 - 0 0 0 0 0 0 0 0 0 0 0 0
57189 - 0 0 0 0 0 0 0 0 0 0 0 0
57190 - 0 0 0 0 0 0 0 0 0 0 0 0
57191 - 0 0 0 0 0 0 0 0 0 0 0 0
57192 - 0 0 0 0 0 0 0 0 0 0 0 0
57193 - 0 0 0 0 0 0 0 0 0 0 0 0
57194 - 0 0 0 0 0 0 0 0 0 0 0 0
57195 - 0 0 0 0 0 0 0 0 0 0 0 0
57196 - 0 0 0 0 0 0 0 0 0 0 0 0
57197 - 0 0 0 0 0 0 0 0 0 6 6 6
57198 - 22 22 22 62 62 62 62 62 62 2 2 6
57199 - 2 2 6 2 2 6 2 2 6 2 2 6
57200 - 2 2 6 2 2 6 2 2 6 2 2 6
57201 - 2 2 6 2 2 6 2 2 6 26 26 26
57202 - 54 54 54 38 38 38 18 18 18 10 10 10
57203 - 2 2 6 2 2 6 34 34 34 82 82 82
57204 - 38 38 38 14 14 14 0 0 0 0 0 0
57205 - 0 0 0 0 0 0 0 0 0 0 0 0
57206 - 0 0 0 0 0 0 0 0 0 0 0 0
57207 - 0 0 0 0 0 0 0 0 0 0 0 0
57208 - 0 0 0 0 0 0 0 0 0 0 0 0
57209 - 0 0 0 0 0 0 0 0 0 0 0 0
57210 - 0 0 0 0 0 0 0 0 0 0 0 0
57211 - 0 0 0 0 0 0 0 0 0 0 0 0
57212 - 0 0 0 0 0 1 0 0 1 0 0 0
57213 - 0 0 0 0 0 0 0 0 0 0 0 0
57214 - 0 0 0 0 0 0 0 0 0 0 0 0
57215 - 0 0 0 0 0 0 0 0 0 0 0 0
57216 - 0 0 0 0 0 0 0 0 0 0 0 0
57217 - 0 0 0 0 0 0 0 0 0 6 6 6
57218 - 30 30 30 78 78 78 30 30 30 2 2 6
57219 - 2 2 6 2 2 6 2 2 6 2 2 6
57220 - 2 2 6 2 2 6 2 2 6 2 2 6
57221 - 2 2 6 2 2 6 2 2 6 10 10 10
57222 - 10 10 10 2 2 6 2 2 6 2 2 6
57223 - 2 2 6 2 2 6 2 2 6 78 78 78
57224 - 50 50 50 18 18 18 6 6 6 0 0 0
57225 - 0 0 0 0 0 0 0 0 0 0 0 0
57226 - 0 0 0 0 0 0 0 0 0 0 0 0
57227 - 0 0 0 0 0 0 0 0 0 0 0 0
57228 - 0 0 0 0 0 0 0 0 0 0 0 0
57229 - 0 0 0 0 0 0 0 0 0 0 0 0
57230 - 0 0 0 0 0 0 0 0 0 0 0 0
57231 - 0 0 0 0 0 0 0 0 0 0 0 0
57232 - 0 0 1 0 0 0 0 0 0 0 0 0
57233 - 0 0 0 0 0 0 0 0 0 0 0 0
57234 - 0 0 0 0 0 0 0 0 0 0 0 0
57235 - 0 0 0 0 0 0 0 0 0 0 0 0
57236 - 0 0 0 0 0 0 0 0 0 0 0 0
57237 - 0 0 0 0 0 0 0 0 0 10 10 10
57238 - 38 38 38 86 86 86 14 14 14 2 2 6
57239 - 2 2 6 2 2 6 2 2 6 2 2 6
57240 - 2 2 6 2 2 6 2 2 6 2 2 6
57241 - 2 2 6 2 2 6 2 2 6 2 2 6
57242 - 2 2 6 2 2 6 2 2 6 2 2 6
57243 - 2 2 6 2 2 6 2 2 6 54 54 54
57244 - 66 66 66 26 26 26 6 6 6 0 0 0
57245 - 0 0 0 0 0 0 0 0 0 0 0 0
57246 - 0 0 0 0 0 0 0 0 0 0 0 0
57247 - 0 0 0 0 0 0 0 0 0 0 0 0
57248 - 0 0 0 0 0 0 0 0 0 0 0 0
57249 - 0 0 0 0 0 0 0 0 0 0 0 0
57250 - 0 0 0 0 0 0 0 0 0 0 0 0
57251 - 0 0 0 0 0 0 0 0 0 0 0 0
57252 - 0 0 0 0 0 1 0 0 1 0 0 0
57253 - 0 0 0 0 0 0 0 0 0 0 0 0
57254 - 0 0 0 0 0 0 0 0 0 0 0 0
57255 - 0 0 0 0 0 0 0 0 0 0 0 0
57256 - 0 0 0 0 0 0 0 0 0 0 0 0
57257 - 0 0 0 0 0 0 0 0 0 14 14 14
57258 - 42 42 42 82 82 82 2 2 6 2 2 6
57259 - 2 2 6 6 6 6 10 10 10 2 2 6
57260 - 2 2 6 2 2 6 2 2 6 2 2 6
57261 - 2 2 6 2 2 6 2 2 6 6 6 6
57262 - 14 14 14 10 10 10 2 2 6 2 2 6
57263 - 2 2 6 2 2 6 2 2 6 18 18 18
57264 - 82 82 82 34 34 34 10 10 10 0 0 0
57265 - 0 0 0 0 0 0 0 0 0 0 0 0
57266 - 0 0 0 0 0 0 0 0 0 0 0 0
57267 - 0 0 0 0 0 0 0 0 0 0 0 0
57268 - 0 0 0 0 0 0 0 0 0 0 0 0
57269 - 0 0 0 0 0 0 0 0 0 0 0 0
57270 - 0 0 0 0 0 0 0 0 0 0 0 0
57271 - 0 0 0 0 0 0 0 0 0 0 0 0
57272 - 0 0 1 0 0 0 0 0 0 0 0 0
57273 - 0 0 0 0 0 0 0 0 0 0 0 0
57274 - 0 0 0 0 0 0 0 0 0 0 0 0
57275 - 0 0 0 0 0 0 0 0 0 0 0 0
57276 - 0 0 0 0 0 0 0 0 0 0 0 0
57277 - 0 0 0 0 0 0 0 0 0 14 14 14
57278 - 46 46 46 86 86 86 2 2 6 2 2 6
57279 - 6 6 6 6 6 6 22 22 22 34 34 34
57280 - 6 6 6 2 2 6 2 2 6 2 2 6
57281 - 2 2 6 2 2 6 18 18 18 34 34 34
57282 - 10 10 10 50 50 50 22 22 22 2 2 6
57283 - 2 2 6 2 2 6 2 2 6 10 10 10
57284 - 86 86 86 42 42 42 14 14 14 0 0 0
57285 - 0 0 0 0 0 0 0 0 0 0 0 0
57286 - 0 0 0 0 0 0 0 0 0 0 0 0
57287 - 0 0 0 0 0 0 0 0 0 0 0 0
57288 - 0 0 0 0 0 0 0 0 0 0 0 0
57289 - 0 0 0 0 0 0 0 0 0 0 0 0
57290 - 0 0 0 0 0 0 0 0 0 0 0 0
57291 - 0 0 0 0 0 0 0 0 0 0 0 0
57292 - 0 0 1 0 0 1 0 0 1 0 0 0
57293 - 0 0 0 0 0 0 0 0 0 0 0 0
57294 - 0 0 0 0 0 0 0 0 0 0 0 0
57295 - 0 0 0 0 0 0 0 0 0 0 0 0
57296 - 0 0 0 0 0 0 0 0 0 0 0 0
57297 - 0 0 0 0 0 0 0 0 0 14 14 14
57298 - 46 46 46 86 86 86 2 2 6 2 2 6
57299 - 38 38 38 116 116 116 94 94 94 22 22 22
57300 - 22 22 22 2 2 6 2 2 6 2 2 6
57301 - 14 14 14 86 86 86 138 138 138 162 162 162
57302 -154 154 154 38 38 38 26 26 26 6 6 6
57303 - 2 2 6 2 2 6 2 2 6 2 2 6
57304 - 86 86 86 46 46 46 14 14 14 0 0 0
57305 - 0 0 0 0 0 0 0 0 0 0 0 0
57306 - 0 0 0 0 0 0 0 0 0 0 0 0
57307 - 0 0 0 0 0 0 0 0 0 0 0 0
57308 - 0 0 0 0 0 0 0 0 0 0 0 0
57309 - 0 0 0 0 0 0 0 0 0 0 0 0
57310 - 0 0 0 0 0 0 0 0 0 0 0 0
57311 - 0 0 0 0 0 0 0 0 0 0 0 0
57312 - 0 0 0 0 0 0 0 0 0 0 0 0
57313 - 0 0 0 0 0 0 0 0 0 0 0 0
57314 - 0 0 0 0 0 0 0 0 0 0 0 0
57315 - 0 0 0 0 0 0 0 0 0 0 0 0
57316 - 0 0 0 0 0 0 0 0 0 0 0 0
57317 - 0 0 0 0 0 0 0 0 0 14 14 14
57318 - 46 46 46 86 86 86 2 2 6 14 14 14
57319 -134 134 134 198 198 198 195 195 195 116 116 116
57320 - 10 10 10 2 2 6 2 2 6 6 6 6
57321 -101 98 89 187 187 187 210 210 210 218 218 218
57322 -214 214 214 134 134 134 14 14 14 6 6 6
57323 - 2 2 6 2 2 6 2 2 6 2 2 6
57324 - 86 86 86 50 50 50 18 18 18 6 6 6
57325 - 0 0 0 0 0 0 0 0 0 0 0 0
57326 - 0 0 0 0 0 0 0 0 0 0 0 0
57327 - 0 0 0 0 0 0 0 0 0 0 0 0
57328 - 0 0 0 0 0 0 0 0 0 0 0 0
57329 - 0 0 0 0 0 0 0 0 0 0 0 0
57330 - 0 0 0 0 0 0 0 0 0 0 0 0
57331 - 0 0 0 0 0 0 0 0 1 0 0 0
57332 - 0 0 1 0 0 1 0 0 1 0 0 0
57333 - 0 0 0 0 0 0 0 0 0 0 0 0
57334 - 0 0 0 0 0 0 0 0 0 0 0 0
57335 - 0 0 0 0 0 0 0 0 0 0 0 0
57336 - 0 0 0 0 0 0 0 0 0 0 0 0
57337 - 0 0 0 0 0 0 0 0 0 14 14 14
57338 - 46 46 46 86 86 86 2 2 6 54 54 54
57339 -218 218 218 195 195 195 226 226 226 246 246 246
57340 - 58 58 58 2 2 6 2 2 6 30 30 30
57341 -210 210 210 253 253 253 174 174 174 123 123 123
57342 -221 221 221 234 234 234 74 74 74 2 2 6
57343 - 2 2 6 2 2 6 2 2 6 2 2 6
57344 - 70 70 70 58 58 58 22 22 22 6 6 6
57345 - 0 0 0 0 0 0 0 0 0 0 0 0
57346 - 0 0 0 0 0 0 0 0 0 0 0 0
57347 - 0 0 0 0 0 0 0 0 0 0 0 0
57348 - 0 0 0 0 0 0 0 0 0 0 0 0
57349 - 0 0 0 0 0 0 0 0 0 0 0 0
57350 - 0 0 0 0 0 0 0 0 0 0 0 0
57351 - 0 0 0 0 0 0 0 0 0 0 0 0
57352 - 0 0 0 0 0 0 0 0 0 0 0 0
57353 - 0 0 0 0 0 0 0 0 0 0 0 0
57354 - 0 0 0 0 0 0 0 0 0 0 0 0
57355 - 0 0 0 0 0 0 0 0 0 0 0 0
57356 - 0 0 0 0 0 0 0 0 0 0 0 0
57357 - 0 0 0 0 0 0 0 0 0 14 14 14
57358 - 46 46 46 82 82 82 2 2 6 106 106 106
57359 -170 170 170 26 26 26 86 86 86 226 226 226
57360 -123 123 123 10 10 10 14 14 14 46 46 46
57361 -231 231 231 190 190 190 6 6 6 70 70 70
57362 - 90 90 90 238 238 238 158 158 158 2 2 6
57363 - 2 2 6 2 2 6 2 2 6 2 2 6
57364 - 70 70 70 58 58 58 22 22 22 6 6 6
57365 - 0 0 0 0 0 0 0 0 0 0 0 0
57366 - 0 0 0 0 0 0 0 0 0 0 0 0
57367 - 0 0 0 0 0 0 0 0 0 0 0 0
57368 - 0 0 0 0 0 0 0 0 0 0 0 0
57369 - 0 0 0 0 0 0 0 0 0 0 0 0
57370 - 0 0 0 0 0 0 0 0 0 0 0 0
57371 - 0 0 0 0 0 0 0 0 1 0 0 0
57372 - 0 0 1 0 0 1 0 0 1 0 0 0
57373 - 0 0 0 0 0 0 0 0 0 0 0 0
57374 - 0 0 0 0 0 0 0 0 0 0 0 0
57375 - 0 0 0 0 0 0 0 0 0 0 0 0
57376 - 0 0 0 0 0 0 0 0 0 0 0 0
57377 - 0 0 0 0 0 0 0 0 0 14 14 14
57378 - 42 42 42 86 86 86 6 6 6 116 116 116
57379 -106 106 106 6 6 6 70 70 70 149 149 149
57380 -128 128 128 18 18 18 38 38 38 54 54 54
57381 -221 221 221 106 106 106 2 2 6 14 14 14
57382 - 46 46 46 190 190 190 198 198 198 2 2 6
57383 - 2 2 6 2 2 6 2 2 6 2 2 6
57384 - 74 74 74 62 62 62 22 22 22 6 6 6
57385 - 0 0 0 0 0 0 0 0 0 0 0 0
57386 - 0 0 0 0 0 0 0 0 0 0 0 0
57387 - 0 0 0 0 0 0 0 0 0 0 0 0
57388 - 0 0 0 0 0 0 0 0 0 0 0 0
57389 - 0 0 0 0 0 0 0 0 0 0 0 0
57390 - 0 0 0 0 0 0 0 0 0 0 0 0
57391 - 0 0 0 0 0 0 0 0 1 0 0 0
57392 - 0 0 1 0 0 0 0 0 1 0 0 0
57393 - 0 0 0 0 0 0 0 0 0 0 0 0
57394 - 0 0 0 0 0 0 0 0 0 0 0 0
57395 - 0 0 0 0 0 0 0 0 0 0 0 0
57396 - 0 0 0 0 0 0 0 0 0 0 0 0
57397 - 0 0 0 0 0 0 0 0 0 14 14 14
57398 - 42 42 42 94 94 94 14 14 14 101 101 101
57399 -128 128 128 2 2 6 18 18 18 116 116 116
57400 -118 98 46 121 92 8 121 92 8 98 78 10
57401 -162 162 162 106 106 106 2 2 6 2 2 6
57402 - 2 2 6 195 195 195 195 195 195 6 6 6
57403 - 2 2 6 2 2 6 2 2 6 2 2 6
57404 - 74 74 74 62 62 62 22 22 22 6 6 6
57405 - 0 0 0 0 0 0 0 0 0 0 0 0
57406 - 0 0 0 0 0 0 0 0 0 0 0 0
57407 - 0 0 0 0 0 0 0 0 0 0 0 0
57408 - 0 0 0 0 0 0 0 0 0 0 0 0
57409 - 0 0 0 0 0 0 0 0 0 0 0 0
57410 - 0 0 0 0 0 0 0 0 0 0 0 0
57411 - 0 0 0 0 0 0 0 0 1 0 0 1
57412 - 0 0 1 0 0 0 0 0 1 0 0 0
57413 - 0 0 0 0 0 0 0 0 0 0 0 0
57414 - 0 0 0 0 0 0 0 0 0 0 0 0
57415 - 0 0 0 0 0 0 0 0 0 0 0 0
57416 - 0 0 0 0 0 0 0 0 0 0 0 0
57417 - 0 0 0 0 0 0 0 0 0 10 10 10
57418 - 38 38 38 90 90 90 14 14 14 58 58 58
57419 -210 210 210 26 26 26 54 38 6 154 114 10
57420 -226 170 11 236 186 11 225 175 15 184 144 12
57421 -215 174 15 175 146 61 37 26 9 2 2 6
57422 - 70 70 70 246 246 246 138 138 138 2 2 6
57423 - 2 2 6 2 2 6 2 2 6 2 2 6
57424 - 70 70 70 66 66 66 26 26 26 6 6 6
57425 - 0 0 0 0 0 0 0 0 0 0 0 0
57426 - 0 0 0 0 0 0 0 0 0 0 0 0
57427 - 0 0 0 0 0 0 0 0 0 0 0 0
57428 - 0 0 0 0 0 0 0 0 0 0 0 0
57429 - 0 0 0 0 0 0 0 0 0 0 0 0
57430 - 0 0 0 0 0 0 0 0 0 0 0 0
57431 - 0 0 0 0 0 0 0 0 0 0 0 0
57432 - 0 0 0 0 0 0 0 0 0 0 0 0
57433 - 0 0 0 0 0 0 0 0 0 0 0 0
57434 - 0 0 0 0 0 0 0 0 0 0 0 0
57435 - 0 0 0 0 0 0 0 0 0 0 0 0
57436 - 0 0 0 0 0 0 0 0 0 0 0 0
57437 - 0 0 0 0 0 0 0 0 0 10 10 10
57438 - 38 38 38 86 86 86 14 14 14 10 10 10
57439 -195 195 195 188 164 115 192 133 9 225 175 15
57440 -239 182 13 234 190 10 232 195 16 232 200 30
57441 -245 207 45 241 208 19 232 195 16 184 144 12
57442 -218 194 134 211 206 186 42 42 42 2 2 6
57443 - 2 2 6 2 2 6 2 2 6 2 2 6
57444 - 50 50 50 74 74 74 30 30 30 6 6 6
57445 - 0 0 0 0 0 0 0 0 0 0 0 0
57446 - 0 0 0 0 0 0 0 0 0 0 0 0
57447 - 0 0 0 0 0 0 0 0 0 0 0 0
57448 - 0 0 0 0 0 0 0 0 0 0 0 0
57449 - 0 0 0 0 0 0 0 0 0 0 0 0
57450 - 0 0 0 0 0 0 0 0 0 0 0 0
57451 - 0 0 0 0 0 0 0 0 0 0 0 0
57452 - 0 0 0 0 0 0 0 0 0 0 0 0
57453 - 0 0 0 0 0 0 0 0 0 0 0 0
57454 - 0 0 0 0 0 0 0 0 0 0 0 0
57455 - 0 0 0 0 0 0 0 0 0 0 0 0
57456 - 0 0 0 0 0 0 0 0 0 0 0 0
57457 - 0 0 0 0 0 0 0 0 0 10 10 10
57458 - 34 34 34 86 86 86 14 14 14 2 2 6
57459 -121 87 25 192 133 9 219 162 10 239 182 13
57460 -236 186 11 232 195 16 241 208 19 244 214 54
57461 -246 218 60 246 218 38 246 215 20 241 208 19
57462 -241 208 19 226 184 13 121 87 25 2 2 6
57463 - 2 2 6 2 2 6 2 2 6 2 2 6
57464 - 50 50 50 82 82 82 34 34 34 10 10 10
57465 - 0 0 0 0 0 0 0 0 0 0 0 0
57466 - 0 0 0 0 0 0 0 0 0 0 0 0
57467 - 0 0 0 0 0 0 0 0 0 0 0 0
57468 - 0 0 0 0 0 0 0 0 0 0 0 0
57469 - 0 0 0 0 0 0 0 0 0 0 0 0
57470 - 0 0 0 0 0 0 0 0 0 0 0 0
57471 - 0 0 0 0 0 0 0 0 0 0 0 0
57472 - 0 0 0 0 0 0 0 0 0 0 0 0
57473 - 0 0 0 0 0 0 0 0 0 0 0 0
57474 - 0 0 0 0 0 0 0 0 0 0 0 0
57475 - 0 0 0 0 0 0 0 0 0 0 0 0
57476 - 0 0 0 0 0 0 0 0 0 0 0 0
57477 - 0 0 0 0 0 0 0 0 0 10 10 10
57478 - 34 34 34 82 82 82 30 30 30 61 42 6
57479 -180 123 7 206 145 10 230 174 11 239 182 13
57480 -234 190 10 238 202 15 241 208 19 246 218 74
57481 -246 218 38 246 215 20 246 215 20 246 215 20
57482 -226 184 13 215 174 15 184 144 12 6 6 6
57483 - 2 2 6 2 2 6 2 2 6 2 2 6
57484 - 26 26 26 94 94 94 42 42 42 14 14 14
57485 - 0 0 0 0 0 0 0 0 0 0 0 0
57486 - 0 0 0 0 0 0 0 0 0 0 0 0
57487 - 0 0 0 0 0 0 0 0 0 0 0 0
57488 - 0 0 0 0 0 0 0 0 0 0 0 0
57489 - 0 0 0 0 0 0 0 0 0 0 0 0
57490 - 0 0 0 0 0 0 0 0 0 0 0 0
57491 - 0 0 0 0 0 0 0 0 0 0 0 0
57492 - 0 0 0 0 0 0 0 0 0 0 0 0
57493 - 0 0 0 0 0 0 0 0 0 0 0 0
57494 - 0 0 0 0 0 0 0 0 0 0 0 0
57495 - 0 0 0 0 0 0 0 0 0 0 0 0
57496 - 0 0 0 0 0 0 0 0 0 0 0 0
57497 - 0 0 0 0 0 0 0 0 0 10 10 10
57498 - 30 30 30 78 78 78 50 50 50 104 69 6
57499 -192 133 9 216 158 10 236 178 12 236 186 11
57500 -232 195 16 241 208 19 244 214 54 245 215 43
57501 -246 215 20 246 215 20 241 208 19 198 155 10
57502 -200 144 11 216 158 10 156 118 10 2 2 6
57503 - 2 2 6 2 2 6 2 2 6 2 2 6
57504 - 6 6 6 90 90 90 54 54 54 18 18 18
57505 - 6 6 6 0 0 0 0 0 0 0 0 0
57506 - 0 0 0 0 0 0 0 0 0 0 0 0
57507 - 0 0 0 0 0 0 0 0 0 0 0 0
57508 - 0 0 0 0 0 0 0 0 0 0 0 0
57509 - 0 0 0 0 0 0 0 0 0 0 0 0
57510 - 0 0 0 0 0 0 0 0 0 0 0 0
57511 - 0 0 0 0 0 0 0 0 0 0 0 0
57512 - 0 0 0 0 0 0 0 0 0 0 0 0
57513 - 0 0 0 0 0 0 0 0 0 0 0 0
57514 - 0 0 0 0 0 0 0 0 0 0 0 0
57515 - 0 0 0 0 0 0 0 0 0 0 0 0
57516 - 0 0 0 0 0 0 0 0 0 0 0 0
57517 - 0 0 0 0 0 0 0 0 0 10 10 10
57518 - 30 30 30 78 78 78 46 46 46 22 22 22
57519 -137 92 6 210 162 10 239 182 13 238 190 10
57520 -238 202 15 241 208 19 246 215 20 246 215 20
57521 -241 208 19 203 166 17 185 133 11 210 150 10
57522 -216 158 10 210 150 10 102 78 10 2 2 6
57523 - 6 6 6 54 54 54 14 14 14 2 2 6
57524 - 2 2 6 62 62 62 74 74 74 30 30 30
57525 - 10 10 10 0 0 0 0 0 0 0 0 0
57526 - 0 0 0 0 0 0 0 0 0 0 0 0
57527 - 0 0 0 0 0 0 0 0 0 0 0 0
57528 - 0 0 0 0 0 0 0 0 0 0 0 0
57529 - 0 0 0 0 0 0 0 0 0 0 0 0
57530 - 0 0 0 0 0 0 0 0 0 0 0 0
57531 - 0 0 0 0 0 0 0 0 0 0 0 0
57532 - 0 0 0 0 0 0 0 0 0 0 0 0
57533 - 0 0 0 0 0 0 0 0 0 0 0 0
57534 - 0 0 0 0 0 0 0 0 0 0 0 0
57535 - 0 0 0 0 0 0 0 0 0 0 0 0
57536 - 0 0 0 0 0 0 0 0 0 0 0 0
57537 - 0 0 0 0 0 0 0 0 0 10 10 10
57538 - 34 34 34 78 78 78 50 50 50 6 6 6
57539 - 94 70 30 139 102 15 190 146 13 226 184 13
57540 -232 200 30 232 195 16 215 174 15 190 146 13
57541 -168 122 10 192 133 9 210 150 10 213 154 11
57542 -202 150 34 182 157 106 101 98 89 2 2 6
57543 - 2 2 6 78 78 78 116 116 116 58 58 58
57544 - 2 2 6 22 22 22 90 90 90 46 46 46
57545 - 18 18 18 6 6 6 0 0 0 0 0 0
57546 - 0 0 0 0 0 0 0 0 0 0 0 0
57547 - 0 0 0 0 0 0 0 0 0 0 0 0
57548 - 0 0 0 0 0 0 0 0 0 0 0 0
57549 - 0 0 0 0 0 0 0 0 0 0 0 0
57550 - 0 0 0 0 0 0 0 0 0 0 0 0
57551 - 0 0 0 0 0 0 0 0 0 0 0 0
57552 - 0 0 0 0 0 0 0 0 0 0 0 0
57553 - 0 0 0 0 0 0 0 0 0 0 0 0
57554 - 0 0 0 0 0 0 0 0 0 0 0 0
57555 - 0 0 0 0 0 0 0 0 0 0 0 0
57556 - 0 0 0 0 0 0 0 0 0 0 0 0
57557 - 0 0 0 0 0 0 0 0 0 10 10 10
57558 - 38 38 38 86 86 86 50 50 50 6 6 6
57559 -128 128 128 174 154 114 156 107 11 168 122 10
57560 -198 155 10 184 144 12 197 138 11 200 144 11
57561 -206 145 10 206 145 10 197 138 11 188 164 115
57562 -195 195 195 198 198 198 174 174 174 14 14 14
57563 - 2 2 6 22 22 22 116 116 116 116 116 116
57564 - 22 22 22 2 2 6 74 74 74 70 70 70
57565 - 30 30 30 10 10 10 0 0 0 0 0 0
57566 - 0 0 0 0 0 0 0 0 0 0 0 0
57567 - 0 0 0 0 0 0 0 0 0 0 0 0
57568 - 0 0 0 0 0 0 0 0 0 0 0 0
57569 - 0 0 0 0 0 0 0 0 0 0 0 0
57570 - 0 0 0 0 0 0 0 0 0 0 0 0
57571 - 0 0 0 0 0 0 0 0 0 0 0 0
57572 - 0 0 0 0 0 0 0 0 0 0 0 0
57573 - 0 0 0 0 0 0 0 0 0 0 0 0
57574 - 0 0 0 0 0 0 0 0 0 0 0 0
57575 - 0 0 0 0 0 0 0 0 0 0 0 0
57576 - 0 0 0 0 0 0 0 0 0 0 0 0
57577 - 0 0 0 0 0 0 6 6 6 18 18 18
57578 - 50 50 50 101 101 101 26 26 26 10 10 10
57579 -138 138 138 190 190 190 174 154 114 156 107 11
57580 -197 138 11 200 144 11 197 138 11 192 133 9
57581 -180 123 7 190 142 34 190 178 144 187 187 187
57582 -202 202 202 221 221 221 214 214 214 66 66 66
57583 - 2 2 6 2 2 6 50 50 50 62 62 62
57584 - 6 6 6 2 2 6 10 10 10 90 90 90
57585 - 50 50 50 18 18 18 6 6 6 0 0 0
57586 - 0 0 0 0 0 0 0 0 0 0 0 0
57587 - 0 0 0 0 0 0 0 0 0 0 0 0
57588 - 0 0 0 0 0 0 0 0 0 0 0 0
57589 - 0 0 0 0 0 0 0 0 0 0 0 0
57590 - 0 0 0 0 0 0 0 0 0 0 0 0
57591 - 0 0 0 0 0 0 0 0 0 0 0 0
57592 - 0 0 0 0 0 0 0 0 0 0 0 0
57593 - 0 0 0 0 0 0 0 0 0 0 0 0
57594 - 0 0 0 0 0 0 0 0 0 0 0 0
57595 - 0 0 0 0 0 0 0 0 0 0 0 0
57596 - 0 0 0 0 0 0 0 0 0 0 0 0
57597 - 0 0 0 0 0 0 10 10 10 34 34 34
57598 - 74 74 74 74 74 74 2 2 6 6 6 6
57599 -144 144 144 198 198 198 190 190 190 178 166 146
57600 -154 121 60 156 107 11 156 107 11 168 124 44
57601 -174 154 114 187 187 187 190 190 190 210 210 210
57602 -246 246 246 253 253 253 253 253 253 182 182 182
57603 - 6 6 6 2 2 6 2 2 6 2 2 6
57604 - 2 2 6 2 2 6 2 2 6 62 62 62
57605 - 74 74 74 34 34 34 14 14 14 0 0 0
57606 - 0 0 0 0 0 0 0 0 0 0 0 0
57607 - 0 0 0 0 0 0 0 0 0 0 0 0
57608 - 0 0 0 0 0 0 0 0 0 0 0 0
57609 - 0 0 0 0 0 0 0 0 0 0 0 0
57610 - 0 0 0 0 0 0 0 0 0 0 0 0
57611 - 0 0 0 0 0 0 0 0 0 0 0 0
57612 - 0 0 0 0 0 0 0 0 0 0 0 0
57613 - 0 0 0 0 0 0 0 0 0 0 0 0
57614 - 0 0 0 0 0 0 0 0 0 0 0 0
57615 - 0 0 0 0 0 0 0 0 0 0 0 0
57616 - 0 0 0 0 0 0 0 0 0 0 0 0
57617 - 0 0 0 10 10 10 22 22 22 54 54 54
57618 - 94 94 94 18 18 18 2 2 6 46 46 46
57619 -234 234 234 221 221 221 190 190 190 190 190 190
57620 -190 190 190 187 187 187 187 187 187 190 190 190
57621 -190 190 190 195 195 195 214 214 214 242 242 242
57622 -253 253 253 253 253 253 253 253 253 253 253 253
57623 - 82 82 82 2 2 6 2 2 6 2 2 6
57624 - 2 2 6 2 2 6 2 2 6 14 14 14
57625 - 86 86 86 54 54 54 22 22 22 6 6 6
57626 - 0 0 0 0 0 0 0 0 0 0 0 0
57627 - 0 0 0 0 0 0 0 0 0 0 0 0
57628 - 0 0 0 0 0 0 0 0 0 0 0 0
57629 - 0 0 0 0 0 0 0 0 0 0 0 0
57630 - 0 0 0 0 0 0 0 0 0 0 0 0
57631 - 0 0 0 0 0 0 0 0 0 0 0 0
57632 - 0 0 0 0 0 0 0 0 0 0 0 0
57633 - 0 0 0 0 0 0 0 0 0 0 0 0
57634 - 0 0 0 0 0 0 0 0 0 0 0 0
57635 - 0 0 0 0 0 0 0 0 0 0 0 0
57636 - 0 0 0 0 0 0 0 0 0 0 0 0
57637 - 6 6 6 18 18 18 46 46 46 90 90 90
57638 - 46 46 46 18 18 18 6 6 6 182 182 182
57639 -253 253 253 246 246 246 206 206 206 190 190 190
57640 -190 190 190 190 190 190 190 190 190 190 190 190
57641 -206 206 206 231 231 231 250 250 250 253 253 253
57642 -253 253 253 253 253 253 253 253 253 253 253 253
57643 -202 202 202 14 14 14 2 2 6 2 2 6
57644 - 2 2 6 2 2 6 2 2 6 2 2 6
57645 - 42 42 42 86 86 86 42 42 42 18 18 18
57646 - 6 6 6 0 0 0 0 0 0 0 0 0
57647 - 0 0 0 0 0 0 0 0 0 0 0 0
57648 - 0 0 0 0 0 0 0 0 0 0 0 0
57649 - 0 0 0 0 0 0 0 0 0 0 0 0
57650 - 0 0 0 0 0 0 0 0 0 0 0 0
57651 - 0 0 0 0 0 0 0 0 0 0 0 0
57652 - 0 0 0 0 0 0 0 0 0 0 0 0
57653 - 0 0 0 0 0 0 0 0 0 0 0 0
57654 - 0 0 0 0 0 0 0 0 0 0 0 0
57655 - 0 0 0 0 0 0 0 0 0 0 0 0
57656 - 0 0 0 0 0 0 0 0 0 6 6 6
57657 - 14 14 14 38 38 38 74 74 74 66 66 66
57658 - 2 2 6 6 6 6 90 90 90 250 250 250
57659 -253 253 253 253 253 253 238 238 238 198 198 198
57660 -190 190 190 190 190 190 195 195 195 221 221 221
57661 -246 246 246 253 253 253 253 253 253 253 253 253
57662 -253 253 253 253 253 253 253 253 253 253 253 253
57663 -253 253 253 82 82 82 2 2 6 2 2 6
57664 - 2 2 6 2 2 6 2 2 6 2 2 6
57665 - 2 2 6 78 78 78 70 70 70 34 34 34
57666 - 14 14 14 6 6 6 0 0 0 0 0 0
57667 - 0 0 0 0 0 0 0 0 0 0 0 0
57668 - 0 0 0 0 0 0 0 0 0 0 0 0
57669 - 0 0 0 0 0 0 0 0 0 0 0 0
57670 - 0 0 0 0 0 0 0 0 0 0 0 0
57671 - 0 0 0 0 0 0 0 0 0 0 0 0
57672 - 0 0 0 0 0 0 0 0 0 0 0 0
57673 - 0 0 0 0 0 0 0 0 0 0 0 0
57674 - 0 0 0 0 0 0 0 0 0 0 0 0
57675 - 0 0 0 0 0 0 0 0 0 0 0 0
57676 - 0 0 0 0 0 0 0 0 0 14 14 14
57677 - 34 34 34 66 66 66 78 78 78 6 6 6
57678 - 2 2 6 18 18 18 218 218 218 253 253 253
57679 -253 253 253 253 253 253 253 253 253 246 246 246
57680 -226 226 226 231 231 231 246 246 246 253 253 253
57681 -253 253 253 253 253 253 253 253 253 253 253 253
57682 -253 253 253 253 253 253 253 253 253 253 253 253
57683 -253 253 253 178 178 178 2 2 6 2 2 6
57684 - 2 2 6 2 2 6 2 2 6 2 2 6
57685 - 2 2 6 18 18 18 90 90 90 62 62 62
57686 - 30 30 30 10 10 10 0 0 0 0 0 0
57687 - 0 0 0 0 0 0 0 0 0 0 0 0
57688 - 0 0 0 0 0 0 0 0 0 0 0 0
57689 - 0 0 0 0 0 0 0 0 0 0 0 0
57690 - 0 0 0 0 0 0 0 0 0 0 0 0
57691 - 0 0 0 0 0 0 0 0 0 0 0 0
57692 - 0 0 0 0 0 0 0 0 0 0 0 0
57693 - 0 0 0 0 0 0 0 0 0 0 0 0
57694 - 0 0 0 0 0 0 0 0 0 0 0 0
57695 - 0 0 0 0 0 0 0 0 0 0 0 0
57696 - 0 0 0 0 0 0 10 10 10 26 26 26
57697 - 58 58 58 90 90 90 18 18 18 2 2 6
57698 - 2 2 6 110 110 110 253 253 253 253 253 253
57699 -253 253 253 253 253 253 253 253 253 253 253 253
57700 -250 250 250 253 253 253 253 253 253 253 253 253
57701 -253 253 253 253 253 253 253 253 253 253 253 253
57702 -253 253 253 253 253 253 253 253 253 253 253 253
57703 -253 253 253 231 231 231 18 18 18 2 2 6
57704 - 2 2 6 2 2 6 2 2 6 2 2 6
57705 - 2 2 6 2 2 6 18 18 18 94 94 94
57706 - 54 54 54 26 26 26 10 10 10 0 0 0
57707 - 0 0 0 0 0 0 0 0 0 0 0 0
57708 - 0 0 0 0 0 0 0 0 0 0 0 0
57709 - 0 0 0 0 0 0 0 0 0 0 0 0
57710 - 0 0 0 0 0 0 0 0 0 0 0 0
57711 - 0 0 0 0 0 0 0 0 0 0 0 0
57712 - 0 0 0 0 0 0 0 0 0 0 0 0
57713 - 0 0 0 0 0 0 0 0 0 0 0 0
57714 - 0 0 0 0 0 0 0 0 0 0 0 0
57715 - 0 0 0 0 0 0 0 0 0 0 0 0
57716 - 0 0 0 6 6 6 22 22 22 50 50 50
57717 - 90 90 90 26 26 26 2 2 6 2 2 6
57718 - 14 14 14 195 195 195 250 250 250 253 253 253
57719 -253 253 253 253 253 253 253 253 253 253 253 253
57720 -253 253 253 253 253 253 253 253 253 253 253 253
57721 -253 253 253 253 253 253 253 253 253 253 253 253
57722 -253 253 253 253 253 253 253 253 253 253 253 253
57723 -250 250 250 242 242 242 54 54 54 2 2 6
57724 - 2 2 6 2 2 6 2 2 6 2 2 6
57725 - 2 2 6 2 2 6 2 2 6 38 38 38
57726 - 86 86 86 50 50 50 22 22 22 6 6 6
57727 - 0 0 0 0 0 0 0 0 0 0 0 0
57728 - 0 0 0 0 0 0 0 0 0 0 0 0
57729 - 0 0 0 0 0 0 0 0 0 0 0 0
57730 - 0 0 0 0 0 0 0 0 0 0 0 0
57731 - 0 0 0 0 0 0 0 0 0 0 0 0
57732 - 0 0 0 0 0 0 0 0 0 0 0 0
57733 - 0 0 0 0 0 0 0 0 0 0 0 0
57734 - 0 0 0 0 0 0 0 0 0 0 0 0
57735 - 0 0 0 0 0 0 0 0 0 0 0 0
57736 - 6 6 6 14 14 14 38 38 38 82 82 82
57737 - 34 34 34 2 2 6 2 2 6 2 2 6
57738 - 42 42 42 195 195 195 246 246 246 253 253 253
57739 -253 253 253 253 253 253 253 253 253 250 250 250
57740 -242 242 242 242 242 242 250 250 250 253 253 253
57741 -253 253 253 253 253 253 253 253 253 253 253 253
57742 -253 253 253 250 250 250 246 246 246 238 238 238
57743 -226 226 226 231 231 231 101 101 101 6 6 6
57744 - 2 2 6 2 2 6 2 2 6 2 2 6
57745 - 2 2 6 2 2 6 2 2 6 2 2 6
57746 - 38 38 38 82 82 82 42 42 42 14 14 14
57747 - 6 6 6 0 0 0 0 0 0 0 0 0
57748 - 0 0 0 0 0 0 0 0 0 0 0 0
57749 - 0 0 0 0 0 0 0 0 0 0 0 0
57750 - 0 0 0 0 0 0 0 0 0 0 0 0
57751 - 0 0 0 0 0 0 0 0 0 0 0 0
57752 - 0 0 0 0 0 0 0 0 0 0 0 0
57753 - 0 0 0 0 0 0 0 0 0 0 0 0
57754 - 0 0 0 0 0 0 0 0 0 0 0 0
57755 - 0 0 0 0 0 0 0 0 0 0 0 0
57756 - 10 10 10 26 26 26 62 62 62 66 66 66
57757 - 2 2 6 2 2 6 2 2 6 6 6 6
57758 - 70 70 70 170 170 170 206 206 206 234 234 234
57759 -246 246 246 250 250 250 250 250 250 238 238 238
57760 -226 226 226 231 231 231 238 238 238 250 250 250
57761 -250 250 250 250 250 250 246 246 246 231 231 231
57762 -214 214 214 206 206 206 202 202 202 202 202 202
57763 -198 198 198 202 202 202 182 182 182 18 18 18
57764 - 2 2 6 2 2 6 2 2 6 2 2 6
57765 - 2 2 6 2 2 6 2 2 6 2 2 6
57766 - 2 2 6 62 62 62 66 66 66 30 30 30
57767 - 10 10 10 0 0 0 0 0 0 0 0 0
57768 - 0 0 0 0 0 0 0 0 0 0 0 0
57769 - 0 0 0 0 0 0 0 0 0 0 0 0
57770 - 0 0 0 0 0 0 0 0 0 0 0 0
57771 - 0 0 0 0 0 0 0 0 0 0 0 0
57772 - 0 0 0 0 0 0 0 0 0 0 0 0
57773 - 0 0 0 0 0 0 0 0 0 0 0 0
57774 - 0 0 0 0 0 0 0 0 0 0 0 0
57775 - 0 0 0 0 0 0 0 0 0 0 0 0
57776 - 14 14 14 42 42 42 82 82 82 18 18 18
57777 - 2 2 6 2 2 6 2 2 6 10 10 10
57778 - 94 94 94 182 182 182 218 218 218 242 242 242
57779 -250 250 250 253 253 253 253 253 253 250 250 250
57780 -234 234 234 253 253 253 253 253 253 253 253 253
57781 -253 253 253 253 253 253 253 253 253 246 246 246
57782 -238 238 238 226 226 226 210 210 210 202 202 202
57783 -195 195 195 195 195 195 210 210 210 158 158 158
57784 - 6 6 6 14 14 14 50 50 50 14 14 14
57785 - 2 2 6 2 2 6 2 2 6 2 2 6
57786 - 2 2 6 6 6 6 86 86 86 46 46 46
57787 - 18 18 18 6 6 6 0 0 0 0 0 0
57788 - 0 0 0 0 0 0 0 0 0 0 0 0
57789 - 0 0 0 0 0 0 0 0 0 0 0 0
57790 - 0 0 0 0 0 0 0 0 0 0 0 0
57791 - 0 0 0 0 0 0 0 0 0 0 0 0
57792 - 0 0 0 0 0 0 0 0 0 0 0 0
57793 - 0 0 0 0 0 0 0 0 0 0 0 0
57794 - 0 0 0 0 0 0 0 0 0 0 0 0
57795 - 0 0 0 0 0 0 0 0 0 6 6 6
57796 - 22 22 22 54 54 54 70 70 70 2 2 6
57797 - 2 2 6 10 10 10 2 2 6 22 22 22
57798 -166 166 166 231 231 231 250 250 250 253 253 253
57799 -253 253 253 253 253 253 253 253 253 250 250 250
57800 -242 242 242 253 253 253 253 253 253 253 253 253
57801 -253 253 253 253 253 253 253 253 253 253 253 253
57802 -253 253 253 253 253 253 253 253 253 246 246 246
57803 -231 231 231 206 206 206 198 198 198 226 226 226
57804 - 94 94 94 2 2 6 6 6 6 38 38 38
57805 - 30 30 30 2 2 6 2 2 6 2 2 6
57806 - 2 2 6 2 2 6 62 62 62 66 66 66
57807 - 26 26 26 10 10 10 0 0 0 0 0 0
57808 - 0 0 0 0 0 0 0 0 0 0 0 0
57809 - 0 0 0 0 0 0 0 0 0 0 0 0
57810 - 0 0 0 0 0 0 0 0 0 0 0 0
57811 - 0 0 0 0 0 0 0 0 0 0 0 0
57812 - 0 0 0 0 0 0 0 0 0 0 0 0
57813 - 0 0 0 0 0 0 0 0 0 0 0 0
57814 - 0 0 0 0 0 0 0 0 0 0 0 0
57815 - 0 0 0 0 0 0 0 0 0 10 10 10
57816 - 30 30 30 74 74 74 50 50 50 2 2 6
57817 - 26 26 26 26 26 26 2 2 6 106 106 106
57818 -238 238 238 253 253 253 253 253 253 253 253 253
57819 -253 253 253 253 253 253 253 253 253 253 253 253
57820 -253 253 253 253 253 253 253 253 253 253 253 253
57821 -253 253 253 253 253 253 253 253 253 253 253 253
57822 -253 253 253 253 253 253 253 253 253 253 253 253
57823 -253 253 253 246 246 246 218 218 218 202 202 202
57824 -210 210 210 14 14 14 2 2 6 2 2 6
57825 - 30 30 30 22 22 22 2 2 6 2 2 6
57826 - 2 2 6 2 2 6 18 18 18 86 86 86
57827 - 42 42 42 14 14 14 0 0 0 0 0 0
57828 - 0 0 0 0 0 0 0 0 0 0 0 0
57829 - 0 0 0 0 0 0 0 0 0 0 0 0
57830 - 0 0 0 0 0 0 0 0 0 0 0 0
57831 - 0 0 0 0 0 0 0 0 0 0 0 0
57832 - 0 0 0 0 0 0 0 0 0 0 0 0
57833 - 0 0 0 0 0 0 0 0 0 0 0 0
57834 - 0 0 0 0 0 0 0 0 0 0 0 0
57835 - 0 0 0 0 0 0 0 0 0 14 14 14
57836 - 42 42 42 90 90 90 22 22 22 2 2 6
57837 - 42 42 42 2 2 6 18 18 18 218 218 218
57838 -253 253 253 253 253 253 253 253 253 253 253 253
57839 -253 253 253 253 253 253 253 253 253 253 253 253
57840 -253 253 253 253 253 253 253 253 253 253 253 253
57841 -253 253 253 253 253 253 253 253 253 253 253 253
57842 -253 253 253 253 253 253 253 253 253 253 253 253
57843 -253 253 253 253 253 253 250 250 250 221 221 221
57844 -218 218 218 101 101 101 2 2 6 14 14 14
57845 - 18 18 18 38 38 38 10 10 10 2 2 6
57846 - 2 2 6 2 2 6 2 2 6 78 78 78
57847 - 58 58 58 22 22 22 6 6 6 0 0 0
57848 - 0 0 0 0 0 0 0 0 0 0 0 0
57849 - 0 0 0 0 0 0 0 0 0 0 0 0
57850 - 0 0 0 0 0 0 0 0 0 0 0 0
57851 - 0 0 0 0 0 0 0 0 0 0 0 0
57852 - 0 0 0 0 0 0 0 0 0 0 0 0
57853 - 0 0 0 0 0 0 0 0 0 0 0 0
57854 - 0 0 0 0 0 0 0 0 0 0 0 0
57855 - 0 0 0 0 0 0 6 6 6 18 18 18
57856 - 54 54 54 82 82 82 2 2 6 26 26 26
57857 - 22 22 22 2 2 6 123 123 123 253 253 253
57858 -253 253 253 253 253 253 253 253 253 253 253 253
57859 -253 253 253 253 253 253 253 253 253 253 253 253
57860 -253 253 253 253 253 253 253 253 253 253 253 253
57861 -253 253 253 253 253 253 253 253 253 253 253 253
57862 -253 253 253 253 253 253 253 253 253 253 253 253
57863 -253 253 253 253 253 253 253 253 253 250 250 250
57864 -238 238 238 198 198 198 6 6 6 38 38 38
57865 - 58 58 58 26 26 26 38 38 38 2 2 6
57866 - 2 2 6 2 2 6 2 2 6 46 46 46
57867 - 78 78 78 30 30 30 10 10 10 0 0 0
57868 - 0 0 0 0 0 0 0 0 0 0 0 0
57869 - 0 0 0 0 0 0 0 0 0 0 0 0
57870 - 0 0 0 0 0 0 0 0 0 0 0 0
57871 - 0 0 0 0 0 0 0 0 0 0 0 0
57872 - 0 0 0 0 0 0 0 0 0 0 0 0
57873 - 0 0 0 0 0 0 0 0 0 0 0 0
57874 - 0 0 0 0 0 0 0 0 0 0 0 0
57875 - 0 0 0 0 0 0 10 10 10 30 30 30
57876 - 74 74 74 58 58 58 2 2 6 42 42 42
57877 - 2 2 6 22 22 22 231 231 231 253 253 253
57878 -253 253 253 253 253 253 253 253 253 253 253 253
57879 -253 253 253 253 253 253 253 253 253 250 250 250
57880 -253 253 253 253 253 253 253 253 253 253 253 253
57881 -253 253 253 253 253 253 253 253 253 253 253 253
57882 -253 253 253 253 253 253 253 253 253 253 253 253
57883 -253 253 253 253 253 253 253 253 253 253 253 253
57884 -253 253 253 246 246 246 46 46 46 38 38 38
57885 - 42 42 42 14 14 14 38 38 38 14 14 14
57886 - 2 2 6 2 2 6 2 2 6 6 6 6
57887 - 86 86 86 46 46 46 14 14 14 0 0 0
57888 - 0 0 0 0 0 0 0 0 0 0 0 0
57889 - 0 0 0 0 0 0 0 0 0 0 0 0
57890 - 0 0 0 0 0 0 0 0 0 0 0 0
57891 - 0 0 0 0 0 0 0 0 0 0 0 0
57892 - 0 0 0 0 0 0 0 0 0 0 0 0
57893 - 0 0 0 0 0 0 0 0 0 0 0 0
57894 - 0 0 0 0 0 0 0 0 0 0 0 0
57895 - 0 0 0 6 6 6 14 14 14 42 42 42
57896 - 90 90 90 18 18 18 18 18 18 26 26 26
57897 - 2 2 6 116 116 116 253 253 253 253 253 253
57898 -253 253 253 253 253 253 253 253 253 253 253 253
57899 -253 253 253 253 253 253 250 250 250 238 238 238
57900 -253 253 253 253 253 253 253 253 253 253 253 253
57901 -253 253 253 253 253 253 253 253 253 253 253 253
57902 -253 253 253 253 253 253 253 253 253 253 253 253
57903 -253 253 253 253 253 253 253 253 253 253 253 253
57904 -253 253 253 253 253 253 94 94 94 6 6 6
57905 - 2 2 6 2 2 6 10 10 10 34 34 34
57906 - 2 2 6 2 2 6 2 2 6 2 2 6
57907 - 74 74 74 58 58 58 22 22 22 6 6 6
57908 - 0 0 0 0 0 0 0 0 0 0 0 0
57909 - 0 0 0 0 0 0 0 0 0 0 0 0
57910 - 0 0 0 0 0 0 0 0 0 0 0 0
57911 - 0 0 0 0 0 0 0 0 0 0 0 0
57912 - 0 0 0 0 0 0 0 0 0 0 0 0
57913 - 0 0 0 0 0 0 0 0 0 0 0 0
57914 - 0 0 0 0 0 0 0 0 0 0 0 0
57915 - 0 0 0 10 10 10 26 26 26 66 66 66
57916 - 82 82 82 2 2 6 38 38 38 6 6 6
57917 - 14 14 14 210 210 210 253 253 253 253 253 253
57918 -253 253 253 253 253 253 253 253 253 253 253 253
57919 -253 253 253 253 253 253 246 246 246 242 242 242
57920 -253 253 253 253 253 253 253 253 253 253 253 253
57921 -253 253 253 253 253 253 253 253 253 253 253 253
57922 -253 253 253 253 253 253 253 253 253 253 253 253
57923 -253 253 253 253 253 253 253 253 253 253 253 253
57924 -253 253 253 253 253 253 144 144 144 2 2 6
57925 - 2 2 6 2 2 6 2 2 6 46 46 46
57926 - 2 2 6 2 2 6 2 2 6 2 2 6
57927 - 42 42 42 74 74 74 30 30 30 10 10 10
57928 - 0 0 0 0 0 0 0 0 0 0 0 0
57929 - 0 0 0 0 0 0 0 0 0 0 0 0
57930 - 0 0 0 0 0 0 0 0 0 0 0 0
57931 - 0 0 0 0 0 0 0 0 0 0 0 0
57932 - 0 0 0 0 0 0 0 0 0 0 0 0
57933 - 0 0 0 0 0 0 0 0 0 0 0 0
57934 - 0 0 0 0 0 0 0 0 0 0 0 0
57935 - 6 6 6 14 14 14 42 42 42 90 90 90
57936 - 26 26 26 6 6 6 42 42 42 2 2 6
57937 - 74 74 74 250 250 250 253 253 253 253 253 253
57938 -253 253 253 253 253 253 253 253 253 253 253 253
57939 -253 253 253 253 253 253 242 242 242 242 242 242
57940 -253 253 253 253 253 253 253 253 253 253 253 253
57941 -253 253 253 253 253 253 253 253 253 253 253 253
57942 -253 253 253 253 253 253 253 253 253 253 253 253
57943 -253 253 253 253 253 253 253 253 253 253 253 253
57944 -253 253 253 253 253 253 182 182 182 2 2 6
57945 - 2 2 6 2 2 6 2 2 6 46 46 46
57946 - 2 2 6 2 2 6 2 2 6 2 2 6
57947 - 10 10 10 86 86 86 38 38 38 10 10 10
57948 - 0 0 0 0 0 0 0 0 0 0 0 0
57949 - 0 0 0 0 0 0 0 0 0 0 0 0
57950 - 0 0 0 0 0 0 0 0 0 0 0 0
57951 - 0 0 0 0 0 0 0 0 0 0 0 0
57952 - 0 0 0 0 0 0 0 0 0 0 0 0
57953 - 0 0 0 0 0 0 0 0 0 0 0 0
57954 - 0 0 0 0 0 0 0 0 0 0 0 0
57955 - 10 10 10 26 26 26 66 66 66 82 82 82
57956 - 2 2 6 22 22 22 18 18 18 2 2 6
57957 -149 149 149 253 253 253 253 253 253 253 253 253
57958 -253 253 253 253 253 253 253 253 253 253 253 253
57959 -253 253 253 253 253 253 234 234 234 242 242 242
57960 -253 253 253 253 253 253 253 253 253 253 253 253
57961 -253 253 253 253 253 253 253 253 253 253 253 253
57962 -253 253 253 253 253 253 253 253 253 253 253 253
57963 -253 253 253 253 253 253 253 253 253 253 253 253
57964 -253 253 253 253 253 253 206 206 206 2 2 6
57965 - 2 2 6 2 2 6 2 2 6 38 38 38
57966 - 2 2 6 2 2 6 2 2 6 2 2 6
57967 - 6 6 6 86 86 86 46 46 46 14 14 14
57968 - 0 0 0 0 0 0 0 0 0 0 0 0
57969 - 0 0 0 0 0 0 0 0 0 0 0 0
57970 - 0 0 0 0 0 0 0 0 0 0 0 0
57971 - 0 0 0 0 0 0 0 0 0 0 0 0
57972 - 0 0 0 0 0 0 0 0 0 0 0 0
57973 - 0 0 0 0 0 0 0 0 0 0 0 0
57974 - 0 0 0 0 0 0 0 0 0 6 6 6
57975 - 18 18 18 46 46 46 86 86 86 18 18 18
57976 - 2 2 6 34 34 34 10 10 10 6 6 6
57977 -210 210 210 253 253 253 253 253 253 253 253 253
57978 -253 253 253 253 253 253 253 253 253 253 253 253
57979 -253 253 253 253 253 253 234 234 234 242 242 242
57980 -253 253 253 253 253 253 253 253 253 253 253 253
57981 -253 253 253 253 253 253 253 253 253 253 253 253
57982 -253 253 253 253 253 253 253 253 253 253 253 253
57983 -253 253 253 253 253 253 253 253 253 253 253 253
57984 -253 253 253 253 253 253 221 221 221 6 6 6
57985 - 2 2 6 2 2 6 6 6 6 30 30 30
57986 - 2 2 6 2 2 6 2 2 6 2 2 6
57987 - 2 2 6 82 82 82 54 54 54 18 18 18
57988 - 6 6 6 0 0 0 0 0 0 0 0 0
57989 - 0 0 0 0 0 0 0 0 0 0 0 0
57990 - 0 0 0 0 0 0 0 0 0 0 0 0
57991 - 0 0 0 0 0 0 0 0 0 0 0 0
57992 - 0 0 0 0 0 0 0 0 0 0 0 0
57993 - 0 0 0 0 0 0 0 0 0 0 0 0
57994 - 0 0 0 0 0 0 0 0 0 10 10 10
57995 - 26 26 26 66 66 66 62 62 62 2 2 6
57996 - 2 2 6 38 38 38 10 10 10 26 26 26
57997 -238 238 238 253 253 253 253 253 253 253 253 253
57998 -253 253 253 253 253 253 253 253 253 253 253 253
57999 -253 253 253 253 253 253 231 231 231 238 238 238
58000 -253 253 253 253 253 253 253 253 253 253 253 253
58001 -253 253 253 253 253 253 253 253 253 253 253 253
58002 -253 253 253 253 253 253 253 253 253 253 253 253
58003 -253 253 253 253 253 253 253 253 253 253 253 253
58004 -253 253 253 253 253 253 231 231 231 6 6 6
58005 - 2 2 6 2 2 6 10 10 10 30 30 30
58006 - 2 2 6 2 2 6 2 2 6 2 2 6
58007 - 2 2 6 66 66 66 58 58 58 22 22 22
58008 - 6 6 6 0 0 0 0 0 0 0 0 0
58009 - 0 0 0 0 0 0 0 0 0 0 0 0
58010 - 0 0 0 0 0 0 0 0 0 0 0 0
58011 - 0 0 0 0 0 0 0 0 0 0 0 0
58012 - 0 0 0 0 0 0 0 0 0 0 0 0
58013 - 0 0 0 0 0 0 0 0 0 0 0 0
58014 - 0 0 0 0 0 0 0 0 0 10 10 10
58015 - 38 38 38 78 78 78 6 6 6 2 2 6
58016 - 2 2 6 46 46 46 14 14 14 42 42 42
58017 -246 246 246 253 253 253 253 253 253 253 253 253
58018 -253 253 253 253 253 253 253 253 253 253 253 253
58019 -253 253 253 253 253 253 231 231 231 242 242 242
58020 -253 253 253 253 253 253 253 253 253 253 253 253
58021 -253 253 253 253 253 253 253 253 253 253 253 253
58022 -253 253 253 253 253 253 253 253 253 253 253 253
58023 -253 253 253 253 253 253 253 253 253 253 253 253
58024 -253 253 253 253 253 253 234 234 234 10 10 10
58025 - 2 2 6 2 2 6 22 22 22 14 14 14
58026 - 2 2 6 2 2 6 2 2 6 2 2 6
58027 - 2 2 6 66 66 66 62 62 62 22 22 22
58028 - 6 6 6 0 0 0 0 0 0 0 0 0
58029 - 0 0 0 0 0 0 0 0 0 0 0 0
58030 - 0 0 0 0 0 0 0 0 0 0 0 0
58031 - 0 0 0 0 0 0 0 0 0 0 0 0
58032 - 0 0 0 0 0 0 0 0 0 0 0 0
58033 - 0 0 0 0 0 0 0 0 0 0 0 0
58034 - 0 0 0 0 0 0 6 6 6 18 18 18
58035 - 50 50 50 74 74 74 2 2 6 2 2 6
58036 - 14 14 14 70 70 70 34 34 34 62 62 62
58037 -250 250 250 253 253 253 253 253 253 253 253 253
58038 -253 253 253 253 253 253 253 253 253 253 253 253
58039 -253 253 253 253 253 253 231 231 231 246 246 246
58040 -253 253 253 253 253 253 253 253 253 253 253 253
58041 -253 253 253 253 253 253 253 253 253 253 253 253
58042 -253 253 253 253 253 253 253 253 253 253 253 253
58043 -253 253 253 253 253 253 253 253 253 253 253 253
58044 -253 253 253 253 253 253 234 234 234 14 14 14
58045 - 2 2 6 2 2 6 30 30 30 2 2 6
58046 - 2 2 6 2 2 6 2 2 6 2 2 6
58047 - 2 2 6 66 66 66 62 62 62 22 22 22
58048 - 6 6 6 0 0 0 0 0 0 0 0 0
58049 - 0 0 0 0 0 0 0 0 0 0 0 0
58050 - 0 0 0 0 0 0 0 0 0 0 0 0
58051 - 0 0 0 0 0 0 0 0 0 0 0 0
58052 - 0 0 0 0 0 0 0 0 0 0 0 0
58053 - 0 0 0 0 0 0 0 0 0 0 0 0
58054 - 0 0 0 0 0 0 6 6 6 18 18 18
58055 - 54 54 54 62 62 62 2 2 6 2 2 6
58056 - 2 2 6 30 30 30 46 46 46 70 70 70
58057 -250 250 250 253 253 253 253 253 253 253 253 253
58058 -253 253 253 253 253 253 253 253 253 253 253 253
58059 -253 253 253 253 253 253 231 231 231 246 246 246
58060 -253 253 253 253 253 253 253 253 253 253 253 253
58061 -253 253 253 253 253 253 253 253 253 253 253 253
58062 -253 253 253 253 253 253 253 253 253 253 253 253
58063 -253 253 253 253 253 253 253 253 253 253 253 253
58064 -253 253 253 253 253 253 226 226 226 10 10 10
58065 - 2 2 6 6 6 6 30 30 30 2 2 6
58066 - 2 2 6 2 2 6 2 2 6 2 2 6
58067 - 2 2 6 66 66 66 58 58 58 22 22 22
58068 - 6 6 6 0 0 0 0 0 0 0 0 0
58069 - 0 0 0 0 0 0 0 0 0 0 0 0
58070 - 0 0 0 0 0 0 0 0 0 0 0 0
58071 - 0 0 0 0 0 0 0 0 0 0 0 0
58072 - 0 0 0 0 0 0 0 0 0 0 0 0
58073 - 0 0 0 0 0 0 0 0 0 0 0 0
58074 - 0 0 0 0 0 0 6 6 6 22 22 22
58075 - 58 58 58 62 62 62 2 2 6 2 2 6
58076 - 2 2 6 2 2 6 30 30 30 78 78 78
58077 -250 250 250 253 253 253 253 253 253 253 253 253
58078 -253 253 253 253 253 253 253 253 253 253 253 253
58079 -253 253 253 253 253 253 231 231 231 246 246 246
58080 -253 253 253 253 253 253 253 253 253 253 253 253
58081 -253 253 253 253 253 253 253 253 253 253 253 253
58082 -253 253 253 253 253 253 253 253 253 253 253 253
58083 -253 253 253 253 253 253 253 253 253 253 253 253
58084 -253 253 253 253 253 253 206 206 206 2 2 6
58085 - 22 22 22 34 34 34 18 14 6 22 22 22
58086 - 26 26 26 18 18 18 6 6 6 2 2 6
58087 - 2 2 6 82 82 82 54 54 54 18 18 18
58088 - 6 6 6 0 0 0 0 0 0 0 0 0
58089 - 0 0 0 0 0 0 0 0 0 0 0 0
58090 - 0 0 0 0 0 0 0 0 0 0 0 0
58091 - 0 0 0 0 0 0 0 0 0 0 0 0
58092 - 0 0 0 0 0 0 0 0 0 0 0 0
58093 - 0 0 0 0 0 0 0 0 0 0 0 0
58094 - 0 0 0 0 0 0 6 6 6 26 26 26
58095 - 62 62 62 106 106 106 74 54 14 185 133 11
58096 -210 162 10 121 92 8 6 6 6 62 62 62
58097 -238 238 238 253 253 253 253 253 253 253 253 253
58098 -253 253 253 253 253 253 253 253 253 253 253 253
58099 -253 253 253 253 253 253 231 231 231 246 246 246
58100 -253 253 253 253 253 253 253 253 253 253 253 253
58101 -253 253 253 253 253 253 253 253 253 253 253 253
58102 -253 253 253 253 253 253 253 253 253 253 253 253
58103 -253 253 253 253 253 253 253 253 253 253 253 253
58104 -253 253 253 253 253 253 158 158 158 18 18 18
58105 - 14 14 14 2 2 6 2 2 6 2 2 6
58106 - 6 6 6 18 18 18 66 66 66 38 38 38
58107 - 6 6 6 94 94 94 50 50 50 18 18 18
58108 - 6 6 6 0 0 0 0 0 0 0 0 0
58109 - 0 0 0 0 0 0 0 0 0 0 0 0
58110 - 0 0 0 0 0 0 0 0 0 0 0 0
58111 - 0 0 0 0 0 0 0 0 0 0 0 0
58112 - 0 0 0 0 0 0 0 0 0 0 0 0
58113 - 0 0 0 0 0 0 0 0 0 6 6 6
58114 - 10 10 10 10 10 10 18 18 18 38 38 38
58115 - 78 78 78 142 134 106 216 158 10 242 186 14
58116 -246 190 14 246 190 14 156 118 10 10 10 10
58117 - 90 90 90 238 238 238 253 253 253 253 253 253
58118 -253 253 253 253 253 253 253 253 253 253 253 253
58119 -253 253 253 253 253 253 231 231 231 250 250 250
58120 -253 253 253 253 253 253 253 253 253 253 253 253
58121 -253 253 253 253 253 253 253 253 253 253 253 253
58122 -253 253 253 253 253 253 253 253 253 253 253 253
58123 -253 253 253 253 253 253 253 253 253 246 230 190
58124 -238 204 91 238 204 91 181 142 44 37 26 9
58125 - 2 2 6 2 2 6 2 2 6 2 2 6
58126 - 2 2 6 2 2 6 38 38 38 46 46 46
58127 - 26 26 26 106 106 106 54 54 54 18 18 18
58128 - 6 6 6 0 0 0 0 0 0 0 0 0
58129 - 0 0 0 0 0 0 0 0 0 0 0 0
58130 - 0 0 0 0 0 0 0 0 0 0 0 0
58131 - 0 0 0 0 0 0 0 0 0 0 0 0
58132 - 0 0 0 0 0 0 0 0 0 0 0 0
58133 - 0 0 0 6 6 6 14 14 14 22 22 22
58134 - 30 30 30 38 38 38 50 50 50 70 70 70
58135 -106 106 106 190 142 34 226 170 11 242 186 14
58136 -246 190 14 246 190 14 246 190 14 154 114 10
58137 - 6 6 6 74 74 74 226 226 226 253 253 253
58138 -253 253 253 253 253 253 253 253 253 253 253 253
58139 -253 253 253 253 253 253 231 231 231 250 250 250
58140 -253 253 253 253 253 253 253 253 253 253 253 253
58141 -253 253 253 253 253 253 253 253 253 253 253 253
58142 -253 253 253 253 253 253 253 253 253 253 253 253
58143 -253 253 253 253 253 253 253 253 253 228 184 62
58144 -241 196 14 241 208 19 232 195 16 38 30 10
58145 - 2 2 6 2 2 6 2 2 6 2 2 6
58146 - 2 2 6 6 6 6 30 30 30 26 26 26
58147 -203 166 17 154 142 90 66 66 66 26 26 26
58148 - 6 6 6 0 0 0 0 0 0 0 0 0
58149 - 0 0 0 0 0 0 0 0 0 0 0 0
58150 - 0 0 0 0 0 0 0 0 0 0 0 0
58151 - 0 0 0 0 0 0 0 0 0 0 0 0
58152 - 0 0 0 0 0 0 0 0 0 0 0 0
58153 - 6 6 6 18 18 18 38 38 38 58 58 58
58154 - 78 78 78 86 86 86 101 101 101 123 123 123
58155 -175 146 61 210 150 10 234 174 13 246 186 14
58156 -246 190 14 246 190 14 246 190 14 238 190 10
58157 -102 78 10 2 2 6 46 46 46 198 198 198
58158 -253 253 253 253 253 253 253 253 253 253 253 253
58159 -253 253 253 253 253 253 234 234 234 242 242 242
58160 -253 253 253 253 253 253 253 253 253 253 253 253
58161 -253 253 253 253 253 253 253 253 253 253 253 253
58162 -253 253 253 253 253 253 253 253 253 253 253 253
58163 -253 253 253 253 253 253 253 253 253 224 178 62
58164 -242 186 14 241 196 14 210 166 10 22 18 6
58165 - 2 2 6 2 2 6 2 2 6 2 2 6
58166 - 2 2 6 2 2 6 6 6 6 121 92 8
58167 -238 202 15 232 195 16 82 82 82 34 34 34
58168 - 10 10 10 0 0 0 0 0 0 0 0 0
58169 - 0 0 0 0 0 0 0 0 0 0 0 0
58170 - 0 0 0 0 0 0 0 0 0 0 0 0
58171 - 0 0 0 0 0 0 0 0 0 0 0 0
58172 - 0 0 0 0 0 0 0 0 0 0 0 0
58173 - 14 14 14 38 38 38 70 70 70 154 122 46
58174 -190 142 34 200 144 11 197 138 11 197 138 11
58175 -213 154 11 226 170 11 242 186 14 246 190 14
58176 -246 190 14 246 190 14 246 190 14 246 190 14
58177 -225 175 15 46 32 6 2 2 6 22 22 22
58178 -158 158 158 250 250 250 253 253 253 253 253 253
58179 -253 253 253 253 253 253 253 253 253 253 253 253
58180 -253 253 253 253 253 253 253 253 253 253 253 253
58181 -253 253 253 253 253 253 253 253 253 253 253 253
58182 -253 253 253 253 253 253 253 253 253 253 253 253
58183 -253 253 253 250 250 250 242 242 242 224 178 62
58184 -239 182 13 236 186 11 213 154 11 46 32 6
58185 - 2 2 6 2 2 6 2 2 6 2 2 6
58186 - 2 2 6 2 2 6 61 42 6 225 175 15
58187 -238 190 10 236 186 11 112 100 78 42 42 42
58188 - 14 14 14 0 0 0 0 0 0 0 0 0
58189 - 0 0 0 0 0 0 0 0 0 0 0 0
58190 - 0 0 0 0 0 0 0 0 0 0 0 0
58191 - 0 0 0 0 0 0 0 0 0 0 0 0
58192 - 0 0 0 0 0 0 0 0 0 6 6 6
58193 - 22 22 22 54 54 54 154 122 46 213 154 11
58194 -226 170 11 230 174 11 226 170 11 226 170 11
58195 -236 178 12 242 186 14 246 190 14 246 190 14
58196 -246 190 14 246 190 14 246 190 14 246 190 14
58197 -241 196 14 184 144 12 10 10 10 2 2 6
58198 - 6 6 6 116 116 116 242 242 242 253 253 253
58199 -253 253 253 253 253 253 253 253 253 253 253 253
58200 -253 253 253 253 253 253 253 253 253 253 253 253
58201 -253 253 253 253 253 253 253 253 253 253 253 253
58202 -253 253 253 253 253 253 253 253 253 253 253 253
58203 -253 253 253 231 231 231 198 198 198 214 170 54
58204 -236 178 12 236 178 12 210 150 10 137 92 6
58205 - 18 14 6 2 2 6 2 2 6 2 2 6
58206 - 6 6 6 70 47 6 200 144 11 236 178 12
58207 -239 182 13 239 182 13 124 112 88 58 58 58
58208 - 22 22 22 6 6 6 0 0 0 0 0 0
58209 - 0 0 0 0 0 0 0 0 0 0 0 0
58210 - 0 0 0 0 0 0 0 0 0 0 0 0
58211 - 0 0 0 0 0 0 0 0 0 0 0 0
58212 - 0 0 0 0 0 0 0 0 0 10 10 10
58213 - 30 30 30 70 70 70 180 133 36 226 170 11
58214 -239 182 13 242 186 14 242 186 14 246 186 14
58215 -246 190 14 246 190 14 246 190 14 246 190 14
58216 -246 190 14 246 190 14 246 190 14 246 190 14
58217 -246 190 14 232 195 16 98 70 6 2 2 6
58218 - 2 2 6 2 2 6 66 66 66 221 221 221
58219 -253 253 253 253 253 253 253 253 253 253 253 253
58220 -253 253 253 253 253 253 253 253 253 253 253 253
58221 -253 253 253 253 253 253 253 253 253 253 253 253
58222 -253 253 253 253 253 253 253 253 253 253 253 253
58223 -253 253 253 206 206 206 198 198 198 214 166 58
58224 -230 174 11 230 174 11 216 158 10 192 133 9
58225 -163 110 8 116 81 8 102 78 10 116 81 8
58226 -167 114 7 197 138 11 226 170 11 239 182 13
58227 -242 186 14 242 186 14 162 146 94 78 78 78
58228 - 34 34 34 14 14 14 6 6 6 0 0 0
58229 - 0 0 0 0 0 0 0 0 0 0 0 0
58230 - 0 0 0 0 0 0 0 0 0 0 0 0
58231 - 0 0 0 0 0 0 0 0 0 0 0 0
58232 - 0 0 0 0 0 0 0 0 0 6 6 6
58233 - 30 30 30 78 78 78 190 142 34 226 170 11
58234 -239 182 13 246 190 14 246 190 14 246 190 14
58235 -246 190 14 246 190 14 246 190 14 246 190 14
58236 -246 190 14 246 190 14 246 190 14 246 190 14
58237 -246 190 14 241 196 14 203 166 17 22 18 6
58238 - 2 2 6 2 2 6 2 2 6 38 38 38
58239 -218 218 218 253 253 253 253 253 253 253 253 253
58240 -253 253 253 253 253 253 253 253 253 253 253 253
58241 -253 253 253 253 253 253 253 253 253 253 253 253
58242 -253 253 253 253 253 253 253 253 253 253 253 253
58243 -250 250 250 206 206 206 198 198 198 202 162 69
58244 -226 170 11 236 178 12 224 166 10 210 150 10
58245 -200 144 11 197 138 11 192 133 9 197 138 11
58246 -210 150 10 226 170 11 242 186 14 246 190 14
58247 -246 190 14 246 186 14 225 175 15 124 112 88
58248 - 62 62 62 30 30 30 14 14 14 6 6 6
58249 - 0 0 0 0 0 0 0 0 0 0 0 0
58250 - 0 0 0 0 0 0 0 0 0 0 0 0
58251 - 0 0 0 0 0 0 0 0 0 0 0 0
58252 - 0 0 0 0 0 0 0 0 0 10 10 10
58253 - 30 30 30 78 78 78 174 135 50 224 166 10
58254 -239 182 13 246 190 14 246 190 14 246 190 14
58255 -246 190 14 246 190 14 246 190 14 246 190 14
58256 -246 190 14 246 190 14 246 190 14 246 190 14
58257 -246 190 14 246 190 14 241 196 14 139 102 15
58258 - 2 2 6 2 2 6 2 2 6 2 2 6
58259 - 78 78 78 250 250 250 253 253 253 253 253 253
58260 -253 253 253 253 253 253 253 253 253 253 253 253
58261 -253 253 253 253 253 253 253 253 253 253 253 253
58262 -253 253 253 253 253 253 253 253 253 253 253 253
58263 -250 250 250 214 214 214 198 198 198 190 150 46
58264 -219 162 10 236 178 12 234 174 13 224 166 10
58265 -216 158 10 213 154 11 213 154 11 216 158 10
58266 -226 170 11 239 182 13 246 190 14 246 190 14
58267 -246 190 14 246 190 14 242 186 14 206 162 42
58268 -101 101 101 58 58 58 30 30 30 14 14 14
58269 - 6 6 6 0 0 0 0 0 0 0 0 0
58270 - 0 0 0 0 0 0 0 0 0 0 0 0
58271 - 0 0 0 0 0 0 0 0 0 0 0 0
58272 - 0 0 0 0 0 0 0 0 0 10 10 10
58273 - 30 30 30 74 74 74 174 135 50 216 158 10
58274 -236 178 12 246 190 14 246 190 14 246 190 14
58275 -246 190 14 246 190 14 246 190 14 246 190 14
58276 -246 190 14 246 190 14 246 190 14 246 190 14
58277 -246 190 14 246 190 14 241 196 14 226 184 13
58278 - 61 42 6 2 2 6 2 2 6 2 2 6
58279 - 22 22 22 238 238 238 253 253 253 253 253 253
58280 -253 253 253 253 253 253 253 253 253 253 253 253
58281 -253 253 253 253 253 253 253 253 253 253 253 253
58282 -253 253 253 253 253 253 253 253 253 253 253 253
58283 -253 253 253 226 226 226 187 187 187 180 133 36
58284 -216 158 10 236 178 12 239 182 13 236 178 12
58285 -230 174 11 226 170 11 226 170 11 230 174 11
58286 -236 178 12 242 186 14 246 190 14 246 190 14
58287 -246 190 14 246 190 14 246 186 14 239 182 13
58288 -206 162 42 106 106 106 66 66 66 34 34 34
58289 - 14 14 14 6 6 6 0 0 0 0 0 0
58290 - 0 0 0 0 0 0 0 0 0 0 0 0
58291 - 0 0 0 0 0 0 0 0 0 0 0 0
58292 - 0 0 0 0 0 0 0 0 0 6 6 6
58293 - 26 26 26 70 70 70 163 133 67 213 154 11
58294 -236 178 12 246 190 14 246 190 14 246 190 14
58295 -246 190 14 246 190 14 246 190 14 246 190 14
58296 -246 190 14 246 190 14 246 190 14 246 190 14
58297 -246 190 14 246 190 14 246 190 14 241 196 14
58298 -190 146 13 18 14 6 2 2 6 2 2 6
58299 - 46 46 46 246 246 246 253 253 253 253 253 253
58300 -253 253 253 253 253 253 253 253 253 253 253 253
58301 -253 253 253 253 253 253 253 253 253 253 253 253
58302 -253 253 253 253 253 253 253 253 253 253 253 253
58303 -253 253 253 221 221 221 86 86 86 156 107 11
58304 -216 158 10 236 178 12 242 186 14 246 186 14
58305 -242 186 14 239 182 13 239 182 13 242 186 14
58306 -242 186 14 246 186 14 246 190 14 246 190 14
58307 -246 190 14 246 190 14 246 190 14 246 190 14
58308 -242 186 14 225 175 15 142 122 72 66 66 66
58309 - 30 30 30 10 10 10 0 0 0 0 0 0
58310 - 0 0 0 0 0 0 0 0 0 0 0 0
58311 - 0 0 0 0 0 0 0 0 0 0 0 0
58312 - 0 0 0 0 0 0 0 0 0 6 6 6
58313 - 26 26 26 70 70 70 163 133 67 210 150 10
58314 -236 178 12 246 190 14 246 190 14 246 190 14
58315 -246 190 14 246 190 14 246 190 14 246 190 14
58316 -246 190 14 246 190 14 246 190 14 246 190 14
58317 -246 190 14 246 190 14 246 190 14 246 190 14
58318 -232 195 16 121 92 8 34 34 34 106 106 106
58319 -221 221 221 253 253 253 253 253 253 253 253 253
58320 -253 253 253 253 253 253 253 253 253 253 253 253
58321 -253 253 253 253 253 253 253 253 253 253 253 253
58322 -253 253 253 253 253 253 253 253 253 253 253 253
58323 -242 242 242 82 82 82 18 14 6 163 110 8
58324 -216 158 10 236 178 12 242 186 14 246 190 14
58325 -246 190 14 246 190 14 246 190 14 246 190 14
58326 -246 190 14 246 190 14 246 190 14 246 190 14
58327 -246 190 14 246 190 14 246 190 14 246 190 14
58328 -246 190 14 246 190 14 242 186 14 163 133 67
58329 - 46 46 46 18 18 18 6 6 6 0 0 0
58330 - 0 0 0 0 0 0 0 0 0 0 0 0
58331 - 0 0 0 0 0 0 0 0 0 0 0 0
58332 - 0 0 0 0 0 0 0 0 0 10 10 10
58333 - 30 30 30 78 78 78 163 133 67 210 150 10
58334 -236 178 12 246 186 14 246 190 14 246 190 14
58335 -246 190 14 246 190 14 246 190 14 246 190 14
58336 -246 190 14 246 190 14 246 190 14 246 190 14
58337 -246 190 14 246 190 14 246 190 14 246 190 14
58338 -241 196 14 215 174 15 190 178 144 253 253 253
58339 -253 253 253 253 253 253 253 253 253 253 253 253
58340 -253 253 253 253 253 253 253 253 253 253 253 253
58341 -253 253 253 253 253 253 253 253 253 253 253 253
58342 -253 253 253 253 253 253 253 253 253 218 218 218
58343 - 58 58 58 2 2 6 22 18 6 167 114 7
58344 -216 158 10 236 178 12 246 186 14 246 190 14
58345 -246 190 14 246 190 14 246 190 14 246 190 14
58346 -246 190 14 246 190 14 246 190 14 246 190 14
58347 -246 190 14 246 190 14 246 190 14 246 190 14
58348 -246 190 14 246 186 14 242 186 14 190 150 46
58349 - 54 54 54 22 22 22 6 6 6 0 0 0
58350 - 0 0 0 0 0 0 0 0 0 0 0 0
58351 - 0 0 0 0 0 0 0 0 0 0 0 0
58352 - 0 0 0 0 0 0 0 0 0 14 14 14
58353 - 38 38 38 86 86 86 180 133 36 213 154 11
58354 -236 178 12 246 186 14 246 190 14 246 190 14
58355 -246 190 14 246 190 14 246 190 14 246 190 14
58356 -246 190 14 246 190 14 246 190 14 246 190 14
58357 -246 190 14 246 190 14 246 190 14 246 190 14
58358 -246 190 14 232 195 16 190 146 13 214 214 214
58359 -253 253 253 253 253 253 253 253 253 253 253 253
58360 -253 253 253 253 253 253 253 253 253 253 253 253
58361 -253 253 253 253 253 253 253 253 253 253 253 253
58362 -253 253 253 250 250 250 170 170 170 26 26 26
58363 - 2 2 6 2 2 6 37 26 9 163 110 8
58364 -219 162 10 239 182 13 246 186 14 246 190 14
58365 -246 190 14 246 190 14 246 190 14 246 190 14
58366 -246 190 14 246 190 14 246 190 14 246 190 14
58367 -246 190 14 246 190 14 246 190 14 246 190 14
58368 -246 186 14 236 178 12 224 166 10 142 122 72
58369 - 46 46 46 18 18 18 6 6 6 0 0 0
58370 - 0 0 0 0 0 0 0 0 0 0 0 0
58371 - 0 0 0 0 0 0 0 0 0 0 0 0
58372 - 0 0 0 0 0 0 6 6 6 18 18 18
58373 - 50 50 50 109 106 95 192 133 9 224 166 10
58374 -242 186 14 246 190 14 246 190 14 246 190 14
58375 -246 190 14 246 190 14 246 190 14 246 190 14
58376 -246 190 14 246 190 14 246 190 14 246 190 14
58377 -246 190 14 246 190 14 246 190 14 246 190 14
58378 -242 186 14 226 184 13 210 162 10 142 110 46
58379 -226 226 226 253 253 253 253 253 253 253 253 253
58380 -253 253 253 253 253 253 253 253 253 253 253 253
58381 -253 253 253 253 253 253 253 253 253 253 253 253
58382 -198 198 198 66 66 66 2 2 6 2 2 6
58383 - 2 2 6 2 2 6 50 34 6 156 107 11
58384 -219 162 10 239 182 13 246 186 14 246 190 14
58385 -246 190 14 246 190 14 246 190 14 246 190 14
58386 -246 190 14 246 190 14 246 190 14 246 190 14
58387 -246 190 14 246 190 14 246 190 14 242 186 14
58388 -234 174 13 213 154 11 154 122 46 66 66 66
58389 - 30 30 30 10 10 10 0 0 0 0 0 0
58390 - 0 0 0 0 0 0 0 0 0 0 0 0
58391 - 0 0 0 0 0 0 0 0 0 0 0 0
58392 - 0 0 0 0 0 0 6 6 6 22 22 22
58393 - 58 58 58 154 121 60 206 145 10 234 174 13
58394 -242 186 14 246 186 14 246 190 14 246 190 14
58395 -246 190 14 246 190 14 246 190 14 246 190 14
58396 -246 190 14 246 190 14 246 190 14 246 190 14
58397 -246 190 14 246 190 14 246 190 14 246 190 14
58398 -246 186 14 236 178 12 210 162 10 163 110 8
58399 - 61 42 6 138 138 138 218 218 218 250 250 250
58400 -253 253 253 253 253 253 253 253 253 250 250 250
58401 -242 242 242 210 210 210 144 144 144 66 66 66
58402 - 6 6 6 2 2 6 2 2 6 2 2 6
58403 - 2 2 6 2 2 6 61 42 6 163 110 8
58404 -216 158 10 236 178 12 246 190 14 246 190 14
58405 -246 190 14 246 190 14 246 190 14 246 190 14
58406 -246 190 14 246 190 14 246 190 14 246 190 14
58407 -246 190 14 239 182 13 230 174 11 216 158 10
58408 -190 142 34 124 112 88 70 70 70 38 38 38
58409 - 18 18 18 6 6 6 0 0 0 0 0 0
58410 - 0 0 0 0 0 0 0 0 0 0 0 0
58411 - 0 0 0 0 0 0 0 0 0 0 0 0
58412 - 0 0 0 0 0 0 6 6 6 22 22 22
58413 - 62 62 62 168 124 44 206 145 10 224 166 10
58414 -236 178 12 239 182 13 242 186 14 242 186 14
58415 -246 186 14 246 190 14 246 190 14 246 190 14
58416 -246 190 14 246 190 14 246 190 14 246 190 14
58417 -246 190 14 246 190 14 246 190 14 246 190 14
58418 -246 190 14 236 178 12 216 158 10 175 118 6
58419 - 80 54 7 2 2 6 6 6 6 30 30 30
58420 - 54 54 54 62 62 62 50 50 50 38 38 38
58421 - 14 14 14 2 2 6 2 2 6 2 2 6
58422 - 2 2 6 2 2 6 2 2 6 2 2 6
58423 - 2 2 6 6 6 6 80 54 7 167 114 7
58424 -213 154 11 236 178 12 246 190 14 246 190 14
58425 -246 190 14 246 190 14 246 190 14 246 190 14
58426 -246 190 14 242 186 14 239 182 13 239 182 13
58427 -230 174 11 210 150 10 174 135 50 124 112 88
58428 - 82 82 82 54 54 54 34 34 34 18 18 18
58429 - 6 6 6 0 0 0 0 0 0 0 0 0
58430 - 0 0 0 0 0 0 0 0 0 0 0 0
58431 - 0 0 0 0 0 0 0 0 0 0 0 0
58432 - 0 0 0 0 0 0 6 6 6 18 18 18
58433 - 50 50 50 158 118 36 192 133 9 200 144 11
58434 -216 158 10 219 162 10 224 166 10 226 170 11
58435 -230 174 11 236 178 12 239 182 13 239 182 13
58436 -242 186 14 246 186 14 246 190 14 246 190 14
58437 -246 190 14 246 190 14 246 190 14 246 190 14
58438 -246 186 14 230 174 11 210 150 10 163 110 8
58439 -104 69 6 10 10 10 2 2 6 2 2 6
58440 - 2 2 6 2 2 6 2 2 6 2 2 6
58441 - 2 2 6 2 2 6 2 2 6 2 2 6
58442 - 2 2 6 2 2 6 2 2 6 2 2 6
58443 - 2 2 6 6 6 6 91 60 6 167 114 7
58444 -206 145 10 230 174 11 242 186 14 246 190 14
58445 -246 190 14 246 190 14 246 186 14 242 186 14
58446 -239 182 13 230 174 11 224 166 10 213 154 11
58447 -180 133 36 124 112 88 86 86 86 58 58 58
58448 - 38 38 38 22 22 22 10 10 10 6 6 6
58449 - 0 0 0 0 0 0 0 0 0 0 0 0
58450 - 0 0 0 0 0 0 0 0 0 0 0 0
58451 - 0 0 0 0 0 0 0 0 0 0 0 0
58452 - 0 0 0 0 0 0 0 0 0 14 14 14
58453 - 34 34 34 70 70 70 138 110 50 158 118 36
58454 -167 114 7 180 123 7 192 133 9 197 138 11
58455 -200 144 11 206 145 10 213 154 11 219 162 10
58456 -224 166 10 230 174 11 239 182 13 242 186 14
58457 -246 186 14 246 186 14 246 186 14 246 186 14
58458 -239 182 13 216 158 10 185 133 11 152 99 6
58459 -104 69 6 18 14 6 2 2 6 2 2 6
58460 - 2 2 6 2 2 6 2 2 6 2 2 6
58461 - 2 2 6 2 2 6 2 2 6 2 2 6
58462 - 2 2 6 2 2 6 2 2 6 2 2 6
58463 - 2 2 6 6 6 6 80 54 7 152 99 6
58464 -192 133 9 219 162 10 236 178 12 239 182 13
58465 -246 186 14 242 186 14 239 182 13 236 178 12
58466 -224 166 10 206 145 10 192 133 9 154 121 60
58467 - 94 94 94 62 62 62 42 42 42 22 22 22
58468 - 14 14 14 6 6 6 0 0 0 0 0 0
58469 - 0 0 0 0 0 0 0 0 0 0 0 0
58470 - 0 0 0 0 0 0 0 0 0 0 0 0
58471 - 0 0 0 0 0 0 0 0 0 0 0 0
58472 - 0 0 0 0 0 0 0 0 0 6 6 6
58473 - 18 18 18 34 34 34 58 58 58 78 78 78
58474 -101 98 89 124 112 88 142 110 46 156 107 11
58475 -163 110 8 167 114 7 175 118 6 180 123 7
58476 -185 133 11 197 138 11 210 150 10 219 162 10
58477 -226 170 11 236 178 12 236 178 12 234 174 13
58478 -219 162 10 197 138 11 163 110 8 130 83 6
58479 - 91 60 6 10 10 10 2 2 6 2 2 6
58480 - 18 18 18 38 38 38 38 38 38 38 38 38
58481 - 38 38 38 38 38 38 38 38 38 38 38 38
58482 - 38 38 38 38 38 38 26 26 26 2 2 6
58483 - 2 2 6 6 6 6 70 47 6 137 92 6
58484 -175 118 6 200 144 11 219 162 10 230 174 11
58485 -234 174 13 230 174 11 219 162 10 210 150 10
58486 -192 133 9 163 110 8 124 112 88 82 82 82
58487 - 50 50 50 30 30 30 14 14 14 6 6 6
58488 - 0 0 0 0 0 0 0 0 0 0 0 0
58489 - 0 0 0 0 0 0 0 0 0 0 0 0
58490 - 0 0 0 0 0 0 0 0 0 0 0 0
58491 - 0 0 0 0 0 0 0 0 0 0 0 0
58492 - 0 0 0 0 0 0 0 0 0 0 0 0
58493 - 6 6 6 14 14 14 22 22 22 34 34 34
58494 - 42 42 42 58 58 58 74 74 74 86 86 86
58495 -101 98 89 122 102 70 130 98 46 121 87 25
58496 -137 92 6 152 99 6 163 110 8 180 123 7
58497 -185 133 11 197 138 11 206 145 10 200 144 11
58498 -180 123 7 156 107 11 130 83 6 104 69 6
58499 - 50 34 6 54 54 54 110 110 110 101 98 89
58500 - 86 86 86 82 82 82 78 78 78 78 78 78
58501 - 78 78 78 78 78 78 78 78 78 78 78 78
58502 - 78 78 78 82 82 82 86 86 86 94 94 94
58503 -106 106 106 101 101 101 86 66 34 124 80 6
58504 -156 107 11 180 123 7 192 133 9 200 144 11
58505 -206 145 10 200 144 11 192 133 9 175 118 6
58506 -139 102 15 109 106 95 70 70 70 42 42 42
58507 - 22 22 22 10 10 10 0 0 0 0 0 0
58508 - 0 0 0 0 0 0 0 0 0 0 0 0
58509 - 0 0 0 0 0 0 0 0 0 0 0 0
58510 - 0 0 0 0 0 0 0 0 0 0 0 0
58511 - 0 0 0 0 0 0 0 0 0 0 0 0
58512 - 0 0 0 0 0 0 0 0 0 0 0 0
58513 - 0 0 0 0 0 0 6 6 6 10 10 10
58514 - 14 14 14 22 22 22 30 30 30 38 38 38
58515 - 50 50 50 62 62 62 74 74 74 90 90 90
58516 -101 98 89 112 100 78 121 87 25 124 80 6
58517 -137 92 6 152 99 6 152 99 6 152 99 6
58518 -138 86 6 124 80 6 98 70 6 86 66 30
58519 -101 98 89 82 82 82 58 58 58 46 46 46
58520 - 38 38 38 34 34 34 34 34 34 34 34 34
58521 - 34 34 34 34 34 34 34 34 34 34 34 34
58522 - 34 34 34 34 34 34 38 38 38 42 42 42
58523 - 54 54 54 82 82 82 94 86 76 91 60 6
58524 -134 86 6 156 107 11 167 114 7 175 118 6
58525 -175 118 6 167 114 7 152 99 6 121 87 25
58526 -101 98 89 62 62 62 34 34 34 18 18 18
58527 - 6 6 6 0 0 0 0 0 0 0 0 0
58528 - 0 0 0 0 0 0 0 0 0 0 0 0
58529 - 0 0 0 0 0 0 0 0 0 0 0 0
58530 - 0 0 0 0 0 0 0 0 0 0 0 0
58531 - 0 0 0 0 0 0 0 0 0 0 0 0
58532 - 0 0 0 0 0 0 0 0 0 0 0 0
58533 - 0 0 0 0 0 0 0 0 0 0 0 0
58534 - 0 0 0 6 6 6 6 6 6 10 10 10
58535 - 18 18 18 22 22 22 30 30 30 42 42 42
58536 - 50 50 50 66 66 66 86 86 86 101 98 89
58537 -106 86 58 98 70 6 104 69 6 104 69 6
58538 -104 69 6 91 60 6 82 62 34 90 90 90
58539 - 62 62 62 38 38 38 22 22 22 14 14 14
58540 - 10 10 10 10 10 10 10 10 10 10 10 10
58541 - 10 10 10 10 10 10 6 6 6 10 10 10
58542 - 10 10 10 10 10 10 10 10 10 14 14 14
58543 - 22 22 22 42 42 42 70 70 70 89 81 66
58544 - 80 54 7 104 69 6 124 80 6 137 92 6
58545 -134 86 6 116 81 8 100 82 52 86 86 86
58546 - 58 58 58 30 30 30 14 14 14 6 6 6
58547 - 0 0 0 0 0 0 0 0 0 0 0 0
58548 - 0 0 0 0 0 0 0 0 0 0 0 0
58549 - 0 0 0 0 0 0 0 0 0 0 0 0
58550 - 0 0 0 0 0 0 0 0 0 0 0 0
58551 - 0 0 0 0 0 0 0 0 0 0 0 0
58552 - 0 0 0 0 0 0 0 0 0 0 0 0
58553 - 0 0 0 0 0 0 0 0 0 0 0 0
58554 - 0 0 0 0 0 0 0 0 0 0 0 0
58555 - 0 0 0 6 6 6 10 10 10 14 14 14
58556 - 18 18 18 26 26 26 38 38 38 54 54 54
58557 - 70 70 70 86 86 86 94 86 76 89 81 66
58558 - 89 81 66 86 86 86 74 74 74 50 50 50
58559 - 30 30 30 14 14 14 6 6 6 0 0 0
58560 - 0 0 0 0 0 0 0 0 0 0 0 0
58561 - 0 0 0 0 0 0 0 0 0 0 0 0
58562 - 0 0 0 0 0 0 0 0 0 0 0 0
58563 - 6 6 6 18 18 18 34 34 34 58 58 58
58564 - 82 82 82 89 81 66 89 81 66 89 81 66
58565 - 94 86 66 94 86 76 74 74 74 50 50 50
58566 - 26 26 26 14 14 14 6 6 6 0 0 0
58567 - 0 0 0 0 0 0 0 0 0 0 0 0
58568 - 0 0 0 0 0 0 0 0 0 0 0 0
58569 - 0 0 0 0 0 0 0 0 0 0 0 0
58570 - 0 0 0 0 0 0 0 0 0 0 0 0
58571 - 0 0 0 0 0 0 0 0 0 0 0 0
58572 - 0 0 0 0 0 0 0 0 0 0 0 0
58573 - 0 0 0 0 0 0 0 0 0 0 0 0
58574 - 0 0 0 0 0 0 0 0 0 0 0 0
58575 - 0 0 0 0 0 0 0 0 0 0 0 0
58576 - 6 6 6 6 6 6 14 14 14 18 18 18
58577 - 30 30 30 38 38 38 46 46 46 54 54 54
58578 - 50 50 50 42 42 42 30 30 30 18 18 18
58579 - 10 10 10 0 0 0 0 0 0 0 0 0
58580 - 0 0 0 0 0 0 0 0 0 0 0 0
58581 - 0 0 0 0 0 0 0 0 0 0 0 0
58582 - 0 0 0 0 0 0 0 0 0 0 0 0
58583 - 0 0 0 6 6 6 14 14 14 26 26 26
58584 - 38 38 38 50 50 50 58 58 58 58 58 58
58585 - 54 54 54 42 42 42 30 30 30 18 18 18
58586 - 10 10 10 0 0 0 0 0 0 0 0 0
58587 - 0 0 0 0 0 0 0 0 0 0 0 0
58588 - 0 0 0 0 0 0 0 0 0 0 0 0
58589 - 0 0 0 0 0 0 0 0 0 0 0 0
58590 - 0 0 0 0 0 0 0 0 0 0 0 0
58591 - 0 0 0 0 0 0 0 0 0 0 0 0
58592 - 0 0 0 0 0 0 0 0 0 0 0 0
58593 - 0 0 0 0 0 0 0 0 0 0 0 0
58594 - 0 0 0 0 0 0 0 0 0 0 0 0
58595 - 0 0 0 0 0 0 0 0 0 0 0 0
58596 - 0 0 0 0 0 0 0 0 0 6 6 6
58597 - 6 6 6 10 10 10 14 14 14 18 18 18
58598 - 18 18 18 14 14 14 10 10 10 6 6 6
58599 - 0 0 0 0 0 0 0 0 0 0 0 0
58600 - 0 0 0 0 0 0 0 0 0 0 0 0
58601 - 0 0 0 0 0 0 0 0 0 0 0 0
58602 - 0 0 0 0 0 0 0 0 0 0 0 0
58603 - 0 0 0 0 0 0 0 0 0 6 6 6
58604 - 14 14 14 18 18 18 22 22 22 22 22 22
58605 - 18 18 18 14 14 14 10 10 10 6 6 6
58606 - 0 0 0 0 0 0 0 0 0 0 0 0
58607 - 0 0 0 0 0 0 0 0 0 0 0 0
58608 - 0 0 0 0 0 0 0 0 0 0 0 0
58609 - 0 0 0 0 0 0 0 0 0 0 0 0
58610 - 0 0 0 0 0 0 0 0 0 0 0 0
58611 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58612 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58613 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58614 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58615 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58616 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58617 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58618 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58619 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58620 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58621 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58622 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58623 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58625 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58626 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58627 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58628 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58629 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58630 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58631 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58632 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58633 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58634 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58635 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58636 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58637 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58639 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58640 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58641 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58642 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58643 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58644 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58645 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58646 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58647 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58648 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58649 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58650 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58651 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58653 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58654 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58655 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58656 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58657 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58658 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58659 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58660 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58661 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58662 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58663 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58664 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58665 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58667 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58668 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58669 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58670 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58671 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58672 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58673 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58674 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58675 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58676 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58677 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58678 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58679 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58681 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58682 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58683 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58684 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58685 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58686 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58687 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58688 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58689 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58690 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58691 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58692 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58693 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58695 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58696 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58697 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58698 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58699 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
58700 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
58701 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58702 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58703 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58704 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
58705 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58706 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
58707 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58709 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58710 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58711 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58712 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58713 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
58714 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
58715 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58716 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58717 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58718 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
58719 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
58720 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
58721 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58723 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58724 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58725 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58726 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58727 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
58728 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
58729 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58730 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58731 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58732 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
58733 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
58734 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
58735 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
58737 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58738 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58739 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58740 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
58741 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
58742 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
58743 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
58744 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58745 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
58746 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
58747 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
58748 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
58749 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
58751 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58752 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58753 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58754 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
58755 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
58756 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
58757 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
58758 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58759 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
58760 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
58761 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
58762 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
58763 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
58765 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58766 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58767 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58768 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
58769 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
58770 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
58771 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
58772 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
58773 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
58774 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
58775 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
58776 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
58777 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
58779 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58780 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58781 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
58782 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
58783 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
58784 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
58785 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
58786 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
58787 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
58788 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
58789 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
58790 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
58791 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
58793 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58794 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58795 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
58796 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
58797 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
58798 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
58799 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
58800 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
58801 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
58802 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
58803 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
58804 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
58805 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
58807 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58808 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58809 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
58810 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
58811 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
58812 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
58813 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
58814 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
58815 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
58816 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
58817 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
58818 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
58819 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58821 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58822 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58823 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
58824 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
58825 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
58826 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
58827 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
58828 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
58829 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
58830 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
58831 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
58832 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
58833 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
58835 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58836 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
58837 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
58838 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
58839 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
58840 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
58841 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
58842 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
58843 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
58844 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
58845 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
58846 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
58847 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
58849 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58850 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
58851 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
58852 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174